repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
pchauncey/ansible | test/units/modules/network/ios/test_ios_ping.py | 20 | 2742 | #
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.tests.mock import patch
from ansible.modules.network.ios import ios_ping
from .ios_module import TestIosModule, load_fixture, set_module_args
class TestIosPingModule(TestIosModule):
''' Class used for Unit Tests agains ios_ping module '''
module = ios_ping
def setUp(self):
self.mock_run_commands = patch('ansible.modules.network.ios.ios_ping.run_commands')
self.run_commands = self.mock_run_commands.start()
def tearDown(self):
self.mock_run_commands.stop()
def load_fixtures(self, commands=None):
def load_from_file(*args, **kwargs):
module = args
commands = kwargs['commands']
output = list()
for command in commands:
filename = str(command).split(' | ')[0].replace(' ', '_')
output.append(load_fixture('ios_ping_%s' % filename))
return output
self.run_commands.side_effect = load_from_file
def test_ios_ping_expected_success(self):
''' Test for successful pings when destination should be reachable '''
set_module_args(dict(count=2, dest="8.8.8.8"))
self.execute_module()
def test_ios_ping_expected_failure(self):
''' Test for unsuccessful pings when destination should not be reachable '''
set_module_args(dict(count=2, dest="10.255.255.250", state="absent", timeout=45))
self.execute_module()
def test_ios_ping_unexpected_success(self):
''' Test for successful pings when destination should not be reachable - FAIL. '''
set_module_args(dict(count=2, dest="8.8.8.8", state="absent"))
self.execute_module(failed=True)
def test_ios_ping_unexpected_failure(self):
''' Test for unsuccessful pings when destination should be reachable - FAIL. '''
set_module_args(dict(count=2, dest="10.255.255.250", timeout=45))
self.execute_module(failed=True)
| gpl-3.0 |
kkochubey1/docker-sikuli-novnc | web/log/config.py | 15 | 3429 | #!/usr/bin/env python
import sys
import logging
import logging.handlers
#The terminal has 8 colors with codes from 0 to 7
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8)
#These are the sequences need to get colored ouput
RESET_SEQ = "\033[0m"
COLOR_SEQ = "\033[1;%dm"
BOLD_SEQ = "\033[1m"
#The background is set with 40 plus the number of the color,
#and the foreground with 30
COLORS = {
'WARNING': COLOR_SEQ % (30 + YELLOW) + 'WARN ' + RESET_SEQ,
'INFO': COLOR_SEQ % (30 + WHITE) + 'INFO ' + RESET_SEQ,
'DEBUG': COLOR_SEQ % (30 + BLUE) + 'DEBUG' + RESET_SEQ,
'CRITICAL': COLOR_SEQ % (30 + YELLOW) + 'CRITI' + RESET_SEQ,
'ERROR': COLOR_SEQ % (30 + RED) + 'ERROR' + RESET_SEQ,
}
class ColoredFormatter(logging.Formatter):
def __init__(self, msg, use_color=True):
logging.Formatter.__init__(self, msg)
self.use_color = use_color
def format(self, record):
if self.use_color:
record.levelname = COLORS.get(record.levelname, record.levelname)
return logging.Formatter.format(self, record)
class LoggingConfiguration(object):
COLOR_FORMAT = "[%(asctime)s" + \
"][%(threadName)-22s][%(levelname)s] %(message)s " + \
"(" + BOLD_SEQ + "%(filename)s" + RESET_SEQ + ":%(lineno)d)"
NO_COLOR_FORMAT = "[%(asctime)s][%(threadName)-22s][%(levelname)s] " + \
"%(message)s " + \
"(%(filename)s:%(lineno)d)"
FILE_FORMAT = "[%(asctime)s][%(threadName)-22s][%(levelname)s] " + \
"%(message)s "
@classmethod
def set(cls, log_level, log_filename, append=None, **kwargs):
""" Configure a rotating file logging
"""
logger = logging.getLogger()
logger.setLevel(log_level)
COLOR_FORMAT = cls.COLOR_FORMAT
NO_COLOR_FORMAT = cls.NO_COLOR_FORMAT
FILE_FORMAT = cls.FILE_FORMAT
if 'name' in kwargs:
COLOR_FORMAT = COLOR_FORMAT.replace('%(threadName)-22s',
'%-22s' % (kwargs['name']))
NO_COLOR_FORMAT = NO_COLOR_FORMAT.replace(
'%(threadName)-22s', '%-22s' % (kwargs['name']))
FILE_FORMAT = FILE_FORMAT.replace(
'%(threadName)-22s', '%s' % (kwargs['name']))
# Log to rotating file
try:
fh = logging.handlers.RotatingFileHandler(log_filename,
mode='a+',
backupCount=3)
fh = logging.FileHandler(log_filename, mode='a+')
fh.setFormatter(ColoredFormatter(FILE_FORMAT, False))
fh.setLevel(log_level)
logger.addHandler(fh)
if not append:
# Create a new log file on every new
fh.doRollover()
except:
pass
# Log to sys.stderr using log level passed through command line
if log_level != logging.NOTSET:
log_handler = logging.StreamHandler(sys.stdout)
if sys.platform.find('linux') >= 0:
formatter = ColoredFormatter(COLOR_FORMAT)
else:
formatter = ColoredFormatter(NO_COLOR_FORMAT, False)
log_handler.setFormatter(formatter)
log_handler.setLevel(log_level)
logger.addHandler(log_handler)
| apache-2.0 |
tkinz27/ansible-modules-core | cloud/linode/linode.py | 142 | 18004 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: linode
short_description: create / delete / stop / restart an instance in Linode Public Cloud
description:
- creates / deletes a Linode Public Cloud instance and optionally waits for it to be 'running'.
version_added: "1.3"
options:
state:
description:
- Indicate desired state of the resource
choices: ['present', 'active', 'started', 'absent', 'deleted', 'stopped', 'restarted']
default: present
api_key:
description:
- Linode API key
default: null
name:
description:
- Name to give the instance (alphanumeric, dashes, underscore)
- To keep sanity on the Linode Web Console, name is prepended with LinodeID_
default: null
type: string
linode_id:
description:
- Unique ID of a linode server
aliases: lid
default: null
type: integer
plan:
description:
- plan to use for the instance (Linode plan)
default: null
type: integer
payment_term:
description:
- payment term to use for the instance (payment term in months)
default: 1
type: integer
choices: [1, 12, 24]
password:
description:
- root password to apply to a new server (auto generated if missing)
default: null
type: string
ssh_pub_key:
description:
- SSH public key applied to root user
default: null
type: string
swap:
description:
- swap size in MB
default: 512
type: integer
distribution:
description:
- distribution to use for the instance (Linode Distribution)
default: null
type: integer
datacenter:
description:
- datacenter to create an instance in (Linode Datacenter)
default: null
type: integer
wait:
description:
- wait for the instance to be in state 'running' before returning
default: "no"
choices: [ "yes", "no" ]
wait_timeout:
description:
- how long before wait gives up, in seconds
default: 300
requirements:
- "python >= 2.6"
- "linode-python"
- "pycurl"
author: "Vincent Viallet (@zbal)"
notes:
- LINODE_API_KEY env variable can be used instead
'''
EXAMPLES = '''
# Create a server
- local_action:
module: linode
api_key: 'longStringFromLinodeApi'
name: linode-test1
plan: 1
datacenter: 2
distribution: 99
password: 'superSecureRootPassword'
ssh_pub_key: 'ssh-rsa qwerty'
swap: 768
wait: yes
wait_timeout: 600
state: present
# Ensure a running server (create if missing)
- local_action:
module: linode
api_key: 'longStringFromLinodeApi'
name: linode-test1
linode_id: 12345678
plan: 1
datacenter: 2
distribution: 99
password: 'superSecureRootPassword'
ssh_pub_key: 'ssh-rsa qwerty'
swap: 768
wait: yes
wait_timeout: 600
state: present
# Delete a server
- local_action:
module: linode
api_key: 'longStringFromLinodeApi'
name: linode-test1
linode_id: 12345678
state: absent
# Stop a server
- local_action:
module: linode
api_key: 'longStringFromLinodeApi'
name: linode-test1
linode_id: 12345678
state: stopped
# Reboot a server
- local_action:
module: linode
api_key: 'longStringFromLinodeApi'
name: linode-test1
linode_id: 12345678
state: restarted
'''
import time
import os
try:
import pycurl
HAS_PYCURL = True
except ImportError:
HAS_PYCURL = False
try:
from linode import api as linode_api
HAS_LINODE = True
except ImportError:
HAS_LINODE = False
def randompass():
'''
Generate a long random password that comply to Linode requirements
'''
# Linode API currently requires the following:
# It must contain at least two of these four character classes:
# lower case letters - upper case letters - numbers - punctuation
# we play it safe :)
import random
import string
# as of python 2.4, this reseeds the PRNG from urandom
random.seed()
lower = ''.join(random.choice(string.ascii_lowercase) for x in range(6))
upper = ''.join(random.choice(string.ascii_uppercase) for x in range(6))
number = ''.join(random.choice(string.digits) for x in range(6))
punct = ''.join(random.choice(string.punctuation) for x in range(6))
p = lower + upper + number + punct
return ''.join(random.sample(p, len(p)))
def getInstanceDetails(api, server):
'''
Return the details of an instance, populating IPs, etc.
'''
instance = {'id': server['LINODEID'],
'name': server['LABEL'],
'public': [],
'private': []}
# Populate with ips
for ip in api.linode_ip_list(LinodeId=server['LINODEID']):
if ip['ISPUBLIC'] and 'ipv4' not in instance:
instance['ipv4'] = ip['IPADDRESS']
instance['fqdn'] = ip['RDNS_NAME']
if ip['ISPUBLIC']:
instance['public'].append({'ipv4': ip['IPADDRESS'],
'fqdn': ip['RDNS_NAME'],
'ip_id': ip['IPADDRESSID']})
else:
instance['private'].append({'ipv4': ip['IPADDRESS'],
'fqdn': ip['RDNS_NAME'],
'ip_id': ip['IPADDRESSID']})
return instance
def linodeServers(module, api, state, name, plan, distribution, datacenter, linode_id,
payment_term, password, ssh_pub_key, swap, wait, wait_timeout):
instances = []
changed = False
new_server = False
servers = []
disks = []
configs = []
jobs = []
# See if we can match an existing server details with the provided linode_id
if linode_id:
# For the moment we only consider linode_id as criteria for match
# Later we can use more (size, name, etc.) and update existing
servers = api.linode_list(LinodeId=linode_id)
# Attempt to fetch details about disks and configs only if servers are
# found with linode_id
if servers:
disks = api.linode_disk_list(LinodeId=linode_id)
configs = api.linode_config_list(LinodeId=linode_id)
# Act on the state
if state in ('active', 'present', 'started'):
# TODO: validate all the plan / distribution / datacenter are valid
# Multi step process/validation:
# - need linode_id (entity)
# - need disk_id for linode_id - create disk from distrib
# - need config_id for linode_id - create config (need kernel)
# Any create step triggers a job that need to be waited for.
if not servers:
for arg in ('name', 'plan', 'distribution', 'datacenter'):
if not eval(arg):
module.fail_json(msg='%s is required for active state' % arg)
# Create linode entity
new_server = True
try:
res = api.linode_create(DatacenterID=datacenter, PlanID=plan,
PaymentTerm=payment_term)
linode_id = res['LinodeID']
# Update linode Label to match name
api.linode_update(LinodeId=linode_id, Label='%s_%s' % (linode_id, name))
# Save server
servers = api.linode_list(LinodeId=linode_id)
except Exception, e:
module.fail_json(msg = '%s' % e.value[0]['ERRORMESSAGE'])
if not disks:
for arg in ('name', 'linode_id', 'distribution'):
if not eval(arg):
module.fail_json(msg='%s is required for active state' % arg)
# Create disks (1 from distrib, 1 for SWAP)
new_server = True
try:
if not password:
# Password is required on creation, if not provided generate one
password = randompass()
if not swap:
swap = 512
# Create data disk
size = servers[0]['TOTALHD'] - swap
if ssh_pub_key:
res = api.linode_disk_createfromdistribution(
LinodeId=linode_id, DistributionID=distribution,
rootPass=password, rootSSHKey=ssh_pub_key,
Label='%s data disk (lid: %s)' % (name, linode_id), Size=size)
else:
res = api.linode_disk_createfromdistribution(
LinodeId=linode_id, DistributionID=distribution, rootPass=password,
Label='%s data disk (lid: %s)' % (name, linode_id), Size=size)
jobs.append(res['JobID'])
# Create SWAP disk
res = api.linode_disk_create(LinodeId=linode_id, Type='swap',
Label='%s swap disk (lid: %s)' % (name, linode_id),
Size=swap)
jobs.append(res['JobID'])
except Exception, e:
# TODO: destroy linode ?
module.fail_json(msg = '%s' % e.value[0]['ERRORMESSAGE'])
if not configs:
for arg in ('name', 'linode_id', 'distribution'):
if not eval(arg):
module.fail_json(msg='%s is required for active state' % arg)
# Check architecture
for distrib in api.avail_distributions():
if distrib['DISTRIBUTIONID'] != distribution:
continue
arch = '32'
if distrib['IS64BIT']:
arch = '64'
break
# Get latest kernel matching arch
for kernel in api.avail_kernels():
if not kernel['LABEL'].startswith('Latest %s' % arch):
continue
kernel_id = kernel['KERNELID']
break
# Get disk list
disks_id = []
for disk in api.linode_disk_list(LinodeId=linode_id):
if disk['TYPE'] == 'ext3':
disks_id.insert(0, str(disk['DISKID']))
continue
disks_id.append(str(disk['DISKID']))
# Trick to get the 9 items in the list
while len(disks_id) < 9:
disks_id.append('')
disks_list = ','.join(disks_id)
# Create config
new_server = True
try:
api.linode_config_create(LinodeId=linode_id, KernelId=kernel_id,
Disklist=disks_list, Label='%s config' % name)
configs = api.linode_config_list(LinodeId=linode_id)
except Exception, e:
module.fail_json(msg = '%s' % e.value[0]['ERRORMESSAGE'])
# Start / Ensure servers are running
for server in servers:
# Refresh server state
server = api.linode_list(LinodeId=server['LINODEID'])[0]
# Ensure existing servers are up and running, boot if necessary
if server['STATUS'] != 1:
res = api.linode_boot(LinodeId=linode_id)
jobs.append(res['JobID'])
changed = True
# wait here until the instances are up
wait_timeout = time.time() + wait_timeout
while wait and wait_timeout > time.time():
# refresh the server details
server = api.linode_list(LinodeId=server['LINODEID'])[0]
# status:
# -2: Boot failed
# 1: Running
if server['STATUS'] in (-2, 1):
break
time.sleep(5)
if wait and wait_timeout <= time.time():
# waiting took too long
module.fail_json(msg = 'Timeout waiting on %s (lid: %s)' %
(server['LABEL'], server['LINODEID']))
# Get a fresh copy of the server details
server = api.linode_list(LinodeId=server['LINODEID'])[0]
if server['STATUS'] == -2:
module.fail_json(msg = '%s (lid: %s) failed to boot' %
(server['LABEL'], server['LINODEID']))
# From now on we know the task is a success
# Build instance report
instance = getInstanceDetails(api, server)
# depending on wait flag select the status
if wait:
instance['status'] = 'Running'
else:
instance['status'] = 'Starting'
# Return the root password if this is a new box and no SSH key
# has been provided
if new_server and not ssh_pub_key:
instance['password'] = password
instances.append(instance)
elif state in ('stopped'):
for arg in ('name', 'linode_id'):
if not eval(arg):
module.fail_json(msg='%s is required for active state' % arg)
if not servers:
module.fail_json(msg = 'Server %s (lid: %s) not found' % (name, linode_id))
for server in servers:
instance = getInstanceDetails(api, server)
if server['STATUS'] != 2:
try:
res = api.linode_shutdown(LinodeId=linode_id)
except Exception, e:
module.fail_json(msg = '%s' % e.value[0]['ERRORMESSAGE'])
instance['status'] = 'Stopping'
changed = True
else:
instance['status'] = 'Stopped'
instances.append(instance)
elif state in ('restarted'):
for arg in ('name', 'linode_id'):
if not eval(arg):
module.fail_json(msg='%s is required for active state' % arg)
if not servers:
module.fail_json(msg = 'Server %s (lid: %s) not found' % (name, linode_id))
for server in servers:
instance = getInstanceDetails(api, server)
try:
res = api.linode_reboot(LinodeId=server['LINODEID'])
except Exception, e:
module.fail_json(msg = '%s' % e.value[0]['ERRORMESSAGE'])
instance['status'] = 'Restarting'
changed = True
instances.append(instance)
elif state in ('absent', 'deleted'):
for server in servers:
instance = getInstanceDetails(api, server)
try:
api.linode_delete(LinodeId=server['LINODEID'], skipChecks=True)
except Exception, e:
module.fail_json(msg = '%s' % e.value[0]['ERRORMESSAGE'])
instance['status'] = 'Deleting'
changed = True
instances.append(instance)
# Ease parsing if only 1 instance
if len(instances) == 1:
module.exit_json(changed=changed, instance=instances[0])
module.exit_json(changed=changed, instances=instances)
def main():
module = AnsibleModule(
argument_spec = dict(
state = dict(default='present', choices=['active', 'present', 'started',
'deleted', 'absent', 'stopped',
'restarted']),
api_key = dict(),
name = dict(type='str'),
plan = dict(type='int'),
distribution = dict(type='int'),
datacenter = dict(type='int'),
linode_id = dict(type='int', aliases=['lid']),
payment_term = dict(type='int', default=1, choices=[1, 12, 24]),
password = dict(type='str'),
ssh_pub_key = dict(type='str'),
swap = dict(type='int', default=512),
wait = dict(type='bool', default=True),
wait_timeout = dict(default=300),
)
)
if not HAS_PYCURL:
module.fail_json(msg='pycurl required for this module')
if not HAS_LINODE:
module.fail_json(msg='linode-python required for this module')
state = module.params.get('state')
api_key = module.params.get('api_key')
name = module.params.get('name')
plan = module.params.get('plan')
distribution = module.params.get('distribution')
datacenter = module.params.get('datacenter')
linode_id = module.params.get('linode_id')
payment_term = module.params.get('payment_term')
password = module.params.get('password')
ssh_pub_key = module.params.get('ssh_pub_key')
swap = module.params.get('swap')
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
# Setup the api_key
if not api_key:
try:
api_key = os.environ['LINODE_API_KEY']
except KeyError, e:
module.fail_json(msg = 'Unable to load %s' % e.message)
# setup the auth
try:
api = linode_api.Api(api_key)
api.test_echo()
except Exception, e:
module.fail_json(msg = '%s' % e.value[0]['ERRORMESSAGE'])
linodeServers(module, api, state, name, plan, distribution, datacenter, linode_id,
payment_term, password, ssh_pub_key, swap, wait, wait_timeout)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
babycaseny/poedit | deps/boost/libs/python/pyste/src/Pyste/HeaderExporter.py | 54 | 3065 | # Copyright Bruno da Silva de Oliveira 2003. Use, modification and
# distribution is subject to the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
from Exporter import Exporter
from ClassExporter import ClassExporter
from FunctionExporter import FunctionExporter
from EnumExporter import EnumExporter
from VarExporter import VarExporter
from infos import *
from declarations import *
import os.path
import exporters
import MultipleCodeUnit
#==============================================================================
# HeaderExporter
#==============================================================================
class HeaderExporter(Exporter):
'Exports all declarations found in the given header'
def __init__(self, info, parser_tail=None):
Exporter.__init__(self, info, parser_tail)
def WriteInclude(self, codeunit):
pass
def IsInternalName(self, name):
'''Returns true if the given name looks like a internal compiler
structure'''
return name.startswith('_')
def Export(self, codeunit, exported_names):
header = os.path.normpath(self.parser_header)
for decl in self.declarations:
# check if this declaration is in the header
location = os.path.abspath(decl.location[0])
if location == header and not self.IsInternalName(decl.name):
# ok, check the type of the declaration and export it accordingly
self.HandleDeclaration(decl, codeunit, exported_names)
def HandleDeclaration(self, decl, codeunit, exported_names):
'''Dispatch the declaration to the appropriate method, that must create
a suitable info object for a Exporter, create a Exporter, set its
declarations and append it to the list of exporters.
'''
dispatch_table = {
Class : ClassExporter,
Enumeration : EnumExporter,
Function : FunctionExporter,
Variable : VarExporter,
}
exporter_class = dispatch_table.get(type(decl))
if exporter_class is not None:
self.HandleExporter(decl, exporter_class, codeunit, exported_names)
def HandleExporter(self, decl, exporter_type, codeunit, exported_names):
# only export complete declarations
if not decl.incomplete:
info = self.info[decl.name]
info.name = decl.FullName()
info.include = self.info.include
exporter = exporter_type(info)
exporter.SetDeclarations(self.declarations)
exporter.SetParsedHeader(self.parser_header)
if isinstance(codeunit, MultipleCodeUnit.MultipleCodeUnit):
codeunit.SetCurrent(self.interface_file, exporter.Name())
else:
codeunit.SetCurrent(exporter.Name())
exporter.GenerateCode(codeunit, exported_names)
def Name(self):
return self.info.include
| mit |
necessary129/GitFetcher | cronjob.py | 1 | 1771 | #!/usr/bin/python3
# Copyright (C) 2015 Muhammed Shamil K
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, download it from here: https://noteness.cf/GPL.txt
# PDF: https://noteness.cf/GPL.pdf
import subprocess
import os
import optparse
import sys
parser = optparse.OptionParser(usage='Usage: %prog [options]')
parser.add_option('', '--dir',
help='Determines what directory the application resides in and '
'should be started from.')
(options, args) = parser.parse_args()
if not options.dir:
print("No dir given")
sys.exit(1)
os.chdir(options.dir)
def restart():
inst = subprocess.Popen("python3 main.py", close_fds=True,
stderr=subprocess.STDOUT,
stdin=None, stdout=subprocess.PIPE)
inst.communicate()
ret = inst.returncode
sys.exit(ret)
def alive(pid):
try:
os.kill(pid, 0)
return True
except OSError:
return False
def getpid(fi):
with open(fi) as f:
return int(f.read())
try:
pid = getpid('GitFetcher.pid')
except FileNotFoundError:
restart()
if not alive(pid):
restart()
else:
sys.exit(0) | gpl-3.0 |
kingvuplus/b-p | lib/python/Screens/Scart.py | 126 | 1771 | from Screen import Screen
from MessageBox import MessageBox
from Components.AVSwitch import AVSwitch
from Tools import Notifications
class Scart(Screen):
def __init__(self, session, start_visible=True):
Screen.__init__(self, session)
self.msgBox = None
self.notificationVisible = None
self.avswitch = AVSwitch()
if start_visible:
self.onExecBegin.append(self.showMessageBox)
self.msgVisible = None
else:
self.msgVisible = False
def showMessageBox(self):
if self.msgVisible is None:
self.onExecBegin.remove(self.showMessageBox)
self.msgVisible = False
if not self.msgVisible:
self.msgVisible = True
self.avswitch.setInput("SCART")
if not self.session.in_exec:
self.notificationVisible = True
Notifications.AddNotificationWithCallback(self.MsgBoxClosed, MessageBox, _("If you see this, something is wrong with\nyour scart connection. Press OK to return."), MessageBox.TYPE_ERROR, msgBoxID = "scart_msgbox")
else:
self.msgBox = self.session.openWithCallback(self.MsgBoxClosed, MessageBox, _("If you see this, something is wrong with\nyour scart connection. Press OK to return."), MessageBox.TYPE_ERROR)
def MsgBoxClosed(self, *val):
self.msgBox = None
self.switchToTV()
def switchToTV(self, *val):
if self.msgVisible:
if self.msgBox:
self.msgBox.close() # ... MsgBoxClosed -> switchToTV again..
return
self.avswitch.setInput("ENCODER")
self.msgVisible = False
if self.notificationVisible:
self.avswitch.setInput("ENCODER")
self.notificationVisible = False
for notification in Notifications.current_notifications:
try:
if notification[1].msgBoxID == "scart_msgbox":
notification[1].close()
except:
print "other notification is open. try another one."
| gpl-2.0 |
ar4s/django | django/db/backends/postgresql_psycopg2/version.py | 12 | 1502 | """
Extracts the version of the PostgreSQL server.
"""
import re
# This reg-exp is intentionally fairly flexible here.
# Needs to be able to handle stuff like:
# PostgreSQL 8.3.6
# EnterpriseDB 8.3
# PostgreSQL 8.3 beta4
# PostgreSQL 8.4beta1
VERSION_RE = re.compile(r'\S+ (\d+)\.(\d+)\.?(\d+)?')
def _parse_version(text):
"Internal parsing method. Factored out for testing purposes."
major, major2, minor = VERSION_RE.search(text).groups()
try:
return int(major) * 10000 + int(major2) * 100 + int(minor)
except (ValueError, TypeError):
return int(major) * 10000 + int(major2) * 100
def get_version(connection):
"""
Returns an integer representing the major, minor and revision number of the
server. Format is the one used for the return value of libpq
PQServerVersion()/``server_version`` connection attribute (available in
newer psycopg2 versions.)
For example, 80304 for 8.3.4. The last two digits will be 00 in the case of
releases (e.g., 80400 for 'PostgreSQL 8.4') or in the case of beta and
prereleases (e.g. 90100 for 'PostgreSQL 9.1beta2').
PQServerVersion()/``server_version`` doesn't execute a query so try that
first, then fallback to a ``SELECT version()`` query.
"""
if hasattr(connection, 'server_version'):
return connection.server_version
else:
cursor = connection.cursor()
cursor.execute("SELECT version()")
return _parse_version(cursor.fetchone()[0])
| bsd-3-clause |
luzfcb/django-simple-history | simple_history/registry_tests/tests.py | 1 | 5594 | from __future__ import unicode_literals
import unittest
from datetime import datetime, timedelta
import django
from django.contrib.auth import get_user_model
from django.core import management
from django.test import TestCase
from simple_history import exceptions, register
from six.moves import cStringIO as StringIO
from ..tests.models import (Choice, InheritTracking1, InheritTracking2,
InheritTracking3, InheritTracking4, Poll,
Restaurant, TrackedAbstractBaseA,
TrackedAbstractBaseB, TrackedWithAbstractBase,
TrackedWithConcreteBase, UserAccessorDefault,
UserAccessorOverride, Voter)
try:
from django.apps import apps
except ImportError: # Django < 1.7
from django.db.models import get_model
else:
get_model = apps.get_model
User = get_user_model()
today = datetime(2021, 1, 1, 10, 0)
tomorrow = today + timedelta(days=1)
yesterday = today - timedelta(days=1)
class RegisterTest(TestCase):
def test_register_no_args(self):
self.assertEqual(len(Choice.history.all()), 0)
poll = Poll.objects.create(pub_date=today)
choice = Choice.objects.create(poll=poll, votes=0)
self.assertEqual(len(choice.history.all()), 1)
def test_register_separate_app(self):
def get_history(model):
return model.history
self.assertRaises(AttributeError, get_history, User)
self.assertEqual(len(User.histories.all()), 0)
user = User.objects.create(username='bob', password='pass')
self.assertEqual(len(User.histories.all()), 1)
self.assertEqual(len(user.histories.all()), 1)
def test_reregister(self):
with self.assertRaises(exceptions.MultipleRegistrationsError):
register(Restaurant, manager_name='again')
def test_register_custome_records(self):
self.assertEqual(len(Voter.history.all()), 0)
poll = Poll.objects.create(pub_date=today)
choice = Choice.objects.create(poll=poll, votes=0)
user = User.objects.create(username='voter')
voter = Voter.objects.create(choice=choice, user=user)
self.assertEqual(len(voter.history.all()), 1)
expected = 'Voter object changed by None as of '
self.assertEqual(expected,
str(voter.history.all()[0])[:len(expected)])
class TestUserAccessor(unittest.TestCase):
def test_accessor_default(self):
register(UserAccessorDefault)
assert not hasattr(User, 'historicaluseraccessordefault_set')
def test_accessor_override(self):
register(UserAccessorOverride,
user_related_name='my_history_model_accessor')
assert hasattr(User, 'my_history_model_accessor')
class TestTrackingInheritance(TestCase):
def test_tracked_abstract_base(self):
self.assertEqual(
[
f.attname
for f in TrackedWithAbstractBase.history.model._meta.fields
],
[
'id', 'history_id', 'history_date',
'history_change_reason', 'history_user_id',
'history_type',
],
)
def test_tracked_concrete_base(self):
self.assertEqual(
[
f.attname
for f in TrackedWithConcreteBase.history.model._meta.fields
],
[
'id', 'trackedconcretebase_ptr_id', 'history_id',
'history_date', 'history_change_reason', 'history_user_id',
'history_type',
],
)
def test_multiple_tracked_bases(self):
with self.assertRaises(exceptions.MultipleRegistrationsError):
class TrackedWithMultipleAbstractBases(
TrackedAbstractBaseA, TrackedAbstractBaseB):
pass
def test_tracked_abstract_and_untracked_concrete_base(self):
self.assertEqual(
[f.attname for f in InheritTracking1.history.model._meta.fields],
[
'id', 'untrackedconcretebase_ptr_id', 'history_id',
'history_date', 'history_change_reason',
'history_user_id', 'history_type',
],
)
def test_indirect_tracked_abstract_base(self):
self.assertEqual(
[f.attname for f in InheritTracking2.history.model._meta.fields],
[
'id', 'baseinherittracking2_ptr_id', 'history_id',
'history_date', 'history_change_reason',
'history_user_id', 'history_type',
],
)
def test_indirect_tracked_concrete_base(self):
self.assertEqual(
[f.attname for f in InheritTracking3.history.model._meta.fields],
[
'id', 'baseinherittracking3_ptr_id', 'history_id',
'history_date', 'history_change_reason',
'history_user_id', 'history_type',
],
)
def test_registering_with_tracked_abstract_base(self):
with self.assertRaises(exceptions.MultipleRegistrationsError):
register(InheritTracking4)
@unittest.skipUnless(django.get_version() >= "1.7", "Requires 1.7 migrations")
class TestMigrate(TestCase):
def test_makemigration_command(self):
management.call_command(
'makemigrations', 'migration_test_app', stdout=StringIO())
def test_migrate_command(self):
management.call_command(
'migrate', 'migration_test_app', fake=True, stdout=StringIO())
| bsd-3-clause |
ChengXiaoZ/MariaDBserver | storage/tokudb/mysql-test/tokudb/t/change_column_text.py | 54 | 1409 | import sys
def main():
print "# this test is generated by change_text.py"
print "# generate hot text expansion test cases"
print "--disable_warnings"
print "DROP TABLE IF EXISTS t;"
print "--enable_warnings"
print "SET SESSION DEFAULT_STORAGE_ENGINE=\"TokuDB\";"
print "SET SESSION TOKUDB_DISABLE_SLOW_ALTER=1;"
gen_tests([ "TINY", "", "MEDIUM", "LONG" ], [ "NULL", "NOT NULL"])
return 0
def gen_tests(base_types, null_types):
    # For every combination of (source TEXT type, target type, source
    # NULL-ability, target NULL-ability) emit a CREATE/ALTER/DROP sequence.
    for from_index in range(len(base_types)):
        for to_index in range(len(base_types)):
            for from_null in range(len(null_types)):
                for to_null in range(len(null_types)):
                    print "CREATE TABLE t (a %sTEXT %s);" % (base_types[from_index], null_types[from_null])
                    # TEXT -> BLOB is always rejected as a slow alter.
                    print "--replace_regex /MariaDB/XYZ/ /MySQL/XYZ/"
                    print "--error ER_UNSUPPORTED_EXTENSION"
                    print "ALTER TABLE t CHANGE COLUMN a a %sBLOB %s;" % (base_types[to_index], null_types[to_null]);
                    # TEXT -> TEXT is rejected when NULL-ability changes or
                    # the target type is smaller than the source.
                    if from_null != to_null or from_index > to_index:
                        print "--replace_regex /MariaDB/XYZ/ /MySQL/XYZ/"
                        print "--error ER_UNSUPPORTED_EXTENSION"
                        print "ALTER TABLE t CHANGE COLUMN a a %sTEXT %s;" % (base_types[to_index], null_types[to_null]);
                    print "DROP TABLE t;"
# Script entry point: process exit status is main()'s return value (0).
sys.exit(main())
| gpl-2.0 |
smartforceplus/SmartForceplus | addons/project/report/project_report.py | 279 | 5789 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp import tools
class report_project_task_user(osv.osv):
    """Read-only reporting model backed by a SQL view over project_task.

    ``_auto = False`` stops the ORM from creating a table for this model;
    ``init()`` (re)creates the ``report_project_task_user`` view instead,
    so every column below must be produced by the SELECT in ``init()``.
    """
    _name = "report.project.task.user"
    _description = "Tasks by user and project"
    _auto = False
    _columns = {
        'name': fields.char('Task Summary', readonly=True),
        'user_id': fields.many2one('res.users', 'Assigned To', readonly=True),
        'reviewer_id': fields.many2one('res.users', 'Reviewer', readonly=True),
        'date_start': fields.datetime('Assignation Date', readonly=True),
        'no_of_days': fields.integer('# of Days', size=128, readonly=True),
        'date_end': fields.datetime('Ending Date', readonly=True),
        'date_deadline': fields.date('Deadline', readonly=True),
        'date_last_stage_update': fields.datetime('Last Stage Update', readonly=True),
        'project_id': fields.many2one('project.project', 'Project', readonly=True),
        'hours_planned': fields.float('Planned Hours', readonly=True),
        'hours_effective': fields.float('Effective Hours', readonly=True),
        'hours_delay': fields.float('Avg. Plan.-Eff.', readonly=True),
        'remaining_hours': fields.float('Remaining Hours', readonly=True),
        # Averaged (not summed) when grouped in list/pivot views.
        'progress': fields.float('Progress', readonly=True, group_operator='avg'),
        'total_hours': fields.float('Total Hours', readonly=True),
        'closing_days': fields.float('Days to Close', digits=(16,2), readonly=True, group_operator="avg",
                                     help="Number of Days to close the task"),
        'opening_days': fields.float('Days to Assign', digits=(16,2), readonly=True, group_operator="avg",
                                     help="Number of Days to Open the task"),
        'delay_endings_days': fields.float('Overpassed Deadline', digits=(16,2), readonly=True),
        'nbr': fields.integer('# of Tasks', readonly=True),  # TDE FIXME master: rename into nbr_tasks
        'priority': fields.selection([('0','Low'), ('1','Normal'), ('2','High')],
            string='Priority', size=1, readonly=True),
        # Maps the task's kanban_state, not its stage.
        'state': fields.selection([('normal', 'In Progress'),('blocked', 'Blocked'),('done', 'Ready for next stage')],'Status', readonly=True),
        'company_id': fields.many2one('res.company', 'Company', readonly=True),
        'partner_id': fields.many2one('res.partner', 'Contact', readonly=True),
        'stage_id': fields.many2one('project.task.type', 'Stage'),
    }
    _order = 'name desc, project_id'

    def init(self, cr):
        # Called at module install/upgrade: drop and recreate the backing
        # view.  Durations are computed in days from epoch differences;
        # 3600*24 converts seconds to days.
        tools.sql.drop_view_if_exists(cr, 'report_project_task_user')
        cr.execute("""
            CREATE view report_project_task_user as
              SELECT
                    (select 1 ) AS nbr,
                    t.id as id,
                    t.date_start as date_start,
                    t.date_end as date_end,
                    t.date_last_stage_update as date_last_stage_update,
                    t.date_deadline as date_deadline,
                    abs((extract('epoch' from (t.write_date-t.date_start)))/(3600*24)) as no_of_days,
                    t.user_id,
                    t.reviewer_id,
                    progress as progress,
                    t.project_id,
                    t.effective_hours as hours_effective,
                    t.priority,
                    t.name as name,
                    t.company_id,
                    t.partner_id,
                    t.stage_id as stage_id,
                    t.kanban_state as state,
                    remaining_hours as remaining_hours,
                    total_hours as total_hours,
                    t.delay_hours as hours_delay,
                    planned_hours as hours_planned,
                    (extract('epoch' from (t.write_date-t.create_date)))/(3600*24) as closing_days,
                    (extract('epoch' from (t.date_start-t.create_date)))/(3600*24) as opening_days,
                    (extract('epoch' from (t.date_deadline-(now() at time zone 'UTC'))))/(3600*24) as delay_endings_days
              FROM project_task t
                WHERE t.active = 'true'
                GROUP BY
                    t.id,
                    remaining_hours,
                    t.effective_hours,
                    progress,
                    total_hours,
                    planned_hours,
                    hours_delay,
                    create_date,
                    write_date,
                    date_start,
                    date_end,
                    date_deadline,
                    date_last_stage_update,
                    t.user_id,
                    t.reviewer_id,
                    t.project_id,
                    t.priority,
                    name,
                    t.company_id,
                    t.partner_id,
                    stage_id
        """)
| agpl-3.0 |
KaranToor/MA450 | google-cloud-sdk/lib/surface/meta/debug.py | 6 | 1629 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The `gcloud meta debug` command."""
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.meta import debug
class Debug(base.Command):
  """Run an interactive debug console with the Cloud SDK libraries loaded.

  This command runs an interactive console with the Cloud SDK libraries loaded.

  It's useful for:

  * Manually testing out an API.
  * Exploring available Cloud SDK core libraries.
  * Debugging specific problems.

  It comes with many utilities pre-loaded in the environment:

  * All API clients loaded with one command (`LoadApis()`). Then, for instance,
    `appengine` refers to the App Engine API client.
  * Many common Cloud SDK imports pre-imported (e.g. core.util.files,
    console_io, properties).

  Use `dir()` to explore them all.
  """
  # NOTE(review): the class docstring doubles as the gcloud help text for
  # this command, so it is kept verbatim.

  @staticmethod
  def Args(parser):
    # --mode selects which REPL implementation from debug.CONSOLES to run;
    # defaults to the plain Python console.
    parser.add_argument(
        '--mode', choices=sorted(debug.CONSOLES.keys()), default='python',
        help='The debug console mode to run in.')

  def Run(self, args):
    # Look up the selected console factory and start it (blocks until the
    # interactive session ends).
    debug.CONSOLES[args.mode]()
| apache-2.0 |
chhao91/QGIS | python/ext-libs/pygments/styles/friendly.py | 364 | 2515 | # -*- coding: utf-8 -*-
"""
pygments.styles.friendly
~~~~~~~~~~~~~~~~~~~~~~~~
A modern style based on the VIM pyte theme.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Whitespace
class FriendlyStyle(Style):
    """
    A modern style based on the VIM pyte theme.
    """

    background_color = "#f0f0f0"
    default_style = ""

    # Maps Pygments token types to style definition strings
    # ("[bold] [italic] [bg:#rrggbb] [#rrggbb]"); unlisted tokens fall back
    # to their parent token's style.
    styles = {
        Whitespace:                "#bbbbbb",
        Comment:                   "italic #60a0b0",
        Comment.Preproc:           "noitalic #007020",
        Comment.Special:           "noitalic bg:#fff0f0",

        Keyword:                   "bold #007020",
        Keyword.Pseudo:            "nobold",
        Keyword.Type:              "nobold #902000",

        Operator:                  "#666666",
        Operator.Word:             "bold #007020",

        Name.Builtin:              "#007020",
        Name.Function:             "#06287e",
        Name.Class:                "bold #0e84b5",
        Name.Namespace:            "bold #0e84b5",
        Name.Exception:            "#007020",
        Name.Variable:             "#bb60d5",
        Name.Constant:             "#60add5",
        Name.Label:                "bold #002070",
        Name.Entity:               "bold #d55537",
        Name.Attribute:            "#4070a0",
        Name.Tag:                  "bold #062873",
        Name.Decorator:            "bold #555555",

        String:                    "#4070a0",
        String.Doc:                "italic",
        String.Interpol:           "italic #70a0d0",
        String.Escape:             "bold #4070a0",
        String.Regex:              "#235388",
        String.Symbol:             "#517918",
        String.Other:              "#c65d09",

        Number:                    "#40a070",

        Generic.Heading:           "bold #000080",
        Generic.Subheading:        "bold #800080",
        Generic.Deleted:           "#A00000",
        Generic.Inserted:          "#00A000",
        Generic.Error:             "#FF0000",
        Generic.Emph:              "italic",
        Generic.Strong:            "bold",
        Generic.Prompt:            "bold #c65d09",
        Generic.Output:            "#888",
        Generic.Traceback:         "#04D",

        Error:                     "border:#FF0000"
    }
| gpl-2.0 |
abhishek-ch/hue | desktop/core/ext-py/tablib-0.10.0/tablib/packages/odf3/presentation.py | 56 | 2716 | # -*- coding: utf-8 -*-
# Copyright (C) 2006-2007 Søren Roug, European Environment Agency
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Contributor(s):
#
from .namespaces import PRESENTATIONNS
from .element import Element
# ODF 1.0 section 9.6 and 9.7
# Autogenerated
# Each factory below creates an Element with the given qualified name in the
# presentation namespace; all keyword arguments are forwarded to Element().

def AnimationGroup(**args):
    return Element(qname = (PRESENTATIONNS,'animation-group'), **args)

def Animations(**args):
    return Element(qname = (PRESENTATIONNS,'animations'), **args)

def DateTime(**args):
    return Element(qname = (PRESENTATIONNS,'date-time'), **args)

def DateTimeDecl(**args):
    return Element(qname = (PRESENTATIONNS,'date-time-decl'), **args)

def Dim(**args):
    return Element(qname = (PRESENTATIONNS,'dim'), **args)

def EventListener(**args):
    return Element(qname = (PRESENTATIONNS,'event-listener'), **args)

def Footer(**args):
    return Element(qname = (PRESENTATIONNS,'footer'), **args)

def FooterDecl(**args):
    return Element(qname = (PRESENTATIONNS,'footer-decl'), **args)

def Header(**args):
    return Element(qname = (PRESENTATIONNS,'header'), **args)

def HeaderDecl(**args):
    return Element(qname = (PRESENTATIONNS,'header-decl'), **args)

def HideShape(**args):
    return Element(qname = (PRESENTATIONNS,'hide-shape'), **args)

def HideText(**args):
    return Element(qname = (PRESENTATIONNS,'hide-text'), **args)

def Notes(**args):
    return Element(qname = (PRESENTATIONNS,'notes'), **args)

def Placeholder(**args):
    return Element(qname = (PRESENTATIONNS,'placeholder'), **args)

def Play(**args):
    return Element(qname = (PRESENTATIONNS,'play'), **args)

def Settings(**args):
    return Element(qname = (PRESENTATIONNS,'settings'), **args)

def Show(**args):
    return Element(qname = (PRESENTATIONNS,'show'), **args)

def ShowShape(**args):
    return Element(qname = (PRESENTATIONNS,'show-shape'), **args)

def ShowText(**args):
    return Element(qname = (PRESENTATIONNS,'show-text'), **args)

def Sound(**args):
    return Element(qname = (PRESENTATIONNS,'sound'), **args)
| apache-2.0 |
abloomston/sympy | sympy/core/tests/test_arit.py | 14 | 57459 | from __future__ import division
from sympy import (Basic, Symbol, sin, cos, exp, sqrt, Rational, Float, re, pi,
sympify, Add, Mul, Pow, Mod, I, log, S, Max, symbols, oo, Integer,
sign, im, nan, Dummy, factorial, comp
)
from sympy.core.compatibility import long, range
from sympy.utilities.iterables import cartes
from sympy.utilities.pytest import XFAIL, raises
from sympy.utilities.randtest import verify_numerically
a, c, x, y, z = symbols('a,c,x,y,z')
b = Symbol("b", positive=True)
def same_and_same_prec(a, b):
    """Return True when ``a == b`` and both carry the same ``_prec``.

    Plain equality on Floats ignores precision; this helper additionally
    compares the private ``_prec`` attribute for stricter matching.
    """
    if a != b:
        return False
    return a._prec == b._prec
def test_bug1():
    # Regression test: re(x) must stay distinct from x both before and
    # after calling x.series() (series() must not corrupt cached state).
    assert re(x) != x
    x.series(x, 0, 1)
    assert re(x) != x
def test_Symbol():
    # Basic symbolic arithmetic normalization with Symbols.
    e = a*b
    assert e == a*b
    assert a*b*b == a*b**2
    assert a*b*b + c == c + a*b**2
    assert a*b*b - c == -c + a*b**2

    # Assumption inference on complex symbols: a partial assumption set
    # leaves the complementary property undetermined (None).
    x = Symbol('x', complex=True, real=False)
    assert x.is_imaginary is None  # could be I or 1 + I
    x = Symbol('x', complex=True, imaginary=False)
    assert x.is_real is None  # could be 1 or 1 + I
    x = Symbol('x', real=True)
    assert x.is_complex
    x = Symbol('x', imaginary=True)
    assert x.is_complex
    x = Symbol('x', real=False, imaginary=False)
    assert x.is_complex is None  # might be a non-number
def test_arit0():
p = Rational(5)
e = a*b
assert e == a*b
e = a*b + b*a
assert e == 2*a*b
e = a*b + b*a + a*b + p*b*a
assert e == 8*a*b
e = a*b + b*a + a*b + p*b*a + a
assert e == a + 8*a*b
e = a + a
assert e == 2*a
e = a + b + a
assert e == b + 2*a
e = a + b*b + a + b*b
assert e == 2*a + 2*b**2
e = a + Rational(2) + b*b + a + b*b + p
assert e == 7 + 2*a + 2*b**2
e = (a + b*b + a + b*b)*p
assert e == 5*(2*a + 2*b**2)
e = (a*b*c + c*b*a + b*a*c)*p
assert e == 15*a*b*c
e = (a*b*c + c*b*a + b*a*c)*p - Rational(15)*a*b*c
assert e == Rational(0)
e = Rational(50)*(a - a)
assert e == Rational(0)
e = b*a - b - a*b + b
assert e == Rational(0)
e = a*b + c**p
assert e == a*b + c**5
e = a/b
assert e == a*b**(-1)
e = a*2*2
assert e == 4*a
e = 2 + a*2/2
assert e == 2 + a
e = 2 - a - 2
assert e == -a
e = 2*a*2
assert e == 4*a
e = 2/a/2
assert e == a**(-1)
e = 2**a**2
assert e == 2**(a**2)
e = -(1 + a)
assert e == -1 - a
e = Rational(1, 2)*(1 + a)
assert e == Rational(1, 2) + a/2
def test_div():
    # Division is represented internally as multiplication by a negative
    # power: a/b == a*b**(-1).
    e = a/b
    assert e == a*b**(-1)

    e = a/b + c/2
    assert e == a*b**(-1) + Rational(1)/2*c

    e = (1 - b)/(b - 1)
    assert e == (1 + -b)*((-1) + b)**(-1)
def test_pow():
n1 = Rational(1)
n2 = Rational(2)
n5 = Rational(5)
e = a*a
assert e == a**2
e = a*a*a
assert e == a**3
e = a*a*a*a**Rational(6)
assert e == a**9
e = a*a*a*a**Rational(6) - a**Rational(9)
assert e == Rational(0)
e = a**(b - b)
assert e == Rational(1)
e = (a - a)**b
assert e == Rational(0)
e = (a + Rational(1) - a)**b
assert e == Rational(1)
e = (a + b + c)**n2
assert e == (a + b + c)**2
assert e.expand() == 2*b*c + 2*a*c + 2*a*b + a**2 + c**2 + b**2
e = (a + b)**n2
assert e == (a + b)**2
assert e.expand() == 2*a*b + a**2 + b**2
e = (a + b)**(n1/n2)
assert e == sqrt(a + b)
assert e.expand() == sqrt(a + b)
n = n5**(n1/n2)
assert n == sqrt(5)
e = n*a*b - n*b*a
assert e == Rational(0)
e = n*a*b + n*b*a
assert e == 2*a*b*sqrt(5)
assert e.diff(a) == 2*b*sqrt(5)
assert e.diff(a) == 2*b*sqrt(5)
e = a/b**2
assert e == a*b**(-2)
assert sqrt(2*(1 + sqrt(2))) == (2*(1 + 2**Rational(1, 2)))**Rational(1, 2)
x = Symbol('x')
y = Symbol('y')
assert ((x*y)**3).expand() == y**3 * x**3
assert ((x*y)**-3).expand() == y**-3 * x**-3
assert (x**5*(3*x)**(3)).expand() == 27 * x**8
assert (x**5*(-3*x)**(3)).expand() == -27 * x**8
assert (x**5*(3*x)**(-3)).expand() == Rational(1, 27) * x**2
assert (x**5*(-3*x)**(-3)).expand() == -Rational(1, 27) * x**2
# expand_power_exp
assert (x**(y**(x + exp(x + y)) + z)).expand(deep=False) == \
x**z*x**(y**(x + exp(x + y)))
assert (x**(y**(x + exp(x + y)) + z)).expand() == \
x**z*x**(y**x*y**(exp(x)*exp(y)))
n = Symbol('n', even=False)
k = Symbol('k', even=True)
o = Symbol('o', odd=True)
assert (-1)**x == (-1)**x
assert (-1)**n == (-1)**n
assert (-2)**k == 2**k
assert (-2*x)**k == (2*x)**k # we choose not to auto expand this
assert (-2*x)**o == -(2*x)**o # but we do handle coefficient sign
assert (-1)**k == 1
def test_pow2():
# x**(2*y) is always (x**y)**2 but is only (x**2)**y if
# x.is_positive or y.is_integer
# let x = 1 to see why the following are not true.
assert (-x)**Rational(2, 3) != x**Rational(2, 3)
assert (-x)**Rational(5, 7) != -x**Rational(5, 7)
assert ((-x)**2)**Rational(1, 3) != ((-x)**Rational(1, 3))**2
assert sqrt(x**2) != x
def test_pow3():
    # Integer powers of surds are simplified: sqrt(2)**3 == 2*sqrt(2) == sqrt(8).
    assert sqrt(2)**3 == 2 * sqrt(2)
    assert sqrt(2)**3 == sqrt(8)
def test_pow_E():
    # Powers with exponent 1/log(base) collapse to E (or a power of E).
    assert 2**(y/log(2)) == S.Exp1**y
    assert 2**(y/log(2)/3) == S.Exp1**(y/3)
    assert 3**(1/log(-3)) != S.Exp1
    assert (3 + 2*I)**(1/(log(-3 - 2*I) + I*pi)) == S.Exp1
    assert (4 + 2*I)**(1/(log(-4 - 2*I) + I*pi)) == S.Exp1
    assert (3 + 2*I)**(1/(log(-3 - 2*I, 3)/2 + I*pi/log(3)/2)) == 9
    assert (3 + 2*I)**(1/(log(3 + 2*I, 3)/2)) == 9

    # every time tests are run they will affirm with a different random
    # value that this identity holds
    # (the loop retries _random() until a value with a nonzero imaginary
    # part is drawn, which terminates with probability 1)
    while 1:
        b = x._random()
        r, i = b.as_real_imag()
        if i:
            break
    assert verify_numerically(b**(1/(log(-b) + sign(i)*I*pi).n()), S.Exp1)
def test_pow_issue_3516():
    # Regression for sympy issue 3516: 4**(1/4) must simplify to sqrt(2).
    assert 4**Rational(1, 4) == sqrt(2)
def test_pow_im():
for m in (-2, -1, 2):
for d in (3, 4, 5):
b = m*I
for i in range(1, 4*d + 1):
e = Rational(i, d)
assert (b**e - b.n()**e.n()).n(2, chop=1e-10) == 0
e = Rational(7, 3)
assert (2*x*I)**e == 4*2**Rational(1, 3)*(I*x)**e # same as Wolfram Alpha
im = symbols('im', imaginary=True)
assert (2*im*I)**e == 4*2**Rational(1, 3)*(I*im)**e
args = [I, I, I, I, 2]
e = Rational(1, 3)
ans = 2**e
assert Mul(*args, evaluate=False)**e == ans
assert Mul(*args)**e == ans
args = [I, I, I, 2]
e = Rational(1, 3)
ans = 2**e*(-I)**e
assert Mul(*args, evaluate=False)**e == ans
assert Mul(*args)**e == ans
args.append(-3)
ans = (6*I)**e
assert Mul(*args, evaluate=False)**e == ans
assert Mul(*args)**e == ans
args.append(-1)
ans = (-6*I)**e
assert Mul(*args, evaluate=False)**e == ans
assert Mul(*args)**e == ans
args = [I, I, 2]
e = Rational(1, 3)
ans = (-2)**e
assert Mul(*args, evaluate=False)**e == ans
assert Mul(*args)**e == ans
args.append(-3)
ans = (6)**e
assert Mul(*args, evaluate=False)**e == ans
assert Mul(*args)**e == ans
args.append(-1)
ans = (-6)**e
assert Mul(*args, evaluate=False)**e == ans
assert Mul(*args)**e == ans
assert Mul(Pow(-1, Rational(3, 2), evaluate=False), I, I) == I
assert Mul(I*Pow(I, S.Half, evaluate=False)) == (-1)**Rational(3, 4)
def test_real_mul():
    # Float(0) absorbs the whole product, but Float(1) is kept as an
    # explicit factor (it is not stripped like the exact integer 1).
    assert Float(0) * pi * x == Float(0)
    assert set((Float(1) * pi * x).args) == set([Float(1), pi, x])
def test_ncmul():
A = Symbol("A", commutative=False)
B = Symbol("B", commutative=False)
C = Symbol("C", commutative=False)
assert A*B != B*A
assert A*B*C != C*B*A
assert A*b*B*3*C == 3*b*A*B*C
assert A*b*B*3*C != 3*b*B*A*C
assert A*b*B*3*C == 3*A*B*C*b
assert A + B == B + A
assert (A + B)*C != C*(A + B)
assert C*(A + B)*C != C*C*(A + B)
assert A*A == A**2
assert (A + B)*(A + B) == (A + B)**2
assert A**-1 * A == 1
assert A/A == 1
assert A/(A**2) == 1/A
assert A/(1 + A) == A/(1 + A)
assert set((A + B + 2*(A + B)).args) == \
set([A, B, 2*(A + B)])
def test_ncpow():
x = Symbol('x', commutative=False)
y = Symbol('y', commutative=False)
z = Symbol('z', commutative=False)
a = Symbol('a')
b = Symbol('b')
c = Symbol('c')
assert (x**2)*(y**2) != (y**2)*(x**2)
assert (x**-2)*y != y*(x**2)
assert 2**x*2**y != 2**(x + y)
assert 2**x*2**y*2**z != 2**(x + y + z)
assert 2**x*2**(2*x) == 2**(3*x)
assert 2**x*2**(2*x)*2**x == 2**(4*x)
assert exp(x)*exp(y) != exp(y)*exp(x)
assert exp(x)*exp(y)*exp(z) != exp(y)*exp(x)*exp(z)
assert exp(x)*exp(y)*exp(z) != exp(x + y + z)
assert x**a*x**b != x**(a + b)
assert x**a*x**b*x**c != x**(a + b + c)
assert x**3*x**4 == x**7
assert x**3*x**4*x**2 == x**9
assert x**a*x**(4*a) == x**(5*a)
assert x**a*x**(4*a)*x**a == x**(6*a)
def test_powerbug():
    # (-x)**n equals x**n exactly when n is even; odd powers must keep
    # the sign distinction, including large exponents.
    x = Symbol("x")
    assert x**1 != (-x)**1
    assert x**2 == (-x)**2
    assert x**3 != (-x)**3
    assert x**4 == (-x)**4
    assert x**5 != (-x)**5
    assert x**6 == (-x)**6

    assert x**128 == (-x)**128
    assert x**129 != (-x)**129

    assert (2*x)**2 == (-2*x)**2
def test_Mul_doesnt_expand_exp():
    # Products of powers combine automatically only when sound: same base
    # (x**2*x**3 -> x**5) or same exponent (2**x*3**x -> 6**x); exp(x)*exp(y)
    # and 2**x*2**y stay unexpanded.
    x = Symbol('x')
    y = Symbol('y')
    assert exp(x)*exp(y) == exp(x)*exp(y)
    assert 2**x*2**y == 2**x*2**y
    assert x**2*x**3 == x**5
    assert 2**x*3**x == 6**x
    assert x**(y)*x**(2*y) == x**(3*y)
    assert sqrt(2)*sqrt(2) == 2
    assert 2**x*2**(2*x) == 2**(3*x)
    assert sqrt(2)*2**Rational(1, 4)*5**Rational(3, 4) == 10**Rational(3, 4)
    assert (x**(-log(5)/log(3))*x)/(x*x**( - log(5)/log(3))) == sympify(1)
def test_Add_Mul_is_integer():
x = Symbol('x')
k = Symbol('k', integer=True)
n = Symbol('n', integer=True)
assert (2*k).is_integer is True
assert (-k).is_integer is True
assert (k/3).is_integer is None
assert (x*k*n).is_integer is None
assert (k + n).is_integer is True
assert (k + x).is_integer is None
assert (k + n*x).is_integer is None
assert (k + n/3).is_integer is None
assert ((1 + sqrt(3))*(-sqrt(3) + 1)).is_integer is not False
assert (1 + (1 + sqrt(3))*(-sqrt(3) + 1)).is_integer is not False
def test_Add_Mul_is_finite():
x = Symbol('x', real=True, finite=False)
assert sin(x).is_finite is True
assert (x*sin(x)).is_finite is False
assert (1024*sin(x)).is_finite is True
assert (sin(x)*exp(x)).is_finite is not True
assert (sin(x)*cos(x)).is_finite is True
assert (x*sin(x)*exp(x)).is_finite is not True
assert (sin(x) - 67).is_finite is True
assert (sin(x) + exp(x)).is_finite is not True
assert (1 + x).is_finite is False
assert (1 + x**2 + (1 + x)*(1 - x)).is_finite is None
assert (sqrt(2)*(1 + x)).is_finite is False
assert (sqrt(2)*(1 + x)*(1 - x)).is_finite is False
def test_Mul_is_even_odd():
x = Symbol('x', integer=True)
y = Symbol('y', integer=True)
k = Symbol('k', odd=True)
n = Symbol('n', odd=True)
m = Symbol('m', even=True)
assert (2*x).is_even is True
assert (2*x).is_odd is False
assert (3*x).is_even is None
assert (3*x).is_odd is None
assert (k/3).is_integer is None
assert (k/3).is_even is None
assert (k/3).is_odd is None
assert (2*n).is_even is True
assert (2*n).is_odd is False
assert (2*m).is_even is True
assert (2*m).is_odd is False
assert (-n).is_even is False
assert (-n).is_odd is True
assert (k*n).is_even is False
assert (k*n).is_odd is True
assert (k*m).is_even is True
assert (k*m).is_odd is False
assert (k*n*m).is_even is True
assert (k*n*m).is_odd is False
assert (k*m*x).is_even is True
assert (k*m*x).is_odd is False
# issue 6791:
assert (x/2).is_integer is None
assert (k/2).is_integer is False
assert (m/2).is_integer is True
assert (x*y).is_even is None
assert (x*x).is_even is None
assert (x*(x + k)).is_even is True
assert (x*(x + m)).is_even is None
assert (x*y).is_odd is None
assert (x*x).is_odd is None
assert (x*(x + k)).is_odd is False
assert (x*(x + m)).is_odd is None
@XFAIL
def test_evenness_in_ternary_integer_product_with_odd():
# Tests that oddness inference is independent of term ordering.
# Term ordering at the point of testing depends on SymPy's symbol order, so
# we try to force a different order by modifying symbol names.
x = Symbol('x', integer=True)
y = Symbol('y', integer=True)
k = Symbol('k', odd=True)
assert (x*y*(y + k)).is_even is True
assert (y*x*(x + k)).is_even is True
def test_evenness_in_ternary_integer_product_with_even():
x = Symbol('x', integer=True)
y = Symbol('y', integer=True)
m = Symbol('m', even=True)
assert (x*y*(y + m)).is_even is None
@XFAIL
def test_oddness_in_ternary_integer_product_with_odd():
# Tests that oddness inference is independent of term ordering.
# Term ordering at the point of testing depends on SymPy's symbol order, so
# we try to force a different order by modifying symbol names.
x = Symbol('x', integer=True)
y = Symbol('y', integer=True)
k = Symbol('k', odd=True)
assert (x*y*(y + k)).is_odd is False
assert (y*x*(x + k)).is_odd is False
def test_oddness_in_ternary_integer_product_with_even():
x = Symbol('x', integer=True)
y = Symbol('y', integer=True)
m = Symbol('m', even=True)
assert (x*y*(y + m)).is_odd is None
def test_Mul_is_rational():
x = Symbol('x')
n = Symbol('n', integer=True)
m = Symbol('m', integer=True, nonzero=True)
assert (n/m).is_rational is True
assert (x/pi).is_rational is None
assert (x/n).is_rational is None
assert (m/pi).is_rational is False
r = Symbol('r', rational=True)
assert (pi*r).is_rational is None
# issue 8008
z = Symbol('z', zero=True)
i = Symbol('i', imaginary=True)
assert (z*i).is_rational is None
bi = Symbol('i', imaginary=True, finite=True)
assert (z*bi).is_zero is True
def test_Add_is_rational():
x = Symbol('x')
n = Symbol('n', rational=True)
m = Symbol('m', rational=True)
assert (n + m).is_rational is True
assert (x + pi).is_rational is None
assert (x + n).is_rational is None
assert (n + pi).is_rational is False
def test_Add_is_even_odd():
x = Symbol('x', integer=True)
k = Symbol('k', odd=True)
n = Symbol('n', odd=True)
m = Symbol('m', even=True)
assert (k + 7).is_even is True
assert (k + 7).is_odd is False
assert (-k + 7).is_even is True
assert (-k + 7).is_odd is False
assert (k - 12).is_even is False
assert (k - 12).is_odd is True
assert (-k - 12).is_even is False
assert (-k - 12).is_odd is True
assert (k + n).is_even is True
assert (k + n).is_odd is False
assert (k + m).is_even is False
assert (k + m).is_odd is True
assert (k + n + m).is_even is True
assert (k + n + m).is_odd is False
assert (k + n + x + m).is_even is None
assert (k + n + x + m).is_odd is None
def test_Mul_is_negative_positive():
x = Symbol('x', real=True)
y = Symbol('y', real=False, complex=True)
z = Symbol('z', zero=True)
e = 2*z
assert e.is_Mul and e.is_positive is False and e.is_negative is False
neg = Symbol('neg', negative=True)
pos = Symbol('pos', positive=True)
nneg = Symbol('nneg', nonnegative=True)
npos = Symbol('npos', nonpositive=True)
assert neg.is_negative is True
assert (-neg).is_negative is False
assert (2*neg).is_negative is True
assert (2*pos)._eval_is_negative() is False
assert (2*pos).is_negative is False
assert pos.is_negative is False
assert (-pos).is_negative is True
assert (2*pos).is_negative is False
assert (pos*neg).is_negative is True
assert (2*pos*neg).is_negative is True
assert (-pos*neg).is_negative is False
assert (pos*neg*y).is_negative is False # y.is_real=F; !real -> !neg
assert nneg.is_negative is False
assert (-nneg).is_negative is None
assert (2*nneg).is_negative is False
assert npos.is_negative is None
assert (-npos).is_negative is False
assert (2*npos).is_negative is None
assert (nneg*npos).is_negative is None
assert (neg*nneg).is_negative is None
assert (neg*npos).is_negative is False
assert (pos*nneg).is_negative is False
assert (pos*npos).is_negative is None
assert (npos*neg*nneg).is_negative is False
assert (npos*pos*nneg).is_negative is None
assert (-npos*neg*nneg).is_negative is None
assert (-npos*pos*nneg).is_negative is False
assert (17*npos*neg*nneg).is_negative is False
assert (17*npos*pos*nneg).is_negative is None
assert (neg*npos*pos*nneg).is_negative is False
assert (x*neg).is_negative is None
assert (nneg*npos*pos*x*neg).is_negative is None
assert neg.is_positive is False
assert (-neg).is_positive is True
assert (2*neg).is_positive is False
assert pos.is_positive is True
assert (-pos).is_positive is False
assert (2*pos).is_positive is True
assert (pos*neg).is_positive is False
assert (2*pos*neg).is_positive is False
assert (-pos*neg).is_positive is True
assert (-pos*neg*y).is_positive is False # y.is_real=F; !real -> !neg
assert nneg.is_positive is None
assert (-nneg).is_positive is False
assert (2*nneg).is_positive is None
assert npos.is_positive is False
assert (-npos).is_positive is None
assert (2*npos).is_positive is False
assert (nneg*npos).is_positive is False
assert (neg*nneg).is_positive is False
assert (neg*npos).is_positive is None
assert (pos*nneg).is_positive is None
assert (pos*npos).is_positive is False
assert (npos*neg*nneg).is_positive is None
assert (npos*pos*nneg).is_positive is False
assert (-npos*neg*nneg).is_positive is False
assert (-npos*pos*nneg).is_positive is None
assert (17*npos*neg*nneg).is_positive is None
assert (17*npos*pos*nneg).is_positive is False
assert (neg*npos*pos*nneg).is_positive is None
assert (x*neg).is_positive is None
assert (nneg*npos*pos*x*neg).is_positive is None
def test_Mul_is_negative_positive_2():
a = Symbol('a', nonnegative=True)
b = Symbol('b', nonnegative=True)
c = Symbol('c', nonpositive=True)
d = Symbol('d', nonpositive=True)
assert (a*b).is_nonnegative is True
assert (a*b).is_negative is False
assert (a*b).is_zero is None
assert (a*b).is_positive is None
assert (c*d).is_nonnegative is True
assert (c*d).is_negative is False
assert (c*d).is_zero is None
assert (c*d).is_positive is None
assert (a*c).is_nonpositive is True
assert (a*c).is_positive is False
assert (a*c).is_zero is None
assert (a*c).is_negative is None
def test_Mul_is_nonpositive_nonnegative():
x = Symbol('x', real=True)
k = Symbol('k', negative=True)
n = Symbol('n', positive=True)
u = Symbol('u', nonnegative=True)
v = Symbol('v', nonpositive=True)
assert k.is_nonpositive is True
assert (-k).is_nonpositive is False
assert (2*k).is_nonpositive is True
assert n.is_nonpositive is False
assert (-n).is_nonpositive is True
assert (2*n).is_nonpositive is False
assert (n*k).is_nonpositive is True
assert (2*n*k).is_nonpositive is True
assert (-n*k).is_nonpositive is False
assert u.is_nonpositive is None
assert (-u).is_nonpositive is True
assert (2*u).is_nonpositive is None
assert v.is_nonpositive is True
assert (-v).is_nonpositive is None
assert (2*v).is_nonpositive is True
assert (u*v).is_nonpositive is True
assert (k*u).is_nonpositive is True
assert (k*v).is_nonpositive is None
assert (n*u).is_nonpositive is None
assert (n*v).is_nonpositive is True
assert (v*k*u).is_nonpositive is None
assert (v*n*u).is_nonpositive is True
assert (-v*k*u).is_nonpositive is True
assert (-v*n*u).is_nonpositive is None
assert (17*v*k*u).is_nonpositive is None
assert (17*v*n*u).is_nonpositive is True
assert (k*v*n*u).is_nonpositive is None
assert (x*k).is_nonpositive is None
assert (u*v*n*x*k).is_nonpositive is None
assert k.is_nonnegative is False
assert (-k).is_nonnegative is True
assert (2*k).is_nonnegative is False
assert n.is_nonnegative is True
assert (-n).is_nonnegative is False
assert (2*n).is_nonnegative is True
assert (n*k).is_nonnegative is False
assert (2*n*k).is_nonnegative is False
assert (-n*k).is_nonnegative is True
assert u.is_nonnegative is True
assert (-u).is_nonnegative is None
assert (2*u).is_nonnegative is True
assert v.is_nonnegative is None
assert (-v).is_nonnegative is True
assert (2*v).is_nonnegative is None
assert (u*v).is_nonnegative is None
assert (k*u).is_nonnegative is None
assert (k*v).is_nonnegative is True
assert (n*u).is_nonnegative is True
assert (n*v).is_nonnegative is None
assert (v*k*u).is_nonnegative is True
assert (v*n*u).is_nonnegative is None
assert (-v*k*u).is_nonnegative is None
assert (-v*n*u).is_nonnegative is True
assert (17*v*k*u).is_nonnegative is True
assert (17*v*n*u).is_nonnegative is None
assert (k*v*n*u).is_nonnegative is True
assert (x*k).is_nonnegative is None
assert (u*v*n*x*k).is_nonnegative is None
def test_Add_is_negative_positive():
x = Symbol('x', real=True)
k = Symbol('k', negative=True)
n = Symbol('n', positive=True)
u = Symbol('u', nonnegative=True)
v = Symbol('v', nonpositive=True)
assert (k - 2).is_negative is True
assert (k + 17).is_negative is None
assert (-k - 5).is_negative is None
assert (-k + 123).is_negative is False
assert (k - n).is_negative is True
assert (k + n).is_negative is None
assert (-k - n).is_negative is None
assert (-k + n).is_negative is False
assert (k - n - 2).is_negative is True
assert (k + n + 17).is_negative is None
assert (-k - n - 5).is_negative is None
assert (-k + n + 123).is_negative is False
assert (-2*k + 123*n + 17).is_negative is False
assert (k + u).is_negative is None
assert (k + v).is_negative is True
assert (n + u).is_negative is False
assert (n + v).is_negative is None
assert (u - v).is_negative is False
assert (u + v).is_negative is None
assert (-u - v).is_negative is None
assert (-u + v).is_negative is None
assert (u - v + n + 2).is_negative is False
assert (u + v + n + 2).is_negative is None
assert (-u - v + n + 2).is_negative is None
assert (-u + v + n + 2).is_negative is None
assert (k + x).is_negative is None
assert (k + x - n).is_negative is None
assert (k - 2).is_positive is False
assert (k + 17).is_positive is None
assert (-k - 5).is_positive is None
assert (-k + 123).is_positive is True
assert (k - n).is_positive is False
assert (k + n).is_positive is None
assert (-k - n).is_positive is None
assert (-k + n).is_positive is True
assert (k - n - 2).is_positive is False
assert (k + n + 17).is_positive is None
assert (-k - n - 5).is_positive is None
assert (-k + n + 123).is_positive is True
assert (-2*k + 123*n + 17).is_positive is True
assert (k + u).is_positive is None
assert (k + v).is_positive is False
assert (n + u).is_positive is True
assert (n + v).is_positive is None
assert (u - v).is_positive is None
assert (u + v).is_positive is None
assert (-u - v).is_positive is None
assert (-u + v).is_positive is False
assert (u - v - n - 2).is_positive is None
assert (u + v - n - 2).is_positive is None
assert (-u - v - n - 2).is_positive is None
assert (-u + v - n - 2).is_positive is False
assert (n + x).is_positive is None
assert (n + x - k).is_positive is None
z = (-3 - sqrt(5) + (-sqrt(10)/2 - sqrt(2)/2)**2)
assert z.is_zero
z = sqrt(1 + sqrt(3)) + sqrt(3 + 3*sqrt(3)) - sqrt(10 + 6*sqrt(3))
assert z.is_zero
def test_Add_is_nonpositive_nonnegative():
    """Sign deduction for Add: is_nonpositive / is_nonnegative over sums of
    symbols with negative/positive/nonnegative/nonpositive assumptions.
    ``None`` means the sign cannot be decided from the assumptions alone.
    """
    x = Symbol('x', real=True)
    k = Symbol('k', negative=True)
    n = Symbol('n', positive=True)
    u = Symbol('u', nonnegative=True)
    v = Symbol('v', nonpositive=True)
    # --- is_nonpositive ---
    assert (u - 2).is_nonpositive is None
    assert (u + 17).is_nonpositive is False
    assert (-u - 5).is_nonpositive is True
    assert (-u + 123).is_nonpositive is None
    assert (u - v).is_nonpositive is None
    assert (u + v).is_nonpositive is None
    assert (-u - v).is_nonpositive is None
    assert (-u + v).is_nonpositive is True
    assert (u - v - 2).is_nonpositive is None
    assert (u + v + 17).is_nonpositive is None
    assert (-u - v - 5).is_nonpositive is None
    assert (-u + v - 123).is_nonpositive is True
    assert (-2*u + 123*v - 17).is_nonpositive is True
    assert (k + u).is_nonpositive is None
    assert (k + v).is_nonpositive is True
    assert (n + u).is_nonpositive is False
    assert (n + v).is_nonpositive is None
    assert (k - n).is_nonpositive is True
    assert (k + n).is_nonpositive is None
    assert (-k - n).is_nonpositive is None
    assert (-k + n).is_nonpositive is False
    assert (k - n + u + 2).is_nonpositive is None
    assert (k + n + u + 2).is_nonpositive is None
    assert (-k - n + u + 2).is_nonpositive is None
    assert (-k + n + u + 2).is_nonpositive is False
    assert (u + x).is_nonpositive is None
    assert (v - x - n).is_nonpositive is None
    # --- is_nonnegative (mirror cases) ---
    assert (u - 2).is_nonnegative is None
    assert (u + 17).is_nonnegative is True
    assert (-u - 5).is_nonnegative is False
    assert (-u + 123).is_nonnegative is None
    assert (u - v).is_nonnegative is True
    assert (u + v).is_nonnegative is None
    assert (-u - v).is_nonnegative is None
    assert (-u + v).is_nonnegative is None
    assert (u - v + 2).is_nonnegative is True
    assert (u + v + 17).is_nonnegative is None
    assert (-u - v - 5).is_nonnegative is None
    assert (-u + v - 123).is_nonnegative is False
    assert (2*u - 123*v + 17).is_nonnegative is True
    assert (k + u).is_nonnegative is None
    assert (k + v).is_nonnegative is False
    assert (n + u).is_nonnegative is True
    assert (n + v).is_nonnegative is None
    assert (k - n).is_nonnegative is False
    assert (k + n).is_nonnegative is None
    assert (-k - n).is_nonnegative is None
    assert (-k + n).is_nonnegative is True
    assert (k - n - u - 2).is_nonnegative is False
    assert (k + n - u - 2).is_nonnegative is None
    assert (-k - n - u - 2).is_nonnegative is None
    assert (-k + n - u - 2).is_nonnegative is None
    assert (u - x).is_nonnegative is None
    assert (v + x + n).is_nonnegative is None
def test_Pow_is_integer():
    """is_integer deduction for powers of integer/positive-integer bases.

    Fix: the ``Pow(3, S.Half, evaluate=False)`` assertion was duplicated
    verbatim on two consecutive lines; the redundant copy is removed.
    """
    x = Symbol('x')
    k = Symbol('k', integer=True)
    n = Symbol('n', integer=True, nonnegative=True)
    m = Symbol('m', integer=True, positive=True)
    assert (k**2).is_integer is True
    assert (k**(-2)).is_integer is None  # k could be 0 or +-1
    assert ((m + 1)**(-2)).is_integer is False
    assert (m**(-1)).is_integer is None  # issue 8580
    assert (2**k).is_integer is None
    assert (2**(-k)).is_integer is None
    assert (2**n).is_integer is True
    assert (2**(-n)).is_integer is None  # n could be 0
    assert (2**m).is_integer is True
    assert (2**(-m)).is_integer is False
    assert (x**2).is_integer is None
    assert (2**x).is_integer is None
    assert (k**n).is_integer is True
    assert (k**(-n)).is_integer is None
    assert (k**x).is_integer is None
    assert (x**k).is_integer is None
    assert (k**(n*m)).is_integer is True
    assert (k**(-n*m)).is_integer is None
    assert sqrt(3).is_integer is False
    assert sqrt(.3).is_integer is False
    assert Pow(3, 2, evaluate=False).is_integer is True
    assert Pow(3, 0, evaluate=False).is_integer is True
    assert Pow(3, -2, evaluate=False).is_integer is False
    assert Pow(S.Half, 3, evaluate=False).is_integer is False
    # decided by re-evaluating
    assert Pow(3, S.Half, evaluate=False).is_integer is False
    assert Pow(4, S.Half, evaluate=False).is_integer is True
    assert Pow(S.Half, -2, evaluate=False).is_integer is True
    assert ((-1)**k).is_integer
    # an explicitly non-integer real base may still square to an integer
    x = Symbol('x', real=True, integer=False)
    assert (x**2).is_integer is None  # issue 8641
def test_Pow_is_real():
    """is_real / is_imaginary deduction for powers, including imaginary
    bases, imaginary exponents and logs of exp() round-trips."""
    x = Symbol('x', real=True)
    y = Symbol('y', real=True, positive=True)
    assert (x**2).is_real is True
    assert (x**3).is_real is True
    assert (x**x).is_real is None  # negative base, non-integer exponent
    assert (y**x).is_real is True
    assert (x**Rational(1, 3)).is_real is None
    assert (y**Rational(1, 3)).is_real is True
    assert sqrt(-1 - sqrt(2)).is_real is False
    i = Symbol('i', imaginary=True)
    assert (i**i).is_real is None
    assert (I**i).is_real is True
    assert ((-I)**i).is_real is True
    assert (2**i).is_real is None  # (2**(pi/log(2) * I)) is real, 2**I is not
    assert (2**I).is_real is False
    assert (2**-I).is_real is False
    assert (i**2).is_real is True
    assert (i**3).is_real is False
    assert (i**x).is_real is None  # could be (-I)**(2/3)
    e = Symbol('e', even=True)
    o = Symbol('o', odd=True)
    k = Symbol('k', integer=True)
    assert (i**e).is_real is True
    assert (i**o).is_real is False
    assert (i**k).is_real is None
    assert (i**(4*k)).is_real is True
    x = Symbol("x", nonnegative=True)
    y = Symbol("y", nonnegative=True)
    assert im(x**y).expand(complex=True) is S.Zero
    assert (x**y).is_real is True
    i = Symbol('i', imaginary=True)
    assert (exp(i)**I).is_real is True
    assert log(exp(i)).is_imaginary is None  # i could be 2*pi*I
    c = Symbol('c', complex=True)
    assert log(c).is_real is None  # c could be 0 or 2, too
    assert log(exp(c)).is_real is None  # log(0), log(E), ...
    n = Symbol('n', negative=False)
    assert log(n).is_real is None
    n = Symbol('n', nonnegative=True)
    assert log(n).is_real is None
    assert sqrt(-I).is_real is False  # issue 7843
def test_real_Pow():
    """k**(I*pi/log(k)) equals E, so it must be recognized as real."""
    base = Symbol('k', integer=True, nonzero=True)
    expr = base**(I*pi/log(base))
    assert expr.is_real
def test_Pow_is_finite():
    """is_finite for powers: unbounded symbols give None; bounded bases
    (like sin) with numeric exponents are finite."""
    x = Symbol('x', real=True)
    p = Symbol('p', positive=True)
    n = Symbol('n', negative=True)
    assert (x**2).is_finite is None  # x could be oo
    assert (x**x).is_finite is None  # ditto
    assert (p**x).is_finite is None  # ditto
    assert (n**x).is_finite is None  # ditto
    assert (1/S.Pi).is_finite
    assert (sin(x)**2).is_finite is True
    assert (sin(x)**x).is_finite is None
    assert (sin(x)**exp(x)).is_finite is None
    assert (1/sin(x)).is_finite is None  # if zero, no, otherwise yes
    assert (1/exp(x)).is_finite is None  # x could be -oo
def test_Pow_is_even_odd():
    """Parity (is_even / is_odd) of powers with even/odd/integer bases
    and exponents; None where the exponent could be 0 or negative."""
    x = Symbol('x')
    k = Symbol('k', even=True)
    n = Symbol('n', odd=True)
    m = Symbol('m', integer=True, nonnegative=True)
    p = Symbol('p', integer=True, positive=True)
    assert ((-1)**n).is_odd
    assert ((-1)**k).is_odd  # (-1)**even == 1, which is odd
    assert ((-1)**(m - p)).is_odd
    # is_even
    assert (k**2).is_even is True
    assert (n**2).is_even is False
    assert (2**k).is_even is None  # k could be negative
    assert (x**2).is_even is None
    assert (k**m).is_even is None  # m could be 0
    assert (n**m).is_even is False
    assert (k**p).is_even is True
    assert (n**p).is_even is False
    assert (m**k).is_even is None
    assert (p**k).is_even is None
    assert (m**n).is_even is None
    assert (p**n).is_even is None
    assert (k**x).is_even is None
    assert (n**x).is_even is None
    # is_odd (mirror cases)
    assert (k**2).is_odd is False
    assert (n**2).is_odd is True
    assert (3**k).is_odd is None
    assert (k**m).is_odd is None
    assert (n**m).is_odd is True
    assert (k**p).is_odd is False
    assert (n**p).is_odd is True
    assert (m**k).is_odd is None
    assert (p**k).is_odd is None
    assert (m**n).is_odd is None
    assert (p**n).is_odd is None
    assert (k**x).is_odd is None
    assert (n**x).is_odd is None
def test_Pow_is_negative_positive():
    """Sign of powers: a negative base raised to an even power is positive,
    to an odd power negative; real exponents on a negative base give None."""
    r = Symbol('r', real=True)
    k = Symbol('k', integer=True, positive=True)
    n = Symbol('n', even=True)
    m = Symbol('m', odd=True)
    x = Symbol('x')
    assert (2**r).is_positive is True
    assert ((-2)**r).is_positive is None
    assert ((-2)**n).is_positive is True
    assert ((-2)**m).is_positive is False
    assert (k**2).is_positive is True
    assert (k**(-2)).is_positive is True
    assert (k**r).is_positive is True
    assert ((-k)**r).is_positive is None
    assert ((-k)**n).is_positive is True
    assert ((-k)**m).is_positive is False
    assert (2**r).is_negative is False
    assert ((-2)**r).is_negative is None
    assert ((-2)**n).is_negative is False
    assert ((-2)**m).is_negative is True
    assert (k**2).is_negative is False
    assert (k**(-2)).is_negative is False
    assert (k**r).is_negative is False
    assert ((-k)**r).is_negative is None
    assert ((-k)**n).is_negative is False
    assert ((-k)**m).is_negative is True
    # a completely unknown exponent decides nothing
    assert (2**x).is_positive is None
    assert (2**x).is_negative is None
def test_Pow_is_zero():
    """is_zero for powers: 0**positive is zero, 0**nonpositive is not;
    |base| vs 1 decides the base**(+-oo) cases."""
    z = Symbol('z', zero=True)
    e = z**2
    assert e.is_zero
    assert e.is_positive is False
    assert e.is_negative is False
    assert Pow(0, 0, evaluate=False).is_zero is False  # 0**0 == 1
    assert Pow(0, 3, evaluate=False).is_zero
    assert Pow(0, oo, evaluate=False).is_zero
    assert Pow(0, -3, evaluate=False).is_zero is False
    assert Pow(0, -oo, evaluate=False).is_zero is False
    assert Pow(2, 2, evaluate=False).is_zero is False
    a = Symbol('a', zero=False)
    assert Pow(a, 3).is_zero is False  # issue 7965
    assert Pow(2, oo, evaluate=False).is_zero is False
    assert Pow(2, -oo, evaluate=False).is_zero
    assert Pow(S.Half, oo, evaluate=False).is_zero
    assert Pow(S.Half, -oo, evaluate=False).is_zero is False
def test_Pow_is_nonpositive_nonnegative():
    """is_nonnegative / is_nonpositive for powers, including imaginary
    bases (i**2 is a nonpositive real) and I to an imaginary power."""
    x = Symbol('x', real=True)
    k = Symbol('k', integer=True, nonnegative=True)
    l = Symbol('l', integer=True, positive=True)
    n = Symbol('n', even=True)
    m = Symbol('m', odd=True)
    assert (x**(4*k)).is_nonnegative is True
    assert (2**x).is_nonnegative is True
    assert ((-2)**x).is_nonnegative is None
    assert ((-2)**n).is_nonnegative is True
    assert ((-2)**m).is_nonnegative is False
    assert (k**2).is_nonnegative is True
    assert (k**(-2)).is_nonnegative is None  # k could be 0
    assert (k**k).is_nonnegative is True
    assert (k**x).is_nonnegative is None    # NOTE (0**x).is_real = U
    assert (l**x).is_nonnegative is True
    assert (l**x).is_positive is True
    assert ((-k)**x).is_nonnegative is None
    assert ((-k)**n).is_nonnegative is None
    assert ((-k)**m).is_nonnegative is None
    assert (2**x).is_nonpositive is False
    assert ((-2)**x).is_nonpositive is None
    assert ((-2)**n).is_nonpositive is False
    assert ((-2)**m).is_nonpositive is True
    assert (k**2).is_nonpositive is None
    assert (k**(-2)).is_nonpositive is None
    assert (k**x).is_nonpositive is None
    assert ((-k)**x).is_nonpositive is None
    assert ((-k)**n).is_nonpositive is None
    assert ((-k)**m).is_nonpositive is None
    assert (x**2).is_nonnegative is True
    i = symbols('i', imaginary=True)
    assert (i**2).is_nonpositive is True   # i**2 is real and <= 0
    assert (i**4).is_nonpositive is False
    assert (i**3).is_nonpositive is False  # not real
    assert (I**i).is_nonnegative is True
    assert (exp(I)**i).is_nonnegative is True
def test_Mul_is_imaginary_real():
    """is_imaginary / is_real for products: imaginary*imaginary is real,
    real*imaginary is imaginary-or-unknown, and powers of (p + p*I)."""
    r = Symbol('r', real=True)
    p = Symbol('p', positive=True)
    i = Symbol('i', imaginary=True)
    ii = Symbol('ii', imaginary=True)
    x = Symbol('x')
    assert I.is_imaginary is True
    assert I.is_real is False
    assert (-I).is_imaginary is True
    assert (-I).is_real is False
    assert (3*I).is_imaginary is True
    assert (3*I).is_real is False
    assert (I*I).is_imaginary is False
    assert (I*I).is_real is True
    # e has argument pi/4, so e**n cycles real/imaginary with period 4
    e = (p + p*I)
    j = Symbol('j', integer=True, zero=False)
    assert (e**j).is_real is None
    assert (e**(2*j)).is_real is None
    assert (e**j).is_imaginary is None
    assert (e**(2*j)).is_imaginary is None
    assert (e**-1).is_imaginary is False
    assert (e**2).is_imaginary
    assert (e**3).is_imaginary is False
    assert (e**4).is_imaginary is False
    assert (e**5).is_imaginary is False
    assert (e**-1).is_real is False
    assert (e**2).is_real is False
    assert (e**3).is_real is False
    assert (e**4).is_real
    assert (e**5).is_real is False
    assert (e**3).is_complex
    assert (r*i).is_imaginary is None  # r could be 0
    assert (r*i).is_real is None
    assert (x*i).is_imaginary is None
    assert (x*i).is_real is None
    assert (i*ii).is_imaginary is False
    assert (i*ii).is_real is True
    assert (r*i*ii).is_imaginary is False
    assert (r*i*ii).is_real is True
    # Github's issue 5874:
    nr = Symbol('nr', real=False, complex=True)
    a = Symbol('a', real=True, nonzero=True)
    b = Symbol('b', real=True)
    assert (i*nr).is_real is None
    assert (a*nr).is_real is False
    assert (b*nr).is_real is None  # b could be 0
def test_Mul_hermitian_antihermitian():
    """Unevaluated products mixing (anti)hermitian and unknown factors
    cannot be classified: every query must return None."""
    a = Symbol('a', hermitian=True, zero=False)
    b = Symbol('b', hermitian=True)
    c = Symbol('c', hermitian=False)
    d = Symbol('d', antihermitian=True)
    products = [
        Mul(a, b, c, evaluate=False),
        Mul(b, a, c, evaluate=False),
        Mul(a, b, c, d, evaluate=False),
        Mul(b, a, c, d, evaluate=False),
        Mul(a, c, evaluate=False),
        Mul(a, c, d, evaluate=False),
    ]
    # the non-hermitian factor c blocks any hermiticity conclusion
    for prod in products[:2]:
        assert prod.is_hermitian is None
    for prod in products:
        assert prod.is_antihermitian is None
def test_Add_is_comparable():
    """A sum is comparable only when it is free of symbols."""
    cases = [
        (x + y, False),
        (x + 1, False),
        (Rational(1, 3) - sqrt(8), True),
    ]
    for expr, expected in cases:
        assert expr.is_comparable is expected
def test_Mul_is_comparable():
    """A product is comparable only when it is free of symbols."""
    cases = [
        (x*y, False),
        (x*2, False),
        (sqrt(2)*Rational(1, 3), True),
    ]
    for expr, expected in cases:
        assert expr.is_comparable is expected
def test_Pow_is_comparable():
    """A power is comparable only when it is free of symbols."""
    cases = [
        (x**y, False),
        (x**2, False),
        (sqrt(Rational(1, 3)), True),
    ]
    for expr, expected in cases:
        assert expr.is_comparable is expected
def test_Add_is_positive_2():
    """Numeric sums containing irrational terms still get definite signs."""
    below_zero = Rational(1, 3) - sqrt(8)  # approx -2.50
    assert below_zero.is_positive is False
    assert below_zero.is_negative is True
    above_zero = pi - 1                    # approx 2.14
    assert above_zero.is_positive is True
    assert above_zero.is_negative is False
def test_Add_is_irrational():
    """Shifting an irrational symbol by a rational keeps it irrational."""
    irr = Symbol('i', irrational=True)
    for expr in (irr, irr + 1):
        assert expr.is_irrational is True
        assert expr.is_rational is False
@XFAIL
def test_issue_3531():
    """sympify(1)/obj should defer to the object's reflected division.

    Expected to fail: SymPy's Integer does not currently return
    NotImplemented, so __rdiv__/__rtruediv__ never fire.
    """
    class MightyNumeric(tuple):
        def __rdiv__(self, other):      # Python 2 reflected division
            return "something"
        def __rtruediv__(self, other):  # Python 3 reflected division
            return "something"
    assert sympify(1)/MightyNumeric((1, 2)) == "something"
def test_issue_3531b():
    """Mixing a Symbol with a foreign class must not raise.

    NOTE: Foo.__mul__/__rmul__ return None implicitly, so both sides of
    the final comparison are None; the test checks only that neither
    multiplication raises.
    """
    class Foo:
        def __init__(self):
            self.field = 1.0
        def __mul__(self, other):
            self.field = self.field * other
        def __rmul__(self, other):
            self.field = other * self.field
    f = Foo()
    x = Symbol("x")
    assert f*x == x*f
def test_bug3():
    """Term order in construction must not affect Add equality."""
    a = Symbol("a")
    b = Symbol("b", positive=True)
    left = 2*a + b
    right = b + 2*a
    assert left == right
def test_suppressed_evaluation():
    """evaluate=False keeps the operation unevaluated (identity elements
    are still stripped from args) and unequal to its evaluated value."""
    cases = [
        (Add(0, 3, 2, evaluate=False), Add, 6),
        (Mul(1, 3, 2, evaluate=False), Mul, 6),
        (Pow(3, 2, evaluate=False), Pow, 9),
    ]
    for expr, operation, value in cases:
        assert expr != value
        assert expr.func is operation
        assert expr.args == (3, 2)
def test_Add_as_coeff_mul():
    """issue 5524: symbol +- small integer must give coefficient 1 with
    the whole Add as the sole multiplicand (never split the constant)."""
    for base in (x, Symbol('n', integer=True)):
        for shift in (1, 2, 3, -1, -2, -3):
            expr = base + shift
            assert expr.as_coeff_mul() == (1, (expr,))
def test_Pow_as_coeff_mul_doesnt_expand():
    """as_coeff_mul must not expand exp of a sum into a product."""
    unexpanded = exp(x + y)
    assert unexpanded.as_coeff_mul() == (1, (unexpanded,))
    assert exp(x + exp(x + y)) != exp(x + exp(x)*exp(y))
def test_issue_3514():
    """Products of square roots must combine to the canonical sqrt(3)."""
    root3 = sqrt(3)
    assert sqrt(S.Half) * sqrt(6) == 2 * root3/2
    assert S(1)/2*sqrt(6)*sqrt(2) == root3
    assert sqrt(6)/2*sqrt(2) == root3
    assert sqrt(6)*sqrt(2)/2 == root3
def test_make_args():
    """Add.make_args/Mul.make_args split an expression into args only
    when it is an instance of that operation; otherwise wrap it whole."""
    assert Add.make_args(x) == (x,)
    assert Mul.make_args(x) == (x,)
    assert Add.make_args(x*y*z) == (x*y*z,)
    assert Mul.make_args(x*y*z) == (x*y*z).args
    assert Add.make_args(x + y + z) == (x + y + z).args
    assert Mul.make_args(x + y + z) == (x + y + z,)
    assert Add.make_args((x + y)**z) == ((x + y)**z,)
    assert Mul.make_args((x + y)**z) == ((x + y)**z,)
def test_issue_5126():
    """Negative bases combine, (-2)**i*(-3)**i -> 6**i, only for an
    integer exponent.

    Fix: the assumption was spelled ``integer=1`` (a truthy int); use the
    boolean ``integer=True`` like every other Symbol in this file.
    """
    assert (-2)**x*(-3)**x != 6**x
    i = Symbol('i', integer=True)
    assert (-2)**i*(-3)**i == 6**i
def test_Rational_as_content_primitive():
    """For c*p with Rational c, as_content_primitive returns (c, p)."""
    for content, primitive in [(S(1), S(0)), (S(1)/2, S(1))]:
        assert (content*primitive).as_content_primitive() == (content, primitive)
def test_Add_as_content_primitive():
    """as_content_primitive on sums: pull out the rational GCD of the
    term coefficients, leaving a primitive sum."""
    assert (x + 2).as_content_primitive() == (1, x + 2)
    assert (3*x + 2).as_content_primitive() == (1, 3*x + 2)
    assert (3*x + 3).as_content_primitive() == (3, x + 1)
    assert (3*x + 6).as_content_primitive() == (3, x + 2)
    assert (3*x + 2*y).as_content_primitive() == (1, 3*x + 2*y)
    assert (3*x + 3*y).as_content_primitive() == (3, x + y)
    assert (3*x + 6*y).as_content_primitive() == (3, x + 2*y)
    assert (3/x + 2*x*y*z**2).as_content_primitive() == (1, 3/x + 2*x*y*z**2)
    assert (3/x + 3*x*y*z**2).as_content_primitive() == (3, 1/x + x*y*z**2)
    assert (3/x + 6*x*y*z**2).as_content_primitive() == (3, 1/x + 2*x*y*z**2)
    assert (2*x/3 + 4*y/9).as_content_primitive() == \
        (Rational(2, 9), 3*x + 2*y)
    # a Float coefficient blocks extraction from that term
    assert (2*x/3 + 2.5*y).as_content_primitive() == \
        (Rational(1, 3), 2*x + 7.5*y)
    # the coefficient may sort to a position other than 0
    p = 3 + x + y
    assert (2*p).expand().as_content_primitive() == (2, p)
    assert (2.0*p).expand().as_content_primitive() == (1, 2.*p)
    p *= -1
    assert (2*p).expand().as_content_primitive() == (2, p)
def test_Mul_as_content_primitive():
    """as_content_primitive on products multiplies the content extracted
    from every factor (including powered factors)."""
    assert (2*x).as_content_primitive() == (2, x)
    assert (x*(2 + 2*x)).as_content_primitive() == (2, x*(1 + x))
    assert (x*(2 + 2*y)*(3*x + 3)**2).as_content_primitive() == \
        (18, x*(1 + y)*(x + 1)**2)
    assert ((2 + 2*x)**2*(3 + 6*x) + S.Half).as_content_primitive() == \
        (S.Half, 24*(x + 1)**2*(2*x + 1) + 1)
def test_Pow_as_content_primitive():
    """as_content_primitive on powers: content is extracted only for an
    integer exponent; a symbolic exponent leaves the base unevaluated."""
    assert (x**y).as_content_primitive() == (1, x**y)
    assert ((2*x + 2)**y).as_content_primitive() == \
        (1, (Mul(2, (x + 1), evaluate=False))**y)
    assert ((2*x + 2)**3).as_content_primitive() == (8, (x + 1)**3)
def test_issue_5460():
    """An unevaluated Mul must survive intact as an Add argument."""
    unevaluated = Mul(2, (1 + x), evaluate=False)
    total = 2 + unevaluated
    assert total.args == (2, unevaluated)
def test_product_irrational():
    """I*pi is not real, so it is neither irrational nor positive."""
    from sympy import I, pi
    assert (I*pi).is_irrational is False
    # The following used to be deduced from the above bug:
    assert (I*pi).is_positive is False
def test_issue_5919():
    """expand() must distribute the product inside the denominator."""
    quotient = x/(y*(1 + y))
    assert quotient.expand() == x/(y**2 + y)
def test_Mod():
    """Behavior of Mod: integer/Float/Rational arguments, sign convention
    (result follows the divisor), denesting, GCD extraction and parity.
    """
    assert Mod(x, 1).func is Mod
    assert pi % pi == S.Zero
    # sign of the result follows the divisor (Python convention)
    assert Mod(5, 3) == 2
    assert Mod(-5, 3) == 1
    assert Mod(5, -3) == -1
    assert Mod(-5, -3) == -2
    assert type(Mod(3.2, 2, evaluate=False)) == Mod
    assert 5 % x == Mod(5, x)
    assert x % 5 == Mod(x, 5)
    assert x % y == Mod(x, y)
    assert (x % y).subs({x: 5, y: 3}) == 2
    # Float handling
    point3 = Float(3.3) % 1
    assert (x - 3.3) % 1 == Mod(1.*x + 1 - point3, 1)
    assert Mod(-3.3, 1) == 1 - point3
    assert Mod(0.7, 1) == Float(0.7)
    # a Float on either side makes the result a Float
    e = Mod(1.3, 1)
    assert comp(e, .3) and e.is_Float
    e = Mod(1.3, .7)
    assert comp(e, .6) and e.is_Float
    e = Mod(1.3, Rational(7, 10))
    assert comp(e, .6) and e.is_Float
    e = Mod(Rational(13, 10), 0.7)
    assert comp(e, .6) and e.is_Float
    e = Mod(Rational(13, 10), Rational(7, 10))
    assert comp(e, .6) and e.is_Rational
    # check that sign is right
    r2 = sqrt(2)
    r3 = sqrt(3)
    for i in [-r3, -r2, r2, r3]:
        for j in [-r3, -r2, r2, r3]:
            assert verify_numerically(i % j, i.n() % j.n())
    for _x in range(4):
        for _y in range(9):
            reps = [(x, _x), (y, _y)]
            assert Mod(3*x + y, 9).subs(reps) == (3*_x + _y) % 9
    # denesting
    # easy case
    assert Mod(Mod(x, y), y) == Mod(x, y)
    # in case someone attempts more denesting
    for i in [-3, -2, 2, 3]:
        for j in [-3, -2, 2, 3]:
            for k in range(3):
                assert Mod(Mod(k, i), j) == (k % i) % j
    # known difference
    assert Mod(5*sqrt(2), sqrt(5)) == 5*sqrt(2) - 3*sqrt(5)
    p = symbols('p', positive=True)
    assert Mod(p + 1, p + 3) == p + 1
    n = symbols('n', negative=True)
    assert Mod(n - 3, n - 1) == -2
    assert Mod(n - 2*p, n - p) == -p
    assert Mod(p - 2*n, p - n) == -n
    # handling sums
    assert (x + 3) % 1 == Mod(x, 1)
    assert (x + 3.0) % 1 == Mod(1.*x, 1)
    assert (x - S(33)/10) % 1 == Mod(x + S(7)/10, 1)
    a = Mod(.6*x + y, .3*y)
    b = Mod(0.1*y + 0.6*x, 0.3*y)
    # Test that a, b are equal, with 1e-14 accuracy in coefficients
    eps = 1e-14
    assert abs((a.args[0] - b.args[0]).subs({x: 1, y: 1})) < eps
    assert abs((a.args[1] - b.args[1]).subs({x: 1, y: 1})) < eps
    assert (x + 1) % x == 1 % x
    assert (x + y) % x == y % x
    assert (x + y + 2) % x == (y + 2) % x
    assert (a + 3*x + 1) % (2*x) == Mod(a + x + 1, 2*x)
    assert (12*x + 18*y) % (3*x) == 3*Mod(6*y, x)
    # gcd extraction
    assert (-3*x) % (-2*y) == -Mod(3*x, 2*y)
    assert (.6*pi) % (.3*x*pi) == 0.3*pi*Mod(2, x)
    assert (.6*pi) % (.31*x*pi) == pi*Mod(0.6, 0.31*x)
    assert (6*pi) % (.3*x*pi) == 0.3*pi*Mod(20, x)
    assert (6*pi) % (.31*x*pi) == pi*Mod(6, 0.31*x)
    assert (6*pi) % (.42*x*pi) == pi*Mod(6, 0.42*x)
    assert (12*x) % (2*y) == 2*Mod(6*x, y)
    assert (12*x) % (3*5*y) == 3*Mod(4*x, 5*y)
    assert (12*x) % (15*x*y) == 3*x*Mod(4, 5*y)
    assert (-2*pi) % (3*pi) == pi
    assert (2*x + 2) % (x + 1) == 0
    assert (x*(x + 1)) % (x + 1) == (x + 1)*Mod(x, 1)
    assert Mod(5.0*x, 0.1*y) == 0.1*Mod(50*x, y)
    i = Symbol('i', integer=True)
    assert (3*i*x) % (2*i*y) == i*Mod(3*x, 2*y)
    assert Mod(4*i, 4) == 0
    # issue 8677
    n = Symbol('n', integer=True, positive=True)
    assert (factorial(n) % n).equals(0) is not False
    # symbolic with known parity
    n = Symbol('n', even=True)
    assert Mod(n, 2) == 0
    n = Symbol('n', odd=True)
    assert Mod(n, 2) == 1
def test_Mod_is_integer():
    """Mod of integers is integer only when the divisor is known nonzero."""
    p = Symbol('p', integer=True)
    q1 = Symbol('q1', integer=True)
    q2 = Symbol('q2', integer=True, nonzero=True)
    assert Mod(x, y).is_integer is None
    assert Mod(p, q1).is_integer is None  # q1 could be 0
    assert Mod(x, q2).is_integer is None
    assert Mod(p, q2).is_integer
def test_Mod_is_nonposneg():
    """The sign of Mod(n, d) follows the sign of the divisor d."""
    n = Symbol('n', integer=True)
    k = Symbol('k', integer=True, positive=True)
    assert (n%3).is_nonnegative
    assert Mod(n, -3).is_nonpositive
    assert Mod(n, k).is_nonnegative
    assert Mod(n, -k).is_nonpositive
    assert Mod(k, n).is_nonnegative is None  # n's sign is unknown
def test_issue_6001():
    """is_commutative must be consistent across equivalent sums involving
    a non-commutative symbol."""
    A = Symbol("A", commutative=False)
    eq = A + A**2
    # it doesn't matter whether it's True or False; they should
    # just all be the same
    assert (
        eq.is_commutative ==
        (eq + 1).is_commutative ==
        (A + 1).is_commutative)
    B = Symbol("B", commutative=False)
    # Although commutative terms could cancel we return False,
    # meaning "there are non-commutative symbols"; after substitution
    # that can change, e.g. (A*B).subs(B, A**-1) -> 1
    assert (sqrt(2)*A).is_commutative is False
    assert (sqrt(2)*A*B).is_commutative is False
def test_polar():
    """is_polar propagation through powers and products, and the exponent
    laws that hold only on the polar (log-branch-tracking) domain."""
    from sympy import polar_lift
    p = Symbol('p', polar=True)
    x = Symbol('x')
    assert p.is_polar
    assert x.is_polar is None
    assert S(1).is_polar is None
    assert (p**x).is_polar is True
    assert (x**p).is_polar is None
    assert ((2*p)**x).is_polar is True
    assert (2*p).is_polar is True
    # a negative coefficient leaves the polar domain unless lifted
    assert (-2*p).is_polar is not True
    assert (polar_lift(-2)*p).is_polar is True
    q = Symbol('q', polar=True)
    # (a*b)**c == a**c * b**c is valid for polar numbers
    assert (p*q)**2 == p**2 * q**2
    assert (2*q)**2 == 4 * q**2
    assert ((p*q)**x).expand() == p**x * q**x
def test_issue_6040():
    """An unevaluated Pow(1, 2) is structurally distinct from S.One,
    and the inequality must be symmetric."""
    unevaluated = Pow(1, 2, evaluate=False)
    one = S.One
    for lhs, rhs in ((unevaluated, one), (one, unevaluated)):
        assert lhs != rhs
        assert not (lhs == rhs)
def test_issue_6082():
    """Basic.compare must be antisymmetric, reflexive-zero, and must
    distinguish Basic subtypes from ordinary Python containers."""
    # Comparison is symmetric
    assert Basic.compare(Max(x, 1), Max(x, 2)) == \
        - Basic.compare(Max(x, 2), Max(x, 1))
    # Equal expressions compare equal
    assert Basic.compare(Max(x, 1), Max(x, 1)) == 0
    # Basic subtypes (such as Max) compare different than standard types
    assert Basic.compare(Max(1, x), frozenset((1, x))) != 0
def test_issue_6077():
    """Float exponents must combine arithmetically and stay Float
    (e.g. x**2.0/x gives x**1.0, not x)."""
    assert x**2.0/x == x**1.0
    assert x/x**2.0 == x**-1.0
    assert x*x**2.0 == x**3.0
    assert x**1.5*x**2.5 == x**4.0
    assert 2**(2.0*x)/2**x == 2**(1.0*x)
    assert 2**x/2**(2.0*x) == 2**(-1.0*x)
    assert 2**x*2**(2.0*x) == 2**(3.0*x)
    assert 2**(1.5*x)*2**(2.5*x) == 2**(4.0*x)
def test_mul_flatten_oo():
    """Multiplying oo by symbols of known sign fixes the direction;
    an imaginary coefficient must not collapse to I*oo."""
    p = symbols('p', positive=True)
    n, m = symbols('n,m', negative=True)
    x_im = symbols('x_im', imaginary=True)
    signed_cases = [
        (n*oo, -oo),       # negative * oo
        (n*m*oo, oo),      # two negatives cancel
        (p*oo, oo),        # positive * oo
    ]
    for expr, expected in signed_cases:
        assert expr == expected
    assert x_im*oo != I*oo  # i could be +/- 3*I -> +/-oo
def test_add_flatten():
    """Combining directed infinities oo +- I*oo must yield nan, and their
    reciprocals simplify to 0.
    """
    # see https://github.com/sympy/sympy/issues/2633#issuecomment-29545524
    a = oo + I*oo
    b = oo - I*oo
    assert a + b == nan
    assert a - b == nan
    assert (1/a).simplify() == (1/b).simplify() == 0
def test_issue_5160_6087_6089_6090():
    """Float coefficients and non-commutative factors under powers:
    the numeric coefficient is extracted, the rest stays grouped."""
    # issue 6087
    assert ((-2*x*y**y)**3.2).n(2) == (2**3.2*(-x*y**y)**3.2).n(2)
    # issue 6089
    A, B, C = symbols('A,B,C', commutative=False)
    assert (2.*B*C)**3 == 8.0*(B*C)**3
    assert (-2.*B*C)**3 == -8.0*(B*C)**3
    assert (-2*B*C)**2 == 4*(B*C)**2
    # issue 5160
    assert sqrt(-1.0*x) == 1.0*sqrt(-x)
    assert sqrt(1.0*x) == 1.0*sqrt(x)
    # issue 6090
    assert (-2*x*y*A*B)**2 == 4*x**2*y**2*(A*B)**2
def test_float_int():
    """Conversion between Float/Rational and Python int/Integer at high
    precision; symbols must refuse float().

    Fix: the redundant Python-2 ``long(...)`` wrappers are dropped — the
    plain integer literals have identical values (ints auto-promote on
    Python 2, and ``long`` does not exist on Python 3).
    """
    assert int(float(sqrt(10))) == int(sqrt(10))
    assert int(pi**1000) % 10 == 2
    assert int(Float('1.123456789012345678901234567890e20', '')) == \
        112345678901234567890
    assert int(Float('1.123456789012345678901234567890e25', '')) == \
        11234567890123456789012345
    # decimal forces float so it's not an exact integer ending in 000000
    assert int(Float('1.123456789012345678901234567890e35', '')) == \
        112345678901234567890123456789000192
    assert int(Float('123456789012345678901234567890e5', '')) == \
        12345678901234567890123456789000000
    assert Integer(Float('1.123456789012345678901234567890e20', '')) == \
        112345678901234567890
    assert Integer(Float('1.123456789012345678901234567890e25', '')) == \
        11234567890123456789012345
    # decimal forces float so it's not an exact integer ending in 000000
    assert Integer(Float('1.123456789012345678901234567890e35', '')) == \
        112345678901234567890123456789000192
    assert Integer(Float('123456789012345678901234567890e5', '')) == \
        12345678901234567890123456789000000
    assert same_and_same_prec(Float('123000e-2',''), Float('1230.00', ''))
    assert same_and_same_prec(Float('123000e2',''), Float('12300000', ''))
    assert int(1 + Rational('.9999999999999999999999999')) == 1
    assert int(pi/1e20) == 0
    assert int(1 + pi/1e20) == 1
    assert int(Add(1.2, -2, evaluate=False)) == int(1.2 - 2)
    assert int(Add(1.2, +2, evaluate=False)) == int(1.2 + 2)
    assert int(Add(1 + Float('.99999999999999999', ''), evaluate=False)) == 1
    # symbolic expressions cannot be coerced to float
    raises(TypeError, lambda: float(x))
    raises(TypeError, lambda: float(sqrt(-1)))
    assert int(12345678901234567890 + cos(1)**2 + sin(1)**2) == \
        12345678901234567891
def test_issue_6611a():
    """Mul.flatten must extract the rational content of a fractional
    power of a negative Rational, leaving (-1)**(2/3)."""
    assert Mul.flatten([3**Rational(1, 3),
        Pow(-Rational(1, 9), Rational(2, 3), evaluate=False)]) == \
        ([Rational(1, 3), (-1)**Rational(2, 3)], [], None)
def test_denest_add_mul():
    """Rebuilding from .args must denest evaluated nested Add/Mul, but an
    unevaluated Mul must keep its structure under negation/scaling."""
    # when working with evaluated expressions make sure they denest
    eq = x + 1
    eq = Add(eq, 2, evaluate=False)
    eq = Add(eq, 2, evaluate=False)
    assert Add(*eq.args) == x + 5
    eq = x*2
    eq = Mul(eq, 2, evaluate=False)
    eq = Mul(eq, 2, evaluate=False)
    assert Mul(*eq.args) == 8*x
    # but don't let them denest unnecessarily
    eq = Mul(-2, x - 2, evaluate=False)
    assert 2*eq == Mul(-4, x - 2, evaluate=False)
    assert -eq == Mul(2, x - 2, evaluate=False)
def test_mul_coeff():
    """Powers combining into a pure number must leave the Mul's arg seq.

    exp(I*pi/3) is a primitive 6th root of unity, so the six copies below
    multiply to 1 and only x**2*y must remain.
    """
    root = exp(I*pi/3)
    product = root**2 * x * root * y * root * x * root**2
    assert product == x**2*y
def test_mul_zero_detection():
    """is_zero/is_real/is_imaginary for unevaluated products mixing zero,
    finite and complex factors, exercising both argument orders.
    """
    nz = Dummy(real=True, zero=False, finite=True)
    r = Dummy(real=True)
    c = Dummy(real=False, complex=True, finite=True)
    c2 = Dummy(real=False, complex=True, finite=True)
    i = Dummy(imaginary=True, finite=True)
    e = nz*r*c
    assert e.is_imaginary is None
    assert e.is_real is None
    e = nz*c
    assert e.is_imaginary is None
    assert e.is_real is False
    e = nz*i*c
    assert e.is_imaginary is False
    assert e.is_real is None
    # check for more than one complex; it is important to use
    # uniquely named Symbols to ensure that two factors appear
    # e.g. if the symbols have the same name they just become
    # a single factor, a power.
    e = nz*i*c*c2
    assert e.is_imaginary is None
    assert e.is_real is None
    # _eval_is_real and _eval_is_zero both employ trapping of the
    # zero value so args should be tested in both directions and
    # TO AVOID GETTING THE CACHED RESULT, Dummy MUST BE USED
    # real is unknown
    def test(z, b, e):
        # expected results of Mul(z, b) for a possibly-zero factor z and
        # a possibly-infinite factor b (0*oo is indeterminate)
        if z.is_zero and b.is_finite:
            assert e.is_real and e.is_zero
        else:
            assert e.is_real is None
        if b.is_finite:
            if z.is_zero:
                assert e.is_zero
            else:
                assert e.is_zero is None
        elif b.is_finite is False:
            if z.is_zero is None:
                assert e.is_zero is None
            else:
                assert e.is_zero is False
    for iz, ib in cartes(*[[True, False, None]]*2):
        z = Dummy('z', nonzero=iz)
        b = Dummy('f', finite=ib)
        e = Mul(z, b, evaluate=False)
        test(z, b, e)
        z = Dummy('nz', nonzero=iz)
        b = Dummy('f', finite=ib)
        e = Mul(b, z, evaluate=False)
        test(z, b, e)
    # real is True
    def test(z, b, e):
        # with both factors real, the product is real unless 0*oo can occur
        if z.is_zero and not b.is_finite:
            assert e.is_real is None
        else:
            assert e.is_real
    for iz, ib in cartes(*[[True, False, None]]*2):
        z = Dummy('z', nonzero=iz, real=True)
        b = Dummy('b', finite=ib, real=True)
        e = Mul(z, b, evaluate=False)
        test(z, b, e)
        z = Dummy('z', nonzero=iz, real=True)
        b = Dummy('b', finite=ib, real=True)
        e = Mul(b, z, evaluate=False)
        test(z, b, e)
def test_Mul_with_zero_infinite():
    """0 * (non-finite) is indeterminate in either argument order."""
    zer = Dummy(zero=True)
    inf = Dummy(finite=False)
    for factors in ((zer, inf), (inf, zer)):
        product = Mul(*factors, evaluate=False)
        assert product.is_positive is None
        assert product.is_hermitian is None
def test_issue_8247_8354():
    """is_positive on complicated radical/trig expressions that are
    exactly zero must return False (or at least not True) without hanging.
    """
    from sympy import tan
    z = sqrt(1 + sqrt(3)) + sqrt(3 + 3*sqrt(3)) - sqrt(10 + 6*sqrt(3))
    assert z.is_positive is False  # it's 0
    z = S('''-2**(1/3)*(3*sqrt(93) + 29)**2 - 4*(3*sqrt(93) + 29)**(4/3) +
12*sqrt(93)*(3*sqrt(93) + 29)**(1/3) + 116*(3*sqrt(93) + 29)**(1/3) +
174*2**(1/3)*sqrt(93) + 1678*2**(1/3)''')
    assert z.is_positive is False  # it's 0
    z = 2*(-3*tan(19*pi/90) + sqrt(3))*cos(11*pi/90)*cos(19*pi/90) - \
        sqrt(3)*(-3 + 4*cos(19*pi/90)**2)
    assert z.is_positive is not True  # it's zero and it shouldn't hang
    z = S('''9*(3*sqrt(93) + 29)**(2/3)*((3*sqrt(93) +
29)**(1/3)*(-2**(2/3)*(3*sqrt(93) + 29)**(1/3) - 2) - 2*2**(1/3))**3 +
72*(3*sqrt(93) + 29)**(2/3)*(81*sqrt(93) + 783) + (162*sqrt(93) +
1566)*((3*sqrt(93) + 29)**(1/3)*(-2**(2/3)*(3*sqrt(93) + 29)**(1/3) -
2) - 2*2**(1/3))**2''')
    assert z.is_positive is False  # it's 0 (and a single _mexpand isn't enough)
| bsd-3-clause |
jemmyw/ansible | lib/ansible/plugins/action/copy.py | 10 | 12996 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
import os
import pipes
import tempfile
from ansible import constants as C
from ansible.plugins.action import ActionBase
from ansible.utils.boolean import boolean
from ansible.utils.hashing import checksum
from ansible.utils.unicode import to_bytes
from ansible.parsing.vault import VaultLib
class ActionModule(ActionBase):
    """Action plugin for the ``copy`` module.

    Transfers a file (or inline ``content``) from the control machine to
    the target host, handling recursive directory copies, checksum-based
    change detection, check mode and diff mode, then delegates the final
    placement to the remote ``copy``/``file`` modules.
    """

    def run(self, tmp=None, task_vars=None):
        ''' handler for file transfer operations '''
        # The previous signature used task_vars=dict(), a mutable default
        # argument shared between calls; accept None and build a fresh
        # dict per invocation instead.
        if task_vars is None:
            task_vars = dict()

        source = self._task.args.get('src', None)
        content = self._task.args.get('content', None)
        dest = self._task.args.get('dest', None)
        raw = boolean(self._task.args.get('raw', 'no'))
        force = boolean(self._task.args.get('force', 'yes'))
        faf = self._task.first_available_file
        remote_src = boolean(self._task.args.get('remote_src', False))

        # Parameter sanity checks: exactly one source, and a destination.
        if (source is None and content is None and faf is None) or dest is None:
            return dict(failed=True, msg="src (or content) and dest are required")
        elif (source is not None or faf is not None) and content is not None:
            return dict(failed=True, msg="src and content are mutually exclusive")
        elif content is not None and dest is not None and dest.endswith("/"):
            return dict(failed=True, msg="dest must be a file if content is defined")

        # Check if the source ends with a "/"
        source_trailing_slash = False
        if source:
            source_trailing_slash = self._connection._shell.path_has_trailing_slash(source)

        # Define content_tempfile in case we set it after finding content populated.
        content_tempfile = None

        # If content is defined make a temp file and write the content into it.
        if content is not None:
            try:
                # If content comes to us as a dict it should be decoded json.
                # We need to encode it back into a string to write it out.
                if isinstance(content, dict) or isinstance(content, list):
                    content_tempfile = self._create_content_tempfile(json.dumps(content))
                else:
                    content_tempfile = self._create_content_tempfile(content)
                source = content_tempfile
            except Exception as err:
                return dict(failed=True, msg="could not write content temp file: %s" % err)

        # if we have first_available_file in our vars
        # look up the files and use the first one we find as src
        elif faf:
            source = self._get_first_available_file(faf, task_vars.get('_original_file', None))
            if source is None:
                return dict(failed=True, msg="could not find src in first_available_file list")

        elif remote_src:
            # Source already lives on the remote host: hand straight off to
            # the copy module, nothing to transfer from the controller.
            new_module_args = self._task.args.copy()
            del new_module_args['remote_src']
            return self._execute_module(module_name='copy', module_args=new_module_args, task_vars=task_vars, delete_remote_tmp=False)

        else:
            # Resolve a relative src against the role's files/ dir (or the
            # play basedir when there is no role).
            if self._task._role is not None:
                source = self._loader.path_dwim_relative(self._task._role._role_path, 'files', source)
            else:
                source = self._loader.path_dwim_relative(self._loader.get_basedir(), 'files', source)

        # A list of source file tuples (full_path, relative_path) which will try to copy to the destination
        source_files = []

        # If source is a directory populate our list else source is a file and translate it to a tuple.
        if os.path.isdir(source):
            # Get the amount of spaces to remove to get the relative path.
            if source_trailing_slash:
                sz = len(source) + 1
            else:
                sz = len(source.rsplit('/', 1)[0]) + 1

            # Walk the directory and append the file tuples to source_files.
            for base_path, sub_folders, files in os.walk(source):
                for file in files:
                    full_path = os.path.join(base_path, file)
                    rel_path = full_path[sz:]
                    source_files.append((full_path, rel_path))

            # If it's recursive copy, destination is always a dir,
            # explicitly mark it so (note - copy module relies on this).
            if not self._connection._shell.path_has_trailing_slash(dest):
                dest = self._connection._shell.join_path(dest, '')
        else:
            source_files.append((source, os.path.basename(source)))

        changed = False

        # A register for if we executed a module.
        # Used to cut down on command calls when not recursive.
        module_executed = False

        # Tell _execute_module to delete the file if there is one file.
        delete_remote_tmp = (len(source_files) == 1)

        # If this is a recursive action create a tmp path that we can share as the _exec_module create is too late.
        if not delete_remote_tmp:
            if tmp is None or "-tmp-" not in tmp:
                tmp = self._make_tmp_path()

        # expand any user home dir specifier
        dest = self._remote_expand_user(dest, tmp)

        diffs = []
        for source_full, source_rel in source_files:
            # Generate a hash of the local file.
            local_checksum = checksum(source_full)

            # If local_checksum is not defined we can't find the file so we should fail out.
            if local_checksum is None:
                return dict(failed=True, msg="could not find src=%s" % source_full)

            # This is kind of optimization - if user told us destination is
            # dir, do path manipulation right away, otherwise we still check
            # for dest being a dir via remote call below.
            if self._connection._shell.path_has_trailing_slash(dest):
                dest_file = self._connection._shell.join_path(dest, source_rel)
            else:
                dest_file = self._connection._shell.join_path(dest)

            # Attempt to get the remote checksum
            remote_checksum = self._remote_checksum(tmp, dest_file, all_vars=task_vars)

            if remote_checksum == '3':
                # The remote_checksum was executed on a directory.
                if content is not None:
                    # If source was defined as content remove the temporary file and fail out.
                    self._remove_tempfile_if_content_defined(content, content_tempfile)
                    return dict(failed=True, msg="can not use content with a dir as dest")
                else:
                    # Append the relative source location to the destination and retry remote_checksum
                    dest_file = self._connection._shell.join_path(dest, source_rel)
                    remote_checksum = self._remote_checksum(tmp, dest_file, all_vars=task_vars)

            if remote_checksum != '1' and not force:
                # remote_file exists so continue to next iteration.
                continue

            if local_checksum != remote_checksum:
                # The checksums don't match and we will change or error out.
                changed = True

                # Create a tmp path if missing only if this is not recursive.
                # If this is recursive we already have a tmp path.
                if delete_remote_tmp:
                    if tmp is None or "-tmp-" not in tmp:
                        tmp = self._make_tmp_path()

                if self._play_context.diff and not raw:
                    diffs.append(self._get_diff_data(tmp, dest_file, source_full, task_vars))

                if self._play_context.check_mode:
                    self._remove_tempfile_if_content_defined(content, content_tempfile)
                    changed = True
                    module_return = dict(changed=True)
                    continue

                # Define a remote directory that we will copy the file to.
                tmp_src = self._connection._shell.join_path(tmp, 'source')

                if not raw:
                    self._connection.put_file(source_full, tmp_src)
                else:
                    self._connection.put_file(source_full, dest_file)

                # We have copied the file remotely and no longer require our content_tempfile
                self._remove_tempfile_if_content_defined(content, content_tempfile)

                # fix file permissions when the copy is done as a different user
                if self._play_context.become and self._play_context.become_user != 'root':
                    self._remote_chmod('a+r', tmp_src, tmp)

                if raw:
                    # Continue to next iteration if raw is defined.
                    continue

                # Run the copy module
                # src and dest here come after original and override them
                # we pass dest only to make sure it includes trailing slash in case of recursive copy
                new_module_args = self._task.args.copy()
                new_module_args.update(
                    dict(
                        src=tmp_src,
                        dest=dest,
                        original_basename=source_rel,
                    )
                )

                module_return = self._execute_module(module_name='copy', module_args=new_module_args, task_vars=task_vars, delete_remote_tmp=delete_remote_tmp)
                module_executed = True

            else:
                # no need to transfer the file, already correct hash, but still need to call
                # the file module in case we want to change attributes
                self._remove_tempfile_if_content_defined(content, content_tempfile)

                if raw:
                    # Continue to next iteration if raw is defined.
                    self._remove_tmp_path(tmp)
                    continue

                # Build temporary module_args.
                new_module_args = self._task.args.copy()
                new_module_args.update(
                    dict(
                        src=source_rel,
                        dest=dest,
                        original_basename=source_rel
                    )
                )

                # Execute the file module.
                module_return = self._execute_module(module_name='file', module_args=new_module_args, task_vars=task_vars, delete_remote_tmp=delete_remote_tmp)
                module_executed = True

            if not module_return.get('checksum'):
                module_return['checksum'] = local_checksum
            # These result keys are booleans when present; use truthiness
            # rather than comparing with == True.
            if module_return.get('failed'):
                return module_return
            if module_return.get('changed'):
                changed = True

            # the file module returns the file path as 'path', but
            # the copy module uses 'dest', so add it if it's not there
            if 'path' in module_return and 'dest' not in module_return:
                module_return['dest'] = module_return['path']

        # Delete tmp path if we were recursive or if we did not execute a module.
        if (not C.DEFAULT_KEEP_REMOTE_FILES and not delete_remote_tmp) or (not C.DEFAULT_KEEP_REMOTE_FILES and delete_remote_tmp and not module_executed):
            self._remove_tmp_path(tmp)

        if module_executed and len(source_files) == 1:
            result = module_return
        else:
            result = dict(dest=dest, src=source, changed=changed)

        if diffs:
            result['diff'] = diffs

        return result

    def _create_content_tempfile(self, content):
        ''' Create a tempfile containing defined content '''
        fd, content_tempfile = tempfile.mkstemp()
        f = os.fdopen(fd, 'wb')
        content = to_bytes(content)
        try:
            f.write(content)
        except Exception:
            # Clean up the partial file, then re-raise the original
            # exception; the old `raise Exception(err)` wrapper discarded
            # the exception type and traceback.
            os.remove(content_tempfile)
            raise
        finally:
            f.close()
        return content_tempfile

    def _remove_tempfile_if_content_defined(self, content, content_tempfile):
        # A tempfile only exists when inline `content` was used; a real
        # src path must not be deleted.
        if content is not None:
            os.remove(content_tempfile)
| gpl-3.0 |
listamilton/supermilton.repository | plugin.video.efilmes/bs4/element.py | 438 | 61538 | import collections
import re
import sys
import warnings
from bs4.dammit import EntitySubstitution
# Encoding used when decoding byte strings into NavigableStrings.
DEFAULT_OUTPUT_ENCODING = "utf-8"

# True when running under Python 3.
PY3K = (sys.version_info[0] > 2)

# Matches one or more whitespace characters. Raw string so the \s escape
# is handed to the regex engine explicitly instead of relying on Python
# passing unknown string escapes through verbatim.
whitespace_re = re.compile(r"\s+")
def _alias(attr):
"""Alias one attribute name to another for backward compatibility"""
@property
def alias(self):
return getattr(self, attr)
@alias.setter
def alias(self):
return setattr(self, attr)
return alias
class NamespacedAttribute(unicode):
    """An attribute name that remembers its XML namespace and prefix.

    The instance behaves like the string "prefix:name" (or just the name
    or prefix when one of them is missing) and additionally exposes
    .prefix, .name and .namespace.
    """
    def __new__(cls, prefix, name, namespace=None):
        if name is None:
            text = prefix
        elif prefix is None:
            # Not really namespaced.
            text = name
        else:
            text = prefix + ":" + name
        obj = unicode.__new__(cls, text)
        obj.prefix = prefix
        obj.name = name
        obj.namespace = namespace
        return obj
# Base class for attribute values that declare a document's character
# encoding; subclasses override encode() to substitute a new encoding
# when the tree is serialized.
class AttributeValueWithCharsetSubstitution(unicode):
    """A stand-in object for a character encoding specified in HTML."""
class CharsetMetaAttributeValue(AttributeValueWithCharsetSubstitution):
    """A generic stand-in for the value of a meta tag's 'charset' attribute.

    When Beautiful Soup parses the markup '<meta charset="utf8">', the
    value of the 'charset' attribute will be one of these objects.
    """
    def __new__(cls, original_value):
        # Keep the value the document author wrote so it can be inspected
        # later even after encode() substitutes a different encoding.
        obj = unicode.__new__(cls, original_value)
        obj.original_value = original_value
        return obj

    def encode(self, encoding):
        # For a bare 'charset' attribute the serialized value is simply
        # the target encoding itself.
        return encoding
class ContentMetaAttributeValue(AttributeValueWithCharsetSubstitution):
    """A generic stand-in for the value of a meta tag's 'content' attribute.

    When Beautiful Soup parses the markup:
     <meta http-equiv="content-type" content="text/html; charset=utf8">

    The value of the 'content' attribute will be one of these objects.
    """
    # Raw string so the \s escape reaches the regex engine explicitly,
    # rather than relying on Python passing unknown escapes through.
    CHARSET_RE = re.compile(r"((^|;)\s*charset=)([^;]*)", re.M)

    def __new__(cls, original_value):
        match = cls.CHARSET_RE.search(original_value)
        if match is None:
            # No substitution necessary: deliberately return a plain
            # unicode object (not an instance of this class) because no
            # encode()-time rewriting will ever be needed.
            return unicode.__new__(unicode, original_value)
        obj = unicode.__new__(cls, original_value)
        obj.original_value = original_value
        return obj

    def encode(self, encoding):
        """Return the value with its declared charset rewritten to *encoding*."""
        def rewrite(match):
            return match.group(1) + encoding
        return self.CHARSET_RE.sub(rewrite, self.original_value)
class HTMLAwareEntitySubstitution(EntitySubstitution):
    """Entity substitution rules that are aware of some HTML quirks.

    Specifically, the contents of <script> and <style> tags should not
    undergo entity substitution: a NavigableString whose direct parent
    is one of those tags is returned unchanged.
    """

    cdata_containing_tags = set(["script", "style"])
    preformatted_tags = set(["pre"])

    @classmethod
    def _substitute_if_appropriate(cls, ns, f):
        """Apply substitution function *f* unless *ns* sits directly
        inside a CDATA-containing tag."""
        if not isinstance(ns, NavigableString):
            return f(ns)
        parent = ns.parent
        if parent is not None and parent.name in cls.cdata_containing_tags:
            # Do nothing.
            return ns
        # Substitute.
        return f(ns)

    @classmethod
    def substitute_html(cls, ns):
        return cls._substitute_if_appropriate(
            ns, EntitySubstitution.substitute_html)

    @classmethod
    def substitute_xml(cls, ns):
        return cls._substitute_if_appropriate(
            ns, EntitySubstitution.substitute_xml)
class PageElement(object):
    """Contains the navigational information for some part of the page
    (either a tag or a piece of text)"""

    # There are five possible values for the "formatter" argument passed in
    # to methods like encode() and prettify():
    #
    # "html" - All Unicode characters with corresponding HTML entities
    #   are converted to those entities on output.
    # "minimal" - Bare ampersands and angle brackets are converted to
    #   XML entities: &amp; &lt; &gt;
    # None - The null formatter. Unicode characters are never
    #   converted to entities. This is not recommended, but it's
    #   faster than "minimal".
    # A function - This function will be called on every string that
    #   needs to undergo entity substitution.
    #
    # In an HTML document, the default "html" and "minimal" functions
    # will leave the contents of <script> and <style> tags alone. For
    # an XML document, all tags will be given the same treatment.

    HTML_FORMATTERS = {
        "html" : HTMLAwareEntitySubstitution.substitute_html,
        "minimal" : HTMLAwareEntitySubstitution.substitute_xml,
        None : None
        }

    XML_FORMATTERS = {
        "html" : EntitySubstitution.substitute_html,
        "minimal" : EntitySubstitution.substitute_xml,
        None : None
        }

    def format_string(self, s, formatter='minimal'):
        """Format the given string using the given formatter.

        `formatter` may be a name looked up in the formatter tables above,
        or a callable applied directly to the string.
        """
        if not callable(formatter):
            formatter = self._formatter_for_name(formatter)
        if formatter is None:
            output = s
        else:
            output = formatter(s)
        return output

    @property
    def _is_xml(self):
        """Is this element part of an XML tree or an HTML tree?

        This is used when mapping a formatter name ("minimal") to an
        appropriate function (one that performs entity-substitution on
        the contents of <script> and <style> tags, or not). It's
        inefficient, but it should be called very rarely.
        """
        if self.parent is None:
            # This is the top-level object. It should have .is_xml set
            # from tree creation. If not, take a guess--BS is usually
            # used on HTML markup.
            return getattr(self, 'is_xml', False)
        # Walk up: only the root knows whether the tree is XML.
        return self.parent._is_xml

    def _formatter_for_name(self, name):
        "Look up a formatter function based on its name and the tree."
        if self._is_xml:
            return self.XML_FORMATTERS.get(
                name, EntitySubstitution.substitute_xml)
        else:
            return self.HTML_FORMATTERS.get(
                name, HTMLAwareEntitySubstitution.substitute_xml)

    def setup(self, parent=None, previous_element=None):
        """Sets up the initial relations between this element and
        other elements."""
        self.parent = parent
        self.previous_element = previous_element
        if previous_element is not None:
            self.previous_element.next_element = self
        self.next_element = None
        self.previous_sibling = None
        self.next_sibling = None
        # A new element appended under a parent becomes the next sibling
        # of that parent's current last child.
        if self.parent is not None and self.parent.contents:
            self.previous_sibling = self.parent.contents[-1]
            self.previous_sibling.next_sibling = self

    nextSibling = _alias("next_sibling")  # BS3
    previousSibling = _alias("previous_sibling")  # BS3

    def replace_with(self, replace_with):
        """Replace this element in the tree with `replace_with`,
        returning the (now detached) original element."""
        if replace_with is self:
            return
        if replace_with is self.parent:
            raise ValueError("Cannot replace a Tag with its parent.")
        old_parent = self.parent
        # Remember the slot before extract() detaches us from the parent.
        my_index = self.parent.index(self)
        self.extract()
        old_parent.insert(my_index, replace_with)
        return self
    replaceWith = replace_with  # BS3

    def unwrap(self):
        """Replace this element with its children, keeping their order."""
        my_parent = self.parent
        my_index = self.parent.index(self)
        self.extract()
        # Insert in reverse so each child lands at the same index and
        # ends up in original document order.
        for child in reversed(self.contents[:]):
            my_parent.insert(my_index, child)
        return self
    replace_with_children = unwrap
    replaceWithChildren = unwrap  # BS3

    def wrap(self, wrap_inside):
        """Put this element inside `wrap_inside` and return the wrapper."""
        me = self.replace_with(wrap_inside)
        wrap_inside.append(me)
        return wrap_inside

    def extract(self):
        """Destructively rips this element out of the tree."""
        if self.parent is not None:
            del self.parent.contents[self.parent.index(self)]

        #Find the two elements that would be next to each other if
        #this element (and any children) hadn't been parsed. Connect
        #the two.
        last_child = self._last_descendant()
        next_element = last_child.next_element

        if self.previous_element is not None:
            self.previous_element.next_element = next_element
        if next_element is not None:
            next_element.previous_element = self.previous_element
        self.previous_element = None
        last_child.next_element = None

        self.parent = None
        # Splice this element out of the sibling chain as well.
        if self.previous_sibling is not None:
            self.previous_sibling.next_sibling = self.next_sibling
        if self.next_sibling is not None:
            self.next_sibling.previous_sibling = self.previous_sibling
        self.previous_sibling = self.next_sibling = None
        return self

    def _last_descendant(self, is_initialized=True, accept_self=True):
        "Finds the last element beneath this object to be parsed."
        # When the tree links are already initialized, the element just
        # before our next sibling is, by construction, our last descendant.
        if is_initialized and self.next_sibling:
            last_child = self.next_sibling.previous_element
        else:
            last_child = self
            while isinstance(last_child, Tag) and last_child.contents:
                last_child = last_child.contents[-1]
        if not accept_self and last_child == self:
            last_child = None
        return last_child
    # BS3: Not part of the API!
    _lastRecursiveChild = _last_descendant

    def insert(self, position, new_child):
        """Insert `new_child` at `position` among this element's children,
        repairing all parent/sibling/next_element/previous_element links."""
        if new_child is self:
            raise ValueError("Cannot insert a tag into itself.")
        if (isinstance(new_child, basestring)
            and not isinstance(new_child, NavigableString)):
            new_child = NavigableString(new_child)

        position = min(position, len(self.contents))
        if hasattr(new_child, 'parent') and new_child.parent is not None:
            # We're 'inserting' an element that's already one
            # of this object's children.
            if new_child.parent is self:
                current_index = self.index(new_child)
                if current_index < position:
                    # We're moving this element further down the list
                    # of this object's children. That means that when
                    # we extract this element, our target index will
                    # jump down one.
                    position -= 1
            new_child.extract()

        new_child.parent = self
        previous_child = None
        if position == 0:
            new_child.previous_sibling = None
            new_child.previous_element = self
        else:
            previous_child = self.contents[position - 1]
            new_child.previous_sibling = previous_child
            new_child.previous_sibling.next_sibling = new_child
            new_child.previous_element = previous_child._last_descendant(False)
        if new_child.previous_element is not None:
            new_child.previous_element.next_element = new_child

        new_childs_last_element = new_child._last_descendant(False)

        if position >= len(self.contents):
            new_child.next_sibling = None

            parent = self
            parents_next_sibling = None
            while parents_next_sibling is None and parent is not None:
                parents_next_sibling = parent.next_sibling
                parent = parent.parent
                if parents_next_sibling is not None:
                    # We found the element that comes next in the document.
                    break

            if parents_next_sibling is not None:
                new_childs_last_element.next_element = parents_next_sibling
            else:
                # The last element of this tag is the last element in
                # the document.
                new_childs_last_element.next_element = None
        else:
            next_child = self.contents[position]
            new_child.next_sibling = next_child
            if new_child.next_sibling is not None:
                new_child.next_sibling.previous_sibling = new_child
            new_childs_last_element.next_element = next_child

        if new_childs_last_element.next_element is not None:
            new_childs_last_element.next_element.previous_element = new_childs_last_element
        self.contents.insert(position, new_child)

    def append(self, tag):
        """Appends the given tag to the contents of this tag."""
        self.insert(len(self.contents), tag)

    def insert_before(self, predecessor):
        """Makes the given element the immediate predecessor of this one.

        The two elements will have the same parent, and the given element
        will be immediately before this one.
        """
        if self is predecessor:
            raise ValueError("Can't insert an element before itself.")
        parent = self.parent
        if parent is None:
            raise ValueError(
                "Element has no parent, so 'before' has no meaning.")
        # Extract first so that the index won't be screwed up if they
        # are siblings.
        if isinstance(predecessor, PageElement):
            predecessor.extract()
        index = parent.index(self)
        parent.insert(index, predecessor)

    def insert_after(self, successor):
        """Makes the given element the immediate successor of this one.

        The two elements will have the same parent, and the given element
        will be immediately after this one.
        """
        if self is successor:
            raise ValueError("Can't insert an element after itself.")
        parent = self.parent
        if parent is None:
            raise ValueError(
                "Element has no parent, so 'after' has no meaning.")
        # Extract first so that the index won't be screwed up if they
        # are siblings.
        if isinstance(successor, PageElement):
            successor.extract()
        index = parent.index(self)
        parent.insert(index+1, successor)

    def find_next(self, name=None, attrs={}, text=None, **kwargs):
        """Returns the first item that matches the given criteria and
        appears after this Tag in the document."""
        return self._find_one(self.find_all_next, name, attrs, text, **kwargs)
    findNext = find_next  # BS3

    def find_all_next(self, name=None, attrs={}, text=None, limit=None,
                    **kwargs):
        """Returns all items that match the given criteria and appear
        after this Tag in the document."""
        return self._find_all(name, attrs, text, limit, self.next_elements,
                             **kwargs)
    findAllNext = find_all_next  # BS3

    def find_next_sibling(self, name=None, attrs={}, text=None, **kwargs):
        """Returns the closest sibling to this Tag that matches the
        given criteria and appears after this Tag in the document."""
        return self._find_one(self.find_next_siblings, name, attrs, text,
                             **kwargs)
    findNextSibling = find_next_sibling  # BS3

    def find_next_siblings(self, name=None, attrs={}, text=None, limit=None,
                           **kwargs):
        """Returns the siblings of this Tag that match the given
        criteria and appear after this Tag in the document."""
        return self._find_all(name, attrs, text, limit,
                              self.next_siblings, **kwargs)
    findNextSiblings = find_next_siblings   # BS3
    fetchNextSiblings = find_next_siblings  # BS2

    def find_previous(self, name=None, attrs={}, text=None, **kwargs):
        """Returns the first item that matches the given criteria and
        appears before this Tag in the document."""
        return self._find_one(
            self.find_all_previous, name, attrs, text, **kwargs)
    findPrevious = find_previous  # BS3

    def find_all_previous(self, name=None, attrs={}, text=None, limit=None,
                        **kwargs):
        """Returns all items that match the given criteria and appear
        before this Tag in the document."""
        return self._find_all(name, attrs, text, limit, self.previous_elements,
                           **kwargs)
    findAllPrevious = find_all_previous  # BS3
    fetchPrevious = find_all_previous    # BS2

    def find_previous_sibling(self, name=None, attrs={}, text=None, **kwargs):
        """Returns the closest sibling to this Tag that matches the
        given criteria and appears before this Tag in the document."""
        return self._find_one(self.find_previous_siblings, name, attrs, text,
                             **kwargs)
    findPreviousSibling = find_previous_sibling  # BS3

    def find_previous_siblings(self, name=None, attrs={}, text=None,
                               limit=None, **kwargs):
        """Returns the siblings of this Tag that match the given
        criteria and appear before this Tag in the document."""
        return self._find_all(name, attrs, text, limit,
                              self.previous_siblings, **kwargs)
    findPreviousSiblings = find_previous_siblings   # BS3
    fetchPreviousSiblings = find_previous_siblings  # BS2

    def find_parent(self, name=None, attrs={}, **kwargs):
        """Returns the closest parent of this Tag that matches the given
        criteria."""
        # NOTE: We can't use _find_one because findParents takes a different
        # set of arguments.
        r = None
        l = self.find_parents(name, attrs, 1, **kwargs)
        if l:
            r = l[0]
        return r
    findParent = find_parent  # BS3

    def find_parents(self, name=None, attrs={}, limit=None, **kwargs):
        """Returns the parents of this Tag that match the given
        criteria."""
        return self._find_all(name, attrs, None, limit, self.parents,
                             **kwargs)
    findParents = find_parents   # BS3
    fetchParents = find_parents  # BS2

    @property
    def next(self):
        # BS3-style shorthand for next_element.
        return self.next_element

    @property
    def previous(self):
        # BS3-style shorthand for previous_element.
        return self.previous_element

    #These methods do the real heavy lifting.

    def _find_one(self, method, name, attrs, text, **kwargs):
        # Delegate to the plural finder with limit=1 and unwrap the result.
        r = None
        l = method(name, attrs, text, 1, **kwargs)
        if l:
            r = l[0]
        return r

    def _find_all(self, name, attrs, text, limit, generator, **kwargs):
        "Iterates over a generator looking for things that match."
        if isinstance(name, SoupStrainer):
            strainer = name
        else:
            strainer = SoupStrainer(name, attrs, text, **kwargs)

        if text is None and not limit and not attrs and not kwargs:
            if name is True or name is None:
                # Optimization to find all tags.
                result = (element for element in generator
                          if isinstance(element, Tag))
                return ResultSet(strainer, result)
            elif isinstance(name, basestring):
                # Optimization to find all tags with a given name.
                result = (element for element in generator
                          if isinstance(element, Tag)
                          and element.name == name)
                return ResultSet(strainer, result)
        results = ResultSet(strainer)
        while True:
            try:
                i = next(generator)
            except StopIteration:
                break
            if i:
                found = strainer.search(i)
                if found:
                    results.append(found)
                    if limit and len(results) >= limit:
                        break
        return results

    #These generators can be used to navigate starting from both
    #NavigableStrings and Tags.
    @property
    def next_elements(self):
        i = self.next_element
        while i is not None:
            yield i
            i = i.next_element

    @property
    def next_siblings(self):
        i = self.next_sibling
        while i is not None:
            yield i
            i = i.next_sibling

    @property
    def previous_elements(self):
        i = self.previous_element
        while i is not None:
            yield i
            i = i.previous_element

    @property
    def previous_siblings(self):
        i = self.previous_sibling
        while i is not None:
            yield i
            i = i.previous_sibling

    @property
    def parents(self):
        i = self.parent
        while i is not None:
            yield i
            i = i.parent

    # Methods for supporting CSS selectors.

    tag_name_re = re.compile('^[a-z0-9]+$')

    # /^(\w+)\[(\w+)([=~\|\^\$\*]?)=?"?([^\]"]*)"?\]$/
    #   \---/  \---/\-------------/    \-------/
    #     |      |         |               |
    #     |      |         |           The value
    #     |      |    ~,|,^,$,* or =
    #     |   Attribute
    #    Tag
    attribselect_re = re.compile(
        r'^(?P<tag>\w+)?\[(?P<attribute>\w+)(?P<operator>[=~\|\^\$\*]?)' +
        r'=?"?(?P<value>[^\]"]*)"?\]$'
        )

    def _attr_value_as_string(self, value, default=None):
        """Force an attribute value into a string representation.

        A multi-valued attribute will be converted into a
        space-separated string.
        """
        value = self.get(value, default)
        if isinstance(value, list) or isinstance(value, tuple):
            value =" ".join(value)
        return value

    def _tag_name_matches_and(self, function, tag_name):
        """Combine a match function with a tag-name requirement."""
        if not tag_name:
            return function
        else:
            def _match(tag):
                return tag.name == tag_name and function(tag)
            return _match

    def _attribute_checker(self, operator, attribute, value=''):
        """Create a function that performs a CSS selector operation.

        Takes an operator, attribute and optional value. Returns a
        function that will return True for elements that match that
        combination.
        """
        if operator == '=':
            # string representation of `attribute` is equal to `value`
            return lambda el: el._attr_value_as_string(attribute) == value
        elif operator == '~':
            # space-separated list representation of `attribute`
            # contains `value`
            def _includes_value(element):
                attribute_value = element.get(attribute, [])
                if not isinstance(attribute_value, list):
                    attribute_value = attribute_value.split()
                return value in attribute_value
            return _includes_value
        elif operator == '^':
            # string representation of `attribute` starts with `value`
            return lambda el: el._attr_value_as_string(
                attribute, '').startswith(value)
        elif operator == '$':
            # string representation of `attribute` ends with `value`
            return lambda el: el._attr_value_as_string(
                attribute, '').endswith(value)
        elif operator == '*':
            # string representation of `attribute` contains `value`
            return lambda el: value in el._attr_value_as_string(attribute, '')
        elif operator == '|':
            # string representation of `attribute` is either exactly
            # `value` or starts with `value` and then a dash.
            def _is_or_starts_with_dash(element):
                attribute_value = element._attr_value_as_string(attribute, '')
                return (attribute_value == value or attribute_value.startswith(
                        value + '-'))
            return _is_or_starts_with_dash
        else:
            # No (recognized) operator: match on mere presence.
            return lambda el: el.has_attr(attribute)

    # Old non-property versions of the generators, for backwards
    # compatibility with BS3.
    def nextGenerator(self):
        return self.next_elements

    def nextSiblingGenerator(self):
        return self.next_siblings

    def previousGenerator(self):
        return self.previous_elements

    def previousSiblingGenerator(self):
        return self.previous_siblings

    def parentGenerator(self):
        return self.parents
class NavigableString(unicode, PageElement):
    """A plain string that is also a node in the parse tree."""

    PREFIX = ''
    SUFFIX = ''

    def __new__(cls, value):
        """Create a new NavigableString.

        When unpickling a NavigableString, this method is called with
        the string in DEFAULT_OUTPUT_ENCODING. That encoding needs to be
        passed in to the superclass's __new__ or the superclass won't know
        how to handle non-ASCII characters.
        """
        if isinstance(value, unicode):
            args = (value,)
        else:
            args = (value, DEFAULT_OUTPUT_ENCODING)
        return unicode.__new__(cls, *args)

    def __copy__(self):
        # Strings are immutable, so a "copy" can safely be the object itself.
        return self

    def __getnewargs__(self):
        # Pickle support: reconstruct from the plain unicode value.
        return (unicode(self),)

    def __getattr__(self, attr):
        """text.string gives you text. This is for backwards
        compatibility for Navigable*String, but for CData* it lets you
        get the string without the CData wrapper."""
        if attr != 'string':
            raise AttributeError(
                "'%s' object has no attribute '%s'" % (
                    self.__class__.__name__, attr))
        return self

    def output_ready(self, formatter="minimal"):
        """Run the string through the formatter and wrap it in
        PREFIX/SUFFIX."""
        return self.PREFIX + self.format_string(self, formatter) + self.SUFFIX

    @property
    def name(self):
        # A piece of text has no tag name.
        return None

    @name.setter
    def name(self, name):
        raise AttributeError("A NavigableString cannot be given a name.")
class PreformattedString(NavigableString):
    """A NavigableString not subject to the normal formatting rules.

    The string will be passed into the formatter (to trigger side effects),
    but the return value will be ignored.
    """

    def output_ready(self, formatter="minimal"):
        """CData strings are passed into the formatter.
        But the return value is ignored."""
        # Invoke the formatter only for its side effects; emit the raw text.
        self.format_string(self, formatter)
        return self.PREFIX + self + self.SUFFIX
class CData(PreformattedString):
    """A CDATA section, serialized as <![CDATA[...]]>."""
    PREFIX = u'<![CDATA['
    SUFFIX = u']]>'
class ProcessingInstruction(PreformattedString):
    """A processing instruction, serialized as <?...?>."""
    PREFIX = u'<?'
    SUFFIX = u'?>'
class Comment(PreformattedString):
    """An HTML/XML comment, serialized as <!--...-->."""
    PREFIX = u'<!--'
    SUFFIX = u'-->'
class Declaration(PreformattedString):
    """A declaration-style construct, serialized as <!...!>."""
    PREFIX = u'<!'
    SUFFIX = u'!>'
class Doctype(PreformattedString):
    """A document type declaration, serialized as <!DOCTYPE ...>."""

    @classmethod
    def for_name_and_ids(cls, name, pub_id, system_id):
        """Build a Doctype from a root element name plus optional
        public and system identifiers."""
        parts = [name or '']
        if pub_id is not None:
            parts.append(' PUBLIC "%s"' % pub_id)
            # A PUBLIC id may be followed by a bare system id.
            if system_id is not None:
                parts.append(' "%s"' % system_id)
        elif system_id is not None:
            parts.append(' SYSTEM "%s"' % system_id)
        return Doctype(''.join(parts))

    PREFIX = u'<!DOCTYPE '
    SUFFIX = u'>\n'
class Tag(PageElement):
"""Represents a found HTML tag with its attributes and contents."""
def __init__(self, parser=None, builder=None, name=None, namespace=None,
prefix=None, attrs=None, parent=None, previous=None):
"Basic constructor."
if parser is None:
self.parser_class = None
else:
# We don't actually store the parser object: that lets extracted
# chunks be garbage-collected.
self.parser_class = parser.__class__
if name is None:
raise ValueError("No value provided for new tag's name.")
self.name = name
self.namespace = namespace
self.prefix = prefix
if attrs is None:
attrs = {}
elif attrs and builder.cdata_list_attributes:
attrs = builder._replace_cdata_list_attribute_values(
self.name, attrs)
else:
attrs = dict(attrs)
self.attrs = attrs
self.contents = []
self.setup(parent, previous)
self.hidden = False
# Set up any substitutions, such as the charset in a META tag.
if builder is not None:
builder.set_up_substitutions(self)
self.can_be_empty_element = builder.can_be_empty_element(name)
else:
self.can_be_empty_element = False
parserClass = _alias("parser_class") # BS3
@property
def is_empty_element(self):
"""Is this tag an empty-element tag? (aka a self-closing tag)
A tag that has contents is never an empty-element tag.
A tag that has no contents may or may not be an empty-element
tag. It depends on the builder used to create the tag. If the
builder has a designated list of empty-element tags, then only
a tag whose name shows up in that list is considered an
empty-element tag.
If the builder has no designated list of empty-element tags,
then any tag with no contents is an empty-element tag.
"""
return len(self.contents) == 0 and self.can_be_empty_element
isSelfClosing = is_empty_element # BS3
    @property
    def string(self):
        """Convenience property to get the single string within this tag.
        :Return: If this tag has a single string child, return value
        is that string. If this tag has no children, or more than one
        child, return value is None. If this tag has one child tag,
        return value is the 'string' attribute of the child tag,
        recursively.
        """
        if len(self.contents) != 1:
            return None
        child = self.contents[0]
        if isinstance(child, NavigableString):
            return child
        # Single non-string child: delegate to its .string recursively.
        return child.string
    @string.setter
    def string(self, string):
        # Replace all children with a single string of the same class.
        self.clear()
        self.append(string.__class__(string))
    def _all_strings(self, strip=False, types=(NavigableString, CData)):
        """Yield all strings of certain classes, possibly stripping them.
        By default, yields only NavigableString and CData objects. So
        no comments, processing instructions, etc.

        With types=None, yields every NavigableString descendant instead.
        """
        for descendant in self.descendants:
            if (
                (types is None and not isinstance(descendant, NavigableString))
                or
                (types is not None and type(descendant) not in types)):
                continue
            if strip:
                descendant = descendant.strip()
                if len(descendant) == 0:
                    # Drop strings that were pure whitespace.
                    continue
            yield descendant
    strings = property(_all_strings)
    @property
    def stripped_strings(self):
        # Like .strings, but whitespace-stripped and empties skipped.
        for string in self._all_strings(True):
            yield string
    def get_text(self, separator=u"", types=(NavigableString, CData),
                 strip=False) if False else None
    def get_text(self, separator=u"", strip=False,
                 types=(NavigableString, CData)):
        """
        Get all child strings, concatenated using the given separator.
        """
        return separator.join([s for s in self._all_strings(
            strip, types=types)])
    getText = get_text
    text = property(get_text)
    def decompose(self):
        """Recursively destroys the contents of this tree."""
        self.extract()
        i = self
        # Walk the next_element chain, wiping each element's attributes
        # and contents so references between them are dropped.
        while i is not None:
            next = i.next_element
            i.__dict__.clear()
            i.contents = []
            i = next
    def clear(self, decompose=False):
        """
        Extract all children. If decompose is True, decompose instead.
        """
        if decompose:
            # Iterate over a copy, since decompose()/extract() mutate
            # self.contents while we loop.
            for element in self.contents[:]:
                if isinstance(element, Tag):
                    element.decompose()
                else:
                    element.extract()
        else:
            for element in self.contents[:]:
                element.extract()
def index(self, element):
"""
Find the index of a child by identity, not value. Avoids issues with
tag.contents.index(element) getting the index of equal elements.
"""
for i, child in enumerate(self.contents):
if child is element:
return i
raise ValueError("Tag.index: element not in tag")
    def get(self, key, default=None):
        """Returns the value of the 'key' attribute for the tag, or
        the value given for 'default' if it doesn't have that
        attribute."""
        return self.attrs.get(key, default)
    def has_attr(self, key):
        # True if this tag defines attribute `key`.
        return key in self.attrs
    def __hash__(self):
        # Hash by rendered markup (note: Tags are mutable objects).
        return str(self).__hash__()
    def __getitem__(self, key):
        """tag[key] returns the value of the 'key' attribute for the tag,
        and throws an exception if it's not there."""
        return self.attrs[key]
    def __iter__(self):
        "Iterating over a tag iterates over its contents."
        return iter(self.contents)
    def __len__(self):
        "The length of a tag is the length of its list of contents."
        return len(self.contents)
    def __contains__(self, x):
        # Membership tests look at direct children, not attributes.
        return x in self.contents
    def __nonzero__(self):
        "A tag is non-None even if it has no contents."
        return True
    def __setitem__(self, key, value):
        """Setting tag[key] sets the value of the 'key' attribute for the
        tag."""
        self.attrs[key] = value
    def __delitem__(self, key):
        "Deleting tag[key] deletes all 'key' attributes for the tag."
        self.attrs.pop(key, None)
    def __call__(self, *args, **kwargs):
        """Calling a tag like a function is the same as calling its
        find_all() method. Eg. tag('a') returns a list of all the A tags
        found within this tag."""
        return self.find_all(*args, **kwargs)
    def __getattr__(self, tag):
        """Treat unknown attribute access as a tag search:
        soup.b is soup.find("b"); soup.bTag is the deprecated BS3 spelling."""
        #print "Getattr %s.%s" % (self.__class__, tag)
        if len(tag) > 3 and tag.endswith('Tag'):
            # BS3: soup.aTag -> "soup.find("a")
            tag_name = tag[:-3]
            warnings.warn(
                '.%sTag is deprecated, use .find("%s") instead.' % (
                    tag_name, tag_name))
            return self.find(tag_name)
        # We special case contents to avoid recursion.
        elif not tag.startswith("__") and not tag=="contents":
            return self.find(tag)
        raise AttributeError(
            "'%s' object has no attribute '%s'" % (self.__class__, tag))
def __eq__(self, other):
"""Returns true iff this tag has the same name, the same attributes,
and the same contents (recursively) as the given tag."""
if self is other:
return True
if (not hasattr(other, 'name') or
not hasattr(other, 'attrs') or
not hasattr(other, 'contents') or
self.name != other.name or
self.attrs != other.attrs or
len(self) != len(other)):
return False
for i, my_child in enumerate(self.contents):
if my_child != other.contents[i]:
return False
return True
    def __ne__(self, other):
        """Returns true iff this tag is not identical to the other tag,
        as defined in __eq__."""
        return not self == other
    def __repr__(self, encoding=DEFAULT_OUTPUT_ENCODING):
        """Renders this tag as a string."""
        return self.encode(encoding)
    def __unicode__(self):
        # Text rendering (Python 2 unicode protocol).
        return self.decode()
    def __str__(self):
        # Byte-string rendering under Python 2; replaced below on Python 3.
        return self.encode()
    if PY3K:
        # On Python 3, str()/repr() must return text, not bytes.
        __str__ = __repr__ = __unicode__
    def encode(self, encoding=DEFAULT_OUTPUT_ENCODING,
               indent_level=None, formatter="minimal",
               errors="xmlcharrefreplace"):
        """Render this tag as a bytestring in the given encoding."""
        # Turn the data structure into Unicode, then encode the
        # Unicode.
        u = self.decode(indent_level, encoding, formatter)
        return u.encode(encoding, errors)
    def _should_pretty_print(self, indent_level):
        """Should this tag be pretty-printed?"""
        # Preformatted HTML tags keep their whitespace; XML has no
        # such exemption.
        return (
            indent_level is not None and
            (self.name not in HTMLAwareEntitySubstitution.preformatted_tags
             or self._is_xml))
    def decode(self, indent_level=None,
               eventual_encoding=DEFAULT_OUTPUT_ENCODING,
               formatter="minimal"):
        """Returns a Unicode representation of this tag and its contents.
        :param eventual_encoding: The tag is destined to be
        encoded into this encoding. This method is _not_
        responsible for performing that encoding. This information
        is passed in so that it can be substituted in if the
        document contains a <META> tag that mentions the document's
        encoding.
        """
        # First off, turn a string formatter into a function. This
        # will stop the lookup from happening over and over again.
        if not callable(formatter):
            formatter = self._formatter_for_name(formatter)
        attrs = []
        if self.attrs:
            # Render attributes sorted by name for deterministic output.
            for key, val in sorted(self.attrs.items()):
                if val is None:
                    # Valueless attribute: emit the bare name.
                    decoded = key
                else:
                    if isinstance(val, list) or isinstance(val, tuple):
                        # Multi-valued attribute (e.g. class lists).
                        val = ' '.join(val)
                    elif not isinstance(val, basestring):
                        val = unicode(val)
                    elif (
                        isinstance(val, AttributeValueWithCharsetSubstitution)
                        and eventual_encoding is not None):
                        # Substitute the real output charset into e.g. META.
                        val = val.encode(eventual_encoding)
                    text = self.format_string(val, formatter)
                    decoded = (
                        unicode(key) + '='
                        + EntitySubstitution.quoted_attribute_value(text))
                attrs.append(decoded)
        close = ''
        closeTag = ''
        prefix = ''
        if self.prefix:
            prefix = self.prefix + ":"
        if self.is_empty_element:
            close = '/'
        else:
            closeTag = '</%s%s>' % (prefix, self.name)
        pretty_print = self._should_pretty_print(indent_level)
        space = ''
        indent_space = ''
        if indent_level is not None:
            indent_space = (' ' * (indent_level - 1))
        if pretty_print:
            space = indent_space
            indent_contents = indent_level + 1
        else:
            indent_contents = None
        contents = self.decode_contents(
            indent_contents, eventual_encoding, formatter)
        if self.hidden:
            # This is the 'document root' object.
            s = contents
        else:
            s = []
            attribute_string = ''
            if attrs:
                attribute_string = ' ' + ' '.join(attrs)
            if indent_level is not None:
                # Even if this particular tag is not pretty-printed,
                # we should indent up to the start of the tag.
                s.append(indent_space)
            s.append('<%s%s%s%s>' % (
                prefix, self.name, attribute_string, close))
            if pretty_print:
                s.append("\n")
            s.append(contents)
            if pretty_print and contents and contents[-1] != "\n":
                s.append("\n")
            if pretty_print and closeTag:
                s.append(space)
            s.append(closeTag)
            if indent_level is not None and closeTag and self.next_sibling:
                # Even if this particular tag is not pretty-printed,
                # we're now done with the tag, and we should add a
                # newline if appropriate.
                s.append("\n")
            s = ''.join(s)
        return s
    def prettify(self, encoding=None, formatter="minimal"):
        """Pretty-print this tag: unicode by default, bytes if an
        encoding is given."""
        if encoding is None:
            return self.decode(True, formatter=formatter)
        else:
            return self.encode(encoding, True, formatter=formatter)
    def decode_contents(self, indent_level=None,
                        eventual_encoding=DEFAULT_OUTPUT_ENCODING,
                        formatter="minimal"):
        """Renders the contents of this tag as a Unicode string.
        :param eventual_encoding: The tag is destined to be
        encoded into this encoding. This method is _not_
        responsible for performing that encoding. This information
        is passed in so that it can be substituted in if the
        document contains a <META> tag that mentions the document's
        encoding.
        """
        # First off, turn a string formatter into a function. This
        # will stop the lookup from happening over and over again.
        if not callable(formatter):
            formatter = self._formatter_for_name(formatter)
        pretty_print = (indent_level is not None)
        s = []
        for c in self:
            text = None
            if isinstance(c, NavigableString):
                text = c.output_ready(formatter)
            elif isinstance(c, Tag):
                # Child tags recurse with the same indent level.
                s.append(c.decode(indent_level, eventual_encoding,
                                  formatter))
            # <pre> contents keep their whitespace verbatim.
            if text and indent_level and not self.name == 'pre':
                text = text.strip()
            if text:
                if pretty_print and not self.name == 'pre':
                    s.append(" " * (indent_level - 1))
                s.append(text)
                if pretty_print and not self.name == 'pre':
                    s.append("\n")
        return ''.join(s)
    def encode_contents(
        self, indent_level=None, encoding=DEFAULT_OUTPUT_ENCODING,
        formatter="minimal"):
        """Renders the contents of this tag as a bytestring."""
        contents = self.decode_contents(indent_level, encoding, formatter)
        return contents.encode(encoding)
    # Old method for BS3 compatibility
    def renderContents(self, encoding=DEFAULT_OUTPUT_ENCODING,
                       prettyPrint=False, indentLevel=0):
        """BS3-compatible wrapper around encode_contents()."""
        if not prettyPrint:
            indentLevel = None
        return self.encode_contents(
            indent_level=indentLevel, encoding=encoding)
#Soup methods
def find(self, name=None, attrs={}, recursive=True, text=None,
**kwargs):
"""Return only the first child of this Tag matching the given
criteria."""
r = None
l = self.find_all(name, attrs, recursive, text, 1, **kwargs)
if l:
r = l[0]
return r
findChild = find
    def find_all(self, name=None, attrs={}, recursive=True, text=None,
                 limit=None, **kwargs):
        """Extracts a list of Tag objects that match the given
        criteria. You can specify the name of the Tag and any
        attributes you want the Tag to have.
        The value of a key-value pair in the 'attrs' map can be a
        string, a list of strings, a regular expression object, or a
        callable that takes a string and returns whether or not the
        string matches for some custom definition of 'matches'. The
        same is true of the tag name."""
        # recursive=False restricts the search to direct children.
        generator = self.descendants
        if not recursive:
            generator = self.children
        return self._find_all(name, attrs, text, limit, generator, **kwargs)
    findAll = find_all  # BS3
    findChildren = find_all  # BS2
    #Generator methods
    @property
    def children(self):
        # return iter() to make the purpose of the method clear
        return iter(self.contents)  # XXX This seems to be untested.
    @property
    def descendants(self):
        """Yield every element below this tag by following the
        next_element chain, stopping after the last descendant."""
        if not len(self.contents):
            return
        stopNode = self._last_descendant().next_element
        current = self.contents[0]
        while current is not stopNode:
            yield current
            current = current.next_element
    # CSS selector code
    _selector_combinators = ['>', '+', '~']
    _select_debug = False
    def select(self, selector, _candidate_generator=None):
        """Perform a CSS selection operation on the current element.

        Returns a list of matching Tag objects. `_candidate_generator`
        is used internally when recursing for combinators.
        """
        tokens = selector.split()
        current_context = [self]
        if tokens[-1] in self._selector_combinators:
            raise ValueError(
                'Final combinator "%s" is missing an argument.' % tokens[-1])
        if self._select_debug:
            print 'Running CSS selector "%s"' % selector
        for index, token in enumerate(tokens):
            if self._select_debug:
                print ' Considering token "%s"' % token
            recursive_candidate_generator = None
            tag_name = None
            if tokens[index-1] in self._selector_combinators:
                # This token was consumed by the previous combinator. Skip it.
                if self._select_debug:
                    print ' Token was consumed by the previous combinator.'
                continue
            # Each operation corresponds to a checker function, a rule
            # for determining whether a candidate matches the
            # selector. Candidates are generated by the active
            # iterator.
            checker = None
            m = self.attribselect_re.match(token)
            if m is not None:
                # Attribute selector
                tag_name, attribute, operator, value = m.groups()
                checker = self._attribute_checker(operator, attribute, value)
            elif '#' in token:
                # ID selector
                tag_name, tag_id = token.split('#', 1)
                def id_matches(tag):
                    return tag.get('id', None) == tag_id
                checker = id_matches
            elif '.' in token:
                # Class selector
                tag_name, klass = token.split('.', 1)
                classes = set(klass.split('.'))
                def classes_match(candidate):
                    return classes.issubset(candidate.get('class', []))
                checker = classes_match
            elif ':' in token:
                # Pseudo-class
                tag_name, pseudo = token.split(':', 1)
                if tag_name == '':
                    raise ValueError(
                        "A pseudo-class must be prefixed with a tag name.")
                pseudo_attributes = re.match('([a-zA-Z\d-]+)\(([a-zA-Z\d]+)\)', pseudo)
                found = []
                if pseudo_attributes is not None:
                    pseudo_type, pseudo_value = pseudo_attributes.groups()
                    if pseudo_type == 'nth-of-type':
                        try:
                            pseudo_value = int(pseudo_value)
                        except:
                            raise NotImplementedError(
                                'Only numeric values are currently supported for the nth-of-type pseudo-class.')
                        if pseudo_value < 1:
                            raise ValueError(
                                'nth-of-type pseudo-class value must be at least 1.')
                        # Stateful checker: matches exactly the Nth candidate
                        # it sees, then raises StopIteration to cut the scan.
                        class Counter(object):
                            def __init__(self, destination):
                                self.count = 0
                                self.destination = destination
                            def nth_child_of_type(self, tag):
                                self.count += 1
                                if self.count == self.destination:
                                    return True
                                if self.count > self.destination:
                                    # Stop the generator that's sending us
                                    # these things.
                                    raise StopIteration()
                                return False
                        checker = Counter(pseudo_value).nth_child_of_type
                    else:
                        raise NotImplementedError(
                            'Only the following pseudo-classes are implemented: nth-of-type.')
            elif token == '*':
                # Star selector -- matches everything
                pass
            elif token == '>':
                # Run the next token as a CSS selector against the
                # direct children of each tag in the current context.
                recursive_candidate_generator = lambda tag: tag.children
            elif token == '~':
                # Run the next token as a CSS selector against the
                # siblings of each tag in the current context.
                recursive_candidate_generator = lambda tag: tag.next_siblings
            elif token == '+':
                # For each tag in the current context, run the next
                # token as a CSS selector against the tag's next
                # sibling that's a tag.
                def next_tag_sibling(tag):
                    yield tag.find_next_sibling(True)
                recursive_candidate_generator = next_tag_sibling
            elif self.tag_name_re.match(token):
                # Just a tag name.
                tag_name = token
            else:
                raise ValueError(
                    'Unsupported or invalid CSS selector: "%s"' % token)
            if recursive_candidate_generator:
                # This happens when the selector looks like  "> foo".
                #
                # The generator calls select() recursively on every
                # member of the current context, passing in a different
                # candidate generator and a different selector.
                #
                # In the case of "> foo", the candidate generator is
                # one that yields a tag's direct children (">"), and
                # the selector is "foo".
                next_token = tokens[index+1]
                def recursive_select(tag):
                    if self._select_debug:
                        print ' Calling select("%s") recursively on %s %s' % (next_token, tag.name, tag.attrs)
                        print '-' * 40
                    for i in tag.select(next_token, recursive_candidate_generator):
                        if self._select_debug:
                            print '(Recursive select picked up candidate %s %s)' % (i.name, i.attrs)
                        yield i
                    if self._select_debug:
                        print '-' * 40
                _use_candidate_generator = recursive_select
            elif _candidate_generator is None:
                # By default, a tag's candidates are all of its
                # children. If tag_name is defined, only yield tags
                # with that name.
                if self._select_debug:
                    # NOTE(review): this looks inverted ("[any]" when
                    # tag_name IS set) -- affects debug output only.
                    if tag_name:
                        check = "[any]"
                    else:
                        check = tag_name
                    print ' Default candidate generator, tag name="%s"' % check
                if self._select_debug:
                    # This is redundant with later code, but it stops
                    # a bunch of bogus tags from cluttering up the
                    # debug log.
                    def default_candidate_generator(tag):
                        for child in tag.descendants:
                            if not isinstance(child, Tag):
                                continue
                            if tag_name and not child.name == tag_name:
                                continue
                            yield child
                    _use_candidate_generator = default_candidate_generator
                else:
                    _use_candidate_generator = lambda tag: tag.descendants
            else:
                _use_candidate_generator = _candidate_generator
            new_context = []
            new_context_ids = set([])
            for tag in current_context:
                if self._select_debug:
                    print " Running candidate generator on %s %s" % (
                        tag.name, repr(tag.attrs))
                for candidate in _use_candidate_generator(tag):
                    if not isinstance(candidate, Tag):
                        continue
                    if tag_name and candidate.name != tag_name:
                        continue
                    if checker is not None:
                        try:
                            result = checker(candidate)
                        except StopIteration:
                            # The checker has decided we should no longer
                            # run the generator.
                            break
                    if checker is None or result:
                        if self._select_debug:
                            print " SUCCESS %s %s" % (candidate.name, repr(candidate.attrs))
                        if id(candidate) not in new_context_ids:
                            # If a tag matches a selector more than once,
                            # don't include it in the context more than once.
                            new_context.append(candidate)
                            new_context_ids.add(id(candidate))
                    elif self._select_debug:
                        print " FAILURE %s %s" % (candidate.name, repr(candidate.attrs))
            current_context = new_context
        if self._select_debug:
            print "Final verdict:"
            for i in current_context:
                print " %s %s" % (i.name, i.attrs)
        return current_context
    # Old names for backwards compatibility
    def childGenerator(self):
        """Deprecated BS3 name for the .children property."""
        return self.children
    def recursiveChildGenerator(self):
        """Deprecated BS3 name for the .descendants property."""
        return self.descendants
    def has_key(self, key):
        """This was kind of misleading because has_key() (attributes)
        was different from __in__ (contents). has_key() is gone in
        Python 3, anyway."""
        warnings.warn('has_key is deprecated. Use has_attr("%s") instead.' % (
            key))
        return self.has_attr(key)
# Next, a couple classes to represent queries and their results.
class SoupStrainer(object):
"""Encapsulates a number of ways of matching a markup element (tag or
text)."""
    def __init__(self, name=None, attrs={}, text=None, **kwargs):
        """Build a matcher from tag-name, attribute and text criteria.
        All criteria are normalized via _normalize_search_value; extra
        keyword arguments become attribute criteria."""
        self.name = self._normalize_search_value(name)
        if not isinstance(attrs, dict):
            # Treat a non-dict value for attrs as a search for the 'class'
            # attribute.
            kwargs['class'] = attrs
            attrs = None
        if 'class_' in kwargs:
            # Treat class_="foo" as a search for the 'class'
            # attribute, overriding any non-dict value for attrs.
            kwargs['class'] = kwargs['class_']
            del kwargs['class_']
        if kwargs:
            if attrs:
                # Copy before updating so the caller's dict isn't mutated.
                attrs = attrs.copy()
                attrs.update(kwargs)
            else:
                attrs = kwargs
        normalized_attrs = {}
        for key, value in attrs.items():
            normalized_attrs[key] = self._normalize_search_value(value)
        self.attrs = normalized_attrs
        self.text = self._normalize_search_value(text)
    def _normalize_search_value(self, value):
        """Coerce a search criterion to unicode (or a list thereof),
        leaving callables, regexps, booleans and None untouched."""
        # Leave it alone if it's a Unicode string, a callable, a
        # regular expression, a boolean, or None.
        if (isinstance(value, unicode) or callable(value) or hasattr(value, 'match')
            or isinstance(value, bool) or value is None):
            return value
        # If it's a bytestring, convert it to Unicode, treating it as UTF-8.
        if isinstance(value, bytes):
            return value.decode("utf8")
        # If it's listlike, convert it into a list of strings.
        if hasattr(value, '__iter__'):
            new_value = []
            for v in value:
                if (hasattr(v, '__iter__') and not isinstance(v, bytes)
                    and not isinstance(v, unicode)):
                    # This is almost certainly the user's mistake. In the
                    # interests of avoiding infinite loops, we'll let
                    # it through as-is rather than doing a recursive call.
                    new_value.append(v)
                else:
                    new_value.append(self._normalize_search_value(v))
            return new_value
        # Otherwise, convert it into a Unicode string.
        # The unicode(str()) thing is so this will do the same thing on Python 2
        # and Python 3.
        return unicode(str(value))
def __str__(self):
if self.text:
return self.text
else:
return "%s|%s" % (self.name, self.attrs)
    def search_tag(self, markup_name=None, markup_attrs={}):
        """Match this strainer against a tag, given either as a Tag
        object or as a (name, attrs) pair. Returns the match or None."""
        found = None
        markup = None
        if isinstance(markup_name, Tag):
            markup = markup_name
            # A Tag supports .get(), so it can stand in for the attr map.
            markup_attrs = markup
        call_function_with_tag_data = (
            isinstance(self.name, collections.Callable)
            and not isinstance(markup_name, Tag))
        if ((not self.name)
            or call_function_with_tag_data
            or (markup and self._matches(markup, self.name))
            or (not markup and self._matches(markup_name, self.name))):
            if call_function_with_tag_data:
                match = self.name(markup_name, markup_attrs)
            else:
                match = True
                markup_attr_map = None
                for attr, match_against in list(self.attrs.items()):
                    if not markup_attr_map:
                        if hasattr(markup_attrs, 'get'):
                            markup_attr_map = markup_attrs
                        else:
                            # Build a dict from a (key, value) sequence.
                            markup_attr_map = {}
                            for k, v in markup_attrs:
                                markup_attr_map[k] = v
                    attr_value = markup_attr_map.get(attr)
                    if not self._matches(attr_value, match_against):
                        match = False
                        break
            if match:
                if markup:
                    found = markup
                else:
                    found = markup_name
        # A text criterion must also be satisfied by the tag's string.
        if found and self.text and not self._matches(found.string, self.text):
            found = None
        return found
    searchTag = search_tag
    def search(self, markup):
        """Match this strainer against arbitrary markup: an iterable,
        a Tag, or a string. Returns the matching element or None."""
        # print 'looking for %s in %s' % (self, markup)
        found = None
        # If given a list of items, scan it for a text element that
        # matches.
        if hasattr(markup, '__iter__') and not isinstance(markup, (Tag, basestring)):
            for element in markup:
                if isinstance(element, NavigableString) \
                       and self.search(element):
                    found = element
                    break
        # If it's a Tag, make sure its name or attributes match.
        # Don't bother with Tags if we're searching for text.
        elif isinstance(markup, Tag):
            if not self.text or self.name or self.attrs:
                found = self.search_tag(markup)
        # If it's text, make sure the text matches.
        elif isinstance(markup, NavigableString) or \
             isinstance(markup, basestring):
            if not self.name and not self.attrs and self._matches(markup, self.text):
                found = markup
        else:
            raise Exception(
                "I don't know how to match against a %s" % markup.__class__)
        return found
    def _matches(self, markup, match_against):
        """Core matching rule: compare `markup` against a criterion that
        may be True, a callable, a string, a regexp, or an iterable."""
        # print u"Matching %s against %s" % (markup, match_against)
        result = False
        if isinstance(markup, list) or isinstance(markup, tuple):
            # This should only happen when searching a multi-valued attribute
            # like 'class'.
            if (isinstance(match_against, unicode)
                and ' ' in match_against):
                # A bit of a special case. If they try to match "foo
                # bar" on a multivalue attribute's value, only accept
                # the literal value "foo bar"
                #
                # XXX This is going to be pretty slow because we keep
                # splitting match_against. But it shouldn't come up
                # too often.
                return (whitespace_re.split(match_against) == markup)
            else:
                for item in markup:
                    if self._matches(item, match_against):
                        return True
                return False
        if match_against is True:
            # True matches any non-None value.
            return markup is not None
        if isinstance(match_against, collections.Callable):
            return match_against(markup)
        # Custom callables take the tag as an argument, but all
        # other ways of matching match the tag name as a string.
        if isinstance(markup, Tag):
            markup = markup.name
        # Ensure that `markup` is either a Unicode string, or None.
        markup = self._normalize_search_value(markup)
        if markup is None:
            # None matches None, False, an empty string, an empty list, and so on.
            return not match_against
        if isinstance(match_against, unicode):
            # Exact string match
            return markup == match_against
        if hasattr(match_against, 'match'):
            # Regexp match
            return match_against.search(markup)
        if hasattr(match_against, '__iter__'):
            # The markup must be an exact match against something
            # in the iterable.
            return markup in match_against
class ResultSet(list):
    """A ResultSet is just a list that keeps track of the SoupStrainer
    that created it."""
    def __init__(self, source, result=()):
        """Seed the list with `result` and remember the strainer."""
        list.__init__(self, result)
        self.source = source
| gpl-2.0 |
blitzmann/Pyfa | scripts/dump_data.py | 3 | 3214 | #!/usr/bin/env python
"""
This script bootstraps Phobos from a supplied path and feeds it
information regarding EVE data paths and where to dump data. It then imports
some other scripts and uses them to convert the json data into a SQLite
database and then compare the new database to the existing one, producing a
diff which can then be used to assist in the updating.
"""
import sys
import os
# Phobos location (default; overridable via --phobos)
phb_path = os.path.expanduser("path/to/phobos")
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-e", "--eve", dest="eve_path", help="Location of EVE directory", required=True)
parser.add_argument("-c", "--cache", dest="cache_path", help="Location of EVE cache directory. If not specified, an attempt will be make to automatically determine path.")
parser.add_argument("-r", "--res", dest="res_path", help="Location of EVE shared resource cache. If not specified, an attempt will be make to automatically determine path.")
parser.add_argument("-d", "--dump", dest="dump_path", help="Location of Phobos JSON dump directory", required=True)
parser.add_argument("-p", "--phobos", dest="phb_path", help="Location of Phobos, defaults to path noted in script", default=phb_path)
parser.add_argument("-s", "--singularity", action="store_true", help="Singularity build")
args = parser.parse_args()
# Expand ~ in user-supplied paths; the optional ones may stay None.
eve_path = os.path.expanduser(args.eve_path)
cache_path = os.path.expanduser(args.cache_path) if args.cache_path else None
res_path = os.path.expanduser(args.res_path) if args.res_path else None
dump_path = os.path.expanduser(args.dump_path)
script_path = os.path.dirname(__file__)
### Append Phobos to path
sys.path.append(os.path.expanduser(args.phb_path))
def header(text, subtext=None):
    """Print a banner: a starred rule, centered `text` (and optional
    `subtext`), then another rule, padded with blank lines."""
    rule = "* " * 30
    print()
    print(rule)
    print(text.center(60))
    if subtext:
        print(subtext.center(60))
    print(rule)
    print()
header("Dumping Phobos Data", dump_path)

# Phobos / Reverence imports must happen only after sys.path was extended.
import reverence
from flow import FlowManager
from miner import *
from translator import Translator
from writer import *

# NOTE(review): the expanded `cache_path` computed above is unused here;
# the raw args.cache_path is passed instead — confirm which is intended.
rvr = reverence.blue.EVE(eve_path, cachepath=args.cache_path, sharedcachepath=res_path, server="singularity" if args.singularity else "tranquility")
print("EVE Directory: {}".format(rvr.paths.root))
print("Cache Directory: {}".format(rvr.paths.cache))
print("Shared Resource Directory: {}".format(rvr.paths.sharedcache))

pickle_miner = ResourcePickleMiner(rvr)
trans = Translator(pickle_miner)
bulkdata_miner = BulkdataMiner(rvr, trans)
staticcache_miner = ResourceStaticCacheMiner(rvr, trans)
miners = (
    MetadataMiner(eve_path),
    bulkdata_miner,
    staticcache_miner,
    TraitMiner(staticcache_miner, bulkdata_miner, trans),
    SqliteMiner(rvr.paths.root, trans),
    CachedCallsMiner(rvr, trans),
    DynamicAttributesMiner(rvr),
    pickle_miner
)
writers = (
    JsonWriter(dump_path, indent=2),
)

# BUG FIX: this variable was previously named `list`, shadowing the builtin.
tables = "dgmexpressions,dgmattribs,dgmeffects,dgmtypeattribs,dgmtypeeffects,"\
    "dgmunits,invcategories,invgroups,invmetagroups,invmetatypes,"\
    "invtypes,mapbulk_marketGroups,phbmetadata,phbtraits,fsdTypeOverrides,"\
    "evegroups,evetypes,evecategories,mapbulk_marketGroups,clonegrades,dynamicattributes"

FlowManager(miners, writers).run(tables, "en-us")
miguelpalacio/python-for-android | python-build/python-libs/gdata/src/gdata/Crypto/Util/number.py | 232 | 5495 | #
# number.py : Number-theoretic functions
#
# Part of the Python Cryptography Toolkit
#
# Distribute and use freely; there are no restrictions on further
# dissemination and usage except those imposed by the laws of your
# country of residence. This software is provided "as is" without
# warranty of fitness for use or suitability for any purpose, express
# or implied. Use at your own risk or not at all.
#
__revision__ = "$Id: number.py,v 1.13 2003/04/04 18:21:07 akuchling Exp $"
# Python 2 arbitrary-precision integer type used throughout this module.
bignum = long
# Optional C accelerator; fall back to pure Python when unavailable.
try:
    from Crypto.PublicKey import _fastmath
except ImportError:
    _fastmath = None
# Commented out and replaced with faster versions below
## def long2str(n):
## s=''
## while n>0:
## s=chr(n & 255)+s
## n=n>>8
## return s
## import types
## def str2long(s):
## if type(s)!=types.StringType: return s # Integers will be left alone
## return reduce(lambda x,y : x*256+ord(y), s, 0L)
def size(N):
    """size(N:long) : int
    Returns the size of the number N in bits.

    size(0) == 0; negative N also yields 0 (no power of two is <= N).
    (Dropped the Python-2-only ``1L`` literal; ints auto-promote on
    Python 2, so this is portable to both major versions.)
    """
    bits, power = 0, 1
    while N >= power:
        bits += 1
        power = power << 1
    return bits
def getRandomNumber(N, randfunc):
    """getRandomNumber(N:int, randfunc:callable):long
    Return an N-bit random number.

    randfunc(k) must return k random bytes. The top bit is forced to 1
    so the result is exactly N bits long.
    """
    S = randfunc(N // 8)            # was '/': needs floor division on Python 3
    odd_bits = N % 8
    if odd_bits != 0:
        # One extra byte, keeping only its odd_bits high bits.
        # struct.pack('B', ...) yields a one-byte string on both
        # Python 2 and 3 (the old chr()+bytes concat broke on 3).
        import struct
        char = ord(randfunc(1)) >> (8 - odd_bits)
        S = struct.pack('B', char) + S
    value = bytes_to_long(S)
    value |= 2 ** (N - 1)           # Ensure high bit is set
    assert size(value) >= N
    return value
def GCD(x, y):
    """GCD(x:long, y:long): long
    Return the greatest common divisor of x and y (Euclid's algorithm,
    on absolute values)."""
    a, b = abs(x), abs(y)
    while a:
        a, b = b % a, a
    return b
def inverse(u, v):
    """inverse(u:long, u:long):long
    Return the inverse of u mod v.

    Extended Euclid; assumes gcd(u, v) == 1 for a true inverse.
    """
    u3, v3 = int(u), int(v)
    u1, v1 = 1, 0
    while v3 > 0:
        # BUG FIX for Python 3 compatibility: '/' was Python-2 integer
        # division here; '//' is floor division on both versions.
        q = u3 // v3
        u1, v1 = v1, u1 - v1 * q
        u3, v3 = v3, u3 - v3 * q
    # Normalize into the range [0, v).
    while u1 < 0:
        u1 = u1 + v
    return u1
# Given a number of bits to generate and a random generation function,
# find a prime number of the appropriate size.
def getPrime(N, randfunc):
    """getPrime(N:int, randfunc:callable):long
    Return a random N-bit prime number.
    """
    # Start from a random odd N-bit candidate and step by 2 until prime.
    number=getRandomNumber(N, randfunc) | 1
    while (not isPrime(number)):
        number=number+2
    return number
def isPrime(N):
    """isPrime(N:long):bool
    Return true if N is prime.

    Probabilistic: trial division by the small-prime sieve, then a
    Rabin-Miller test with the first seven sieve primes as bases.
    """
    if N == 1:
        return 0
    if N in sieve:
        return 1
    # Trial division by every prime below 256 first.
    for i in sieve:
        if (N % i)==0:
            return 0
    # Use the accelerator if available
    if _fastmath is not None:
        return _fastmath.isPrime(N)
    # Compute the highest bit that's set in N
    N1 = N - 1L
    n = 1L
    while (n<N):
        n=n<<1L
    n = n >> 1L
    # Rabin-Miller test
    for c in sieve[:7]:
        a=long(c) ; d=1L ; t=n
        while (t):  # Iterate over the bits in N1
            x=(d*d) % N
            if x==1L and d!=1L and d!=N1:
                return 0  # Square root of 1 found
            if N1 & t:
                d=(x*a) % N
            else:
                d=x
            t = t >> 1L
        if d!=1L:
            return 0
    return 1
# Small primes used for checking primality; these are all the primes
# less than 256. This should be enough to eliminate most of the odd
# numbers before needing to do a Rabin-Miller test at all.
# (Also used as the Rabin-Miller witness bases: see isPrime's sieve[:7].)
sieve=[2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59,
       61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127,
       131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193,
       197, 199, 211, 223, 227, 229, 233, 239, 241, 251]
# Improved conversion functions contributed by Barry Warsaw, after
# careful benchmarking
import struct
def long_to_bytes(n, blocksize=0):
    """long_to_bytes(n:long, blocksize:int) : string
    Convert a long integer to a byte string (big-endian).
    If optional blocksize is given and greater than zero, pad the front of the
    byte string with binary zeros so that the length is a multiple of
    blocksize.

    Ported to bytes literals / one-byte slices so it behaves identically
    on Python 2.6+ and Python 3.
    """
    # after much testing, this algorithm was deemed to be the fastest
    s = b''
    n = int(n)
    pack = struct.pack
    while n > 0:
        s = pack('>I', n & 0xffffffff) + s
        n = n >> 32
    # strip off leading zeros (s[i:i+1] yields one *byte* on both versions)
    for i in range(len(s)):
        if s[i:i+1] != b'\x00':
            break
    else:
        # only happens when n == 0
        s = b'\x00'
        i = 0
    s = s[i:]
    # add back some pad bytes. this could be done more efficiently w.r.t. the
    # de-padding being done above, but sigh...
    if blocksize > 0 and len(s) % blocksize:
        s = (blocksize - len(s) % blocksize) * b'\x00' + s
    return s
def bytes_to_long(s):
    """bytes_to_long(string) : long
    Convert a byte string to a long integer (big-endian).
    This is (essentially) the inverse of long_to_bytes().

    Ported to bytes literals so it behaves identically on
    Python 2.6+ and Python 3; an empty string yields 0.
    """
    acc = 0
    unpack = struct.unpack
    length = len(s)
    if length % 4:
        # Left-pad with zero bytes to a multiple of 4 for '>I' unpacking.
        extra = (4 - length % 4)
        s = b'\x00' * extra + s
        length = length + extra
    for i in range(0, length, 4):
        acc = (acc << 32) + unpack('>I', s[i:i+4])[0]
    return acc
# For backwards compatibility...
import warnings
def long2str(n, blocksize=0):
    """Deprecated alias: use long_to_bytes() instead."""
    warnings.warn("long2str() has been replaced by long_to_bytes()")
    return long_to_bytes(n, blocksize)
def str2long(s):
    """Deprecated alias: use bytes_to_long() instead."""
    warnings.warn("str2long() has been replaced by bytes_to_long()")
    return bytes_to_long(s)
| apache-2.0 |
cinnamoncoin/eloipool-1 | jsonrpcserver.py | 8 | 11072 | # Eloipool - Python Bitcoin pool server
# Copyright (C) 2011-2013 Luke Dashjr <luke-jr+eloipool@utopios.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from binascii import b2a_hex
import httpserver
import json
import logging
import networkserver
import socket
from time import time
import traceback
WithinLongpoll = httpserver.AsyncRequest
class _SentJSONError(BaseException):
 """Internal control-flow exception raised once a JSON error reply has
 already been sent; ``rv`` carries the sendReply() return value.
 Subclasses BaseException, presumably so plain ``except Exception``
 handlers do not swallow it -- confirm."""
 def __init__(self, rv):
  self.rv = rv
class JSONRPCHandler(httpserver.HTTPHandler):
 """HTTP handler for the pool's JSON-RPC interface.

 Dispatches JSON-RPC calls to ``doJSON_<method>`` methods, applies
 per-client quirks derived from the User-Agent header, and implements
 X-Long-Polling: a longpolled request is parked (via the WithinLongpoll
 exception) and answered later by wakeLongpoll().
 """
 # Quirks applied to every client unless the UA handler removes/adds some.
 default_quirks = {
  'NELH': None, # FIXME: identify which clients have a problem with this
 }
 LPHeaders = {
  'X-Long-Polling': None,
 }
 # Request paths treated as JSON-RPC endpoints ('/LP' variants longpoll).
 JSONRPCURIs = (b'/', b'/LP', b'/LP/')
 logger = logging.getLogger('JSONRPCHandler')
 # NOTE(review): called with the *server* object, not an instance
 # (see JSONRPCServer.final_init below); subclass hook.
 def final_init(server):
  pass
 def __init__(self, *a, **ka):
  super().__init__(*a, **ka)
  self.UA = None
 def sendReply(self, status=200, body=b'', headers=None, *a, **ka):
  # Default Content-Type/X-Long-Polling headers for JSON-RPC replies.
  headers = dict(headers) if headers else {}
  if body and body[0] == 123: # b'{'
   headers.setdefault('Content-Type', 'application/json')
  if status == 200 and self.path in self.JSONRPCURIs:
   if not body:
    headers.setdefault('Content-Type', 'application/json')
   headers.setdefault('X-Long-Polling', '/LP')
  return super().sendReply(status, body, headers, *a, **ka)
 def fmtError(self, reason = '', code = 100):
  # Build a raw JSON-RPC error body (bytes) without sending it.
  reason = json.dumps(reason)
  reason = r'{"result":null,"id":null,"error":{"name":"JSONRPCError","code":%d,"message":%s}}' % (code, reason)
  reason = reason.encode('utf8')
  return reason
 def doError(self, reason = '', code = 100):
  # Format and immediately send a JSON-RPC error with HTTP status 500.
  reason = self.fmtError(reason, code)
  return self.sendReply(500, reason)
 # Miners that do not advertise the 'midstate' extension but support it.
 _MidstateNotAdv = (b'phoenix', b'poclbm', b'gminor')
 def doHeader_user_agent(self, value):
  self.reqinfo['UA'] = value
  self.UA = value.decode('latin-1') # technically ASCII, but latin-1 ignores errors
  quirks = self.quirks
  (UA, v, *x) = value.split(b'/', 1) + [None]
  # Temporary HACK to keep working with older gmp-proxy
  # NOTE: This will go away someday.
  if UA == b'AuthServiceProxy':
   # SubmitBlock Boolean
   quirks['SBB'] = None
  try:
   # NOTE(review): on Python 3, v[0] is an int, so it can never equal
   # b'v'; a b'v1.2'-style version then fails int() below and falls into
   # the bare except, leaving v unparsed. Looks wrong -- confirm.
   if v[0] == b'v': v = v[1:]
   v = tuple(map(int, v.split(b'.'))) + (0,0,0)
  except:
   pass
  if UA in self._MidstateNotAdv:
   if UA == b'phoenix':
    if v != (1, 50, 0):
     quirks['midstate'] = None
    # NOTE(review): element-wise comparison, not a real version-tuple
    # compare (e.g. 1.7.5 is excluded because 5 >= 1) -- confirm cutoff.
    if v[0] < 2 and v[1] < 8 and v[2] < 1:
     quirks['NELH'] = None
   else:
    quirks['midstate'] = None
 def doHeader_x_minimum_wait(self, value):
  # Client-requested minimum longpoll wait, in seconds.
  self.reqinfo['MinWait'] = int(value)
 def doHeader_x_mining_extensions(self, value):
  self.extensions = value.decode('ascii').lower().split(' ')
 def processLP(self, lpid):
  # Re-enter longpolling unless the client's LP id still matches the
  # server's current one (i.e. nothing new to deliver).
  lpw = self.server.LPId
  if isinstance(lpid, str):
   if lpw != lpid:
    return
  self.doLongpoll()
 def doLongpoll(self, *a):
  # Park this request until wakeLongpoll(); *a is the deferred
  # (reqid, method, params) for _doJSON_i. Always raises WithinLongpoll.
  timeNow = time()
  self._LP = True
  self._LPCall = a
  if 'NELH' not in self.quirks:
   # [NOT No] Early Longpoll Headers
   self.sendReply(200, body=None, headers=self.LPHeaders)
   self.push(b"1\r\n{\r\n")
   self.changeTask(self._chunkedKA, timeNow + 45)
  else:
   self.changeTask(None)
  waitTime = self.reqinfo.get('MinWait', 15) # TODO: make default configurable
  self.waitTime = waitTime + timeNow
  totfromme = self.LPTrack()
  self.server._LPClients[id(self)] = self
  self.logger.debug("New LP client; %d total; %d from %s" % (len(self.server._LPClients), totfromme, self.remoteHost))
  raise WithinLongpoll
 def _chunkedKA(self):
  # Keepalive via chunked transfer encoding
  self.push(b"1\r\n \r\n")
  self.changeTask(self._chunkedKA, time() + 45)
 def LPTrack(self):
  # Count concurrent longpolls per IP and per user; returns the per-IP total.
  myip = self.remoteHost
  if myip not in self.server.LPTracking:
   self.server.LPTracking[myip] = 0
  self.server.LPTracking[myip] += 1
  myuser = self.Username
  if myuser not in self.server.LPTrackingByUser:
   self.server.LPTrackingByUser[myuser] = 0
  self.server.LPTrackingByUser[myuser] += 1
  return self.server.LPTracking[myip]
 def LPUntrack(self):
  self.server.LPTracking[self.remoteHost] -= 1
  self.server.LPTrackingByUser[self.Username] -= 1
 def cleanupLP(self):
  # Called when the connection is closed
  if not self._LP:
   return
  self.changeTask(None)
  try:
   del self.server._LPClients[id(self)]
  except KeyError:
   pass
  self.LPUntrack()
 def wakeLongpoll(self, wantClear = False):
  # Deliver the parked longpoll reply; reschedules itself if the client's
  # requested minimum wait has not yet elapsed.
  now = time()
  if now < self.waitTime:
   self.changeTask(lambda: self.wakeLongpoll(wantClear), self.waitTime)
   return
  else:
   self.changeTask(None)
  self.LPUntrack()
  self.server.tls.wantClear = wantClear
  try:
   rv = self._doJSON_i(*self._LPCall, longpoll=True)
  except WithinLongpoll:
   # Not sure why this would happen right now, but handle it sanely...
   return
  finally:
   self.server.tls.wantClear = False
  if 'NELH' not in self.quirks:
   rv = rv[1:] # strip the '{' we already sent
   self.push(('%x' % len(rv)).encode('utf8') + b"\r\n" + rv + b"\r\n0\r\n\r\n")
   self.reset_request()
   return
  try:
   self.sendReply(200, body=rv, headers=self.LPHeaders, tryCompression=False)
   raise httpserver.RequestNotHandled
  except httpserver.RequestHandled:
   # Expected
   pass
  finally:
   self.reset_request()
 def _doJSON_i(self, reqid, method, params, longpoll = False):
  # Invoke the resolved doJSON_* method and wrap the result (or error)
  # in a JSON-RPC envelope; returns raw bytes when longpolling.
  try:
   rv = getattr(self, method)(*params)
  except WithinLongpoll:
   self._LPCall = (reqid, method, params)
   raise
  except Exception as e:
   self.logger.error(("Error during JSON-RPC call (UA=%s, IP=%s): %s%s\n" % (self.reqinfo.get('UA'), self.remoteHost, method, params)) + traceback.format_exc())
   efun = self.fmtError if longpoll else self.doError
   return efun(r'Service error: %s' % (e,))
  rv = {'id': reqid, 'error': None, 'result': rv}
  try:
   rv = json.dumps(rv)
  except:
   efun = self.fmtError if longpoll else self.doError
   return efun(r'Error encoding reply in JSON')
  rv = rv.encode('utf8')
  return rv if longpoll else self.sendReply(200, rv, headers=self._JSONHeaders)
 def doJSON(self, data, longpoll = False):
  # TODO: handle JSON errors
  # Parse the request body and dispatch; an empty longpoll body defaults
  # to a 'getwork' call (classic getwork longpolling).
  try:
   data = data.decode('utf8')
  except UnicodeDecodeError as e:
   return self.doError(str(e))
  if longpoll and not data:
   self.JSONRPCId = jsonid = 1
   self.JSONRPCMethod = 'getwork'
   self._JSONHeaders = {}
   return self.doLongpoll(1, 'doJSON_getwork', ())
  try:
   data = json.loads(data)
   method = str(data['method']).lower()
   self.JSONRPCId = jsonid = data['id']
   self.JSONRPCMethod = method
   method = 'doJSON_' + method
  except ValueError:
   return self.doError(r'Parse error')
  except TypeError:
   return self.doError(r'Bad call')
  if not hasattr(self, method):
   return self.doError(r'Procedure not found')
  # TODO: handle errors as JSON-RPC
  self._JSONHeaders = {}
  params = data.setdefault('params', ())
  procfun = self._doJSON_i
  if longpoll and not params:
   procfun = self.doLongpoll
  return procfun(jsonid, method, params)
 def handle_close(self):
  self.cleanupLP()
  super().handle_close()
 def handle_request(self):
  # Entry point per HTTP request: routes /src/ downloads, enforces
  # authentication, and hands JSON-RPC bodies to doJSON().
  if not self.method in (b'GET', b'POST'):
   return self.sendReply(405)
  if not self.path in self.JSONRPCURIs:
   if isinstance(self.path, bytes) and self.path[:5] == b'/src/':
    return self.handle_src_request()
   return self.sendReply(404)
  if not self.Username:
   return self.doAuthenticate()
  try:
   data = b''.join(self.incoming)
   return self.doJSON(data, self.path[:3] == b'/LP')
  except socket.error:
   raise
  except WithinLongpoll:
   raise
  except httpserver.RequestHandled:
   raise
  except:
   self.logger.error(traceback.format_exc())
   return self.doError('uncaught error')
 def reset_request(self):
  self._LP = False
  self.JSONRPCMethod = None
  super().reset_request()
# Register the hyphenated HTTP header names as handler methods; plain
# identifiers cannot contain '-', so they are attached after the class body.
for _hdrname, _handler in (
 ('doHeader_user-agent', JSONRPCHandler.doHeader_user_agent),
 ('doHeader_x-minimum-wait', JSONRPCHandler.doHeader_x_minimum_wait),
 ('doHeader_x-mining-extensions', JSONRPCHandler.doHeader_x_mining_extensions),
):
 setattr(JSONRPCHandler, _hdrname, _handler)
del _hdrname, _handler
JSONRPCListener = networkserver.NetworkListener
class JSONRPCServer(networkserver.AsyncSocketServer):
 """Async socket server for the JSON-RPC interface.

 Tracks parked longpoll clients (_LPClients) and coalesces longpoll
 wakeups: wakeLongpoll() only flags a request; pre_schedule() performs
 it, rate-limited by _LPWaitTime.
 """
 logger = logging.getLogger('JSONRPCServer')
 waker = True
 def __init__(self, *a, **ka):
  ka.setdefault('RequestHandlerClass', JSONRPCHandler)
  super().__init__(*a, **ka)
  self.SecretUser = None
  self.ShareTarget = 0x00000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffff
  self._LPId = 0
  self.LPId = '%d' % (time(),)
  # LPRequest: False = idle, 1 = wakeup requested, 2 = wakeup scheduled.
  self.LPRequest = False
  self._LPClients = {}
  self._LPWaitTime = time() + 15
  self.LPTracking = {}
  self.LPTrackingByUser = {}
 def checkAuthentication(self, username, password):
  # Default policy: accept everyone; override in subclass/config.
  return True
 def final_init(self):
  JSONRPCHandler.final_init(self)
 def pre_schedule(self):
  if self.LPRequest == 1:
   self._LPsch()
 def wakeLongpoll(self, wantClear = False):
  # Flag a longpoll wakeup (ignored if one is already pending) and bump
  # the LP id so reconnecting clients see there is new work.
  if self.LPRequest:
   self.logger.info('Ignoring longpoll attempt while another is waiting')
   return
  self._LPId += 1
  self.LPId = '%d %d' % (time(), self._LPId)
  self._LPWantClear = wantClear
  self.LPRequest = 1
  self.wakeup()
 def _LPsch(self):
  # Either run the wakeup now or schedule it for the rate-limit deadline.
  now = time()
  if self._LPWaitTime > now:
   delay = self._LPWaitTime - now
   self.logger.info('Waiting %.3g seconds to longpoll' % (delay,))
   self.schedule(self._actualLP, self._LPWaitTime)
   self.LPRequest = 2
  else:
   self._actualLP()
 def _actualLP(self):
  # Wake every parked longpoll client; per-client errors are counted out
  # of the success total but do not abort the sweep.
  self.LPRequest = False
  C = tuple(self._LPClients.values())
  self._LPClients = {}
  if not C:
   self.logger.info('Nobody to longpoll')
   return
  OC = len(C)
  self.logger.debug("%d clients to wake up..." % (OC,))
  now = time()
  for ic in C:
   self.lastHandler = ic
   try:
    ic.wakeLongpoll(self._LPWantClear)
   except socket.error:
    OC -= 1
    # Ignore socket errors; let the main event loop take care of them later
   except:
    OC -= 1
    self.logger.debug('Error waking longpoll handler:\n' + traceback.format_exc())
  self._LPWaitTime = time()
  self.logger.info('Longpoll woke up %d clients in %.3f seconds' % (OC, self._LPWaitTime - now))
  self._LPWaitTime += 5 # TODO: make configurable: minimum time between longpolls
 def TopLPers(self, n = 0x10):
  # Debug helper: print the n IPs with the most tracked longpolls.
  tmp = list(self.LPTracking.keys())
  tmp.sort(key=lambda k: self.LPTracking[k])
  for jerk in map(lambda k: (k, self.LPTracking[k]), tmp[-n:]):
   print(jerk)
 def TopLPersByUser(self, n = 0x10):
  # Debug helper: print the n users with the most tracked longpolls.
  tmp = list(self.LPTrackingByUser.keys())
  tmp.sort(key=lambda k: self.LPTrackingByUser[k])
  for jerk in map(lambda k: (k, self.LPTrackingByUser[k]), tmp[-n:]):
   print(jerk)
| agpl-3.0 |
huzq/scikit-learn | examples/svm/plot_separating_hyperplane_unbalanced.py | 44 | 2463 | """
=================================================
SVM: Separating hyperplane for unbalanced classes
=================================================
Find the optimal separating hyperplane using an SVC for classes that
are unbalanced.
We first find the separating plane with a plain SVC and then plot
(dashed) the separating hyperplane with automatic correction for
unbalanced classes.
.. currentmodule:: sklearn.linear_model
.. note::
This example will also work by replacing ``SVC(kernel="linear")``
with ``SGDClassifier(loss="hinge")``. Setting the ``loss`` parameter
of the :class:`SGDClassifier` equal to ``hinge`` will yield behaviour
such as that of a SVC with a linear kernel.
For example try instead of the ``SVC``::
clf = SGDClassifier(n_iter=100, alpha=0.01)
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
from sklearn.datasets import make_blobs
# we create two clusters of random points
n_samples_1 = 1000
n_samples_2 = 100
centers = [[0.0, 0.0], [2.0, 2.0]]
clusters_std = [1.5, 0.5]
X, y = make_blobs(n_samples=[n_samples_1, n_samples_2],
      centers=centers,
      cluster_std=clusters_std,
      random_state=0, shuffle=False)
# fit the model and get the separating hyperplane
clf = svm.SVC(kernel='linear', C=1.0)
clf.fit(X, y)
# fit the model and get the separating hyperplane using weighted classes
# (class_weight={1: 10} penalizes mistakes on the minority class 10x more)
wclf = svm.SVC(kernel='linear', class_weight={1: 10})
wclf.fit(X, y)
# plot the samples
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired, edgecolors='k')
# plot the decision functions for both classifiers
ax = plt.gca()
xlim = ax.get_xlim()
ylim = ax.get_ylim()
# create grid to evaluate model
xx = np.linspace(xlim[0], xlim[1], 30)
yy = np.linspace(ylim[0], ylim[1], 30)
YY, XX = np.meshgrid(yy, xx)
xy = np.vstack([XX.ravel(), YY.ravel()]).T
# get the separating hyperplane
Z = clf.decision_function(xy).reshape(XX.shape)
# plot decision boundary and margins
# (the contour at level 0 is exactly the decision boundary)
a = ax.contour(XX, YY, Z, colors='k', levels=[0], alpha=0.5, linestyles=['-'])
# get the separating hyperplane for weighted classes
Z = wclf.decision_function(xy).reshape(XX.shape)
# plot decision boundary and margins for weighted classes
b = ax.contour(XX, YY, Z, colors='r', levels=[0], alpha=0.5, linestyles=['-'])
plt.legend([a.collections[0], b.collections[0]], ["non weighted", "weighted"],
   loc="upper right")
plt.show()
| bsd-3-clause |
zhaojiedi1992/pm | pm/pipelines.py | 1 | 3614 | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import json
import csv
import codecs
from collections import OrderedDict
#from pm.comm.log import *
from pm.items import PmItem
from pm.spiders.pmData import *
class PmPipeline(object):
 """Default (no-op) pipeline: passes every scraped item through unchanged."""
 def process_item(self, item, spider):
  return item
def trimsignal(str):
return str.replace("\n","").replace(" ","")
def changeOneData(item):
it = item
it["monitortime"]=it["monitortime"].replace(u"数据更新时间:", "")
if it["iscity"] == 2:
it["primarypollutant"] = trimsignal(it["primarypollutant"])
pass
elif it["iscity"] == 1:
it["aqi"] = trimsignal(it["aqi"])
it["pm25"] = trimsignal(it["pm25"])
it["pm10"] = trimsignal(it["pm10"])
it["co"] = trimsignal(it["co"])
it["pm25"] = trimsignal(it["aqi"])
it["no2"] = trimsignal(it["no2"])
it["o3"] = trimsignal(it["o3"])
it["o3_8h"] = trimsignal(it["o3_8h"])
it["so2"] = trimsignal(it["so2"])
it["pollutantlevel"] = re.findall(r".*((.+?)).*", it["pollutantlevel"])[0]
it["primarypollutant"] = trimsignal(it["primarypollutant"]).replace("首要污染物:", "")
class JsonWithEncodingPipeline(object):
 # Serializes every scraped item into a single JSON array in data_utf8.json.
 def __init__(self):
  #info("."*300)
  self.file = codecs.open('data_utf8.json', 'w', encoding='utf-8')
  self.file.write("[\n")
  # True until the first item is written; decides whether a separating
  # ",\n" must precede the next JSON object.
  self.isfirst=True
 def process_item(self, item, spider):
  # Normalize the item, then append it to the growing JSON array.
  changeOneData(item)
  if self.isfirst:
   line =json.dumps(dict(item), ensure_ascii=False, sort_keys=False)
   self.isfirst=False
  else:
   line = ",\n" +json.dumps( dict(item), ensure_ascii=False, sort_keys=False)
  self.file.write(line)
  return item
 def close_spider(self, spider):
  # Close the JSON array started in __init__ and release the file handle.
  self.file.write("]")
  self.file.close()
class CsvPipeline(object):
 """Writes every scraped item as one row of data_utf8.csv (header row first)."""
 def open_spider(self, spider):
  self.file = open(r'data_utf8.csv', 'w', encoding='utf-8')
  fieldnames = list(PmItem.fields.keys())
  #self.writer = csv.DictWriter(self.file, fieldnames=fieldnames, dialect='excel')
  self.writer = csv.DictWriter(self.file, fieldnames=fieldnames)
  self.writer.writeheader()
 def close_spider(self, spider):
  self.file.close()
 def process_item(self, item, spider):
  changeOneData(item)
  if item.get("city", None) is not None:
   self.writer.writerow(dict(item))
  # BUG FIX: the item must be returned so later pipeline stages still
  # receive it (this `return` sat on one side of an unresolved merge
  # conflict that made the module a SyntaxError).
  return item
class OraclePipeline(object):
 """Matches each item to a station code and stores it via databaseTool.

 Restored from the HEAD side of the unresolved merge conflict; the
 conflict markers (<<<<<<< / ======= / >>>>>>>) have been removed.
 """
 def open_spider(self, spider):
  self.tool = databaseTool()
  self.tool.start()
  self.info=list(self.tool.getStationInfo())
  # Log of (city, position) pairs that could not be matched to a station code.
  self.file = open(r'data_utf8.log', 'w', encoding='utf-8')
 def close_spider(self, spider):
  self.tool.close()
 def process_item(self, item, spider):
  changeOneData(item)
  hour = T_BUS_AIR_QUALITY_HOUR()
  #hour = T_BUS_AIR_QUALITY_HOUR(**item)
  keys=list(PmItem.fields.keys())
  for key in keys:
   try:
    setattr(hour,key,item[key])
   except:
    pass
  code = [ it for it in self.info if it[1]==item["city"] and it[2]==item["positionname"]]
  if len(code) >0:
   setattr(hour, "stationcode",code[0][0] )
  else:
   self.file.write(item["city"]+"," +item["positionname"] +"\n")
  self.tool.addobj(hour)
  return item
| apache-2.0 |
whuang001/cts | gui/Interest.py | 1 | 2153 | """ show users who share similar interests """
from PyQt5.QtWidgets import QWidget, QTableWidgetItem
from PyQt5 import QtCore, QtGui, QtWidgets
class Interest(QWidget):
 """Widget listing users with similar interests in a one-column table."""
 def __init__(self, parent=None):
  super(Interest, self).__init__(parent)
  self.setupUi(self)
 def setupUi(self, Form):
  # Qt Designer-style UI construction: a group box containing a single
  # one-column table whose last column stretches to fill the widget.
  Form.setObjectName("Form")
  Form.resize(841, 591)
  self.verticalLayout = QtWidgets.QVBoxLayout(Form)
  self.verticalLayout.setObjectName("verticalLayout")
  self.groupBox = QtWidgets.QGroupBox(Form)
  font = QtGui.QFont()
  font.setPointSize(13)
  font.setBold(False)
  font.setItalic(True)
  font.setWeight(50)
  self.groupBox.setFont(font)
  self.groupBox.setObjectName("groupBox")
  self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.groupBox)
  self.verticalLayout_2.setObjectName("verticalLayout_2")
  self.tableWidget = QtWidgets.QTableWidget(self.groupBox)
  self.tableWidget.setObjectName("tableWidget")
  self.tableWidget.setColumnCount(1)
  self.tableWidget.setRowCount(0)
  item = QtWidgets.QTableWidgetItem()
  self.tableWidget.setHorizontalHeaderItem(0, item)
  self.tableWidget.horizontalHeader().setStretchLastSection(True)
  self.verticalLayout_2.addWidget(self.tableWidget)
  self.verticalLayout.addWidget(self.groupBox, 0, QtCore.Qt.AlignHCenter)
  self.retranslateUi(Form)
  QtCore.QMetaObject.connectSlotsByName(Form)
 def retranslateUi(self, Form):
  # Apply translatable UI strings (standard Qt Designer pattern).
  _translate = QtCore.QCoreApplication.translate
  Form.setWindowTitle(_translate("Form", "Form"))
  self.groupBox.setTitle(_translate("Form", "You may interest in these users:"))
  item = self.tableWidget.horizontalHeaderItem(0)
  item.setText(_translate("Form", "User ID"))
def setUser(self, userIDs):
self.tableWidget.setRowCount(0)
for i in range (len(userIDs) - 1):
rowPosition = self.tableWidget.rowCount()
self.tableWidget.insertRow(rowPosition)
self.tableWidget.setItem(rowPosition, 0, QTableWidgetItem(userIDs[i][0])) | mit |
sunils34/buffer-django-nonrel | django/contrib/flatpages/models.py | 410 | 1134 | from django.db import models
from django.contrib.sites.models import Site
from django.utils.translation import ugettext_lazy as _
class FlatPage(models.Model):
 # Django "flat page": a simple CMS-style page stored in the database and
 # served at `url` on the sites listed in `sites`.
 url = models.CharField(_('URL'), max_length=100, db_index=True)
 title = models.CharField(_('title'), max_length=200)
 content = models.TextField(_('content'), blank=True)
 enable_comments = models.BooleanField(_('enable comments'))
 # Optional per-page template; falls back to flatpages/default.html.
 template_name = models.CharField(_('template name'), max_length=70, blank=True,
  help_text=_("Example: 'flatpages/contact_page.html'. If this isn't provided, the system will use 'flatpages/default.html'."))
 registration_required = models.BooleanField(_('registration required'), help_text=_("If this is checked, only logged-in users will be able to view the page."))
 sites = models.ManyToManyField(Site)
 class Meta:
  db_table = 'django_flatpage'
  verbose_name = _('flat page')
  verbose_name_plural = _('flat pages')
  ordering = ('url',)
 def __unicode__(self):
  # Python 2 string representation: "url -- title".
  return u"%s -- %s" % (self.url, self.title)
 def get_absolute_url(self):
  return self.url
| bsd-3-clause |
postfix/viper-1 | modules/clamav.py | 3 | 2365 | # This file is part of Viper - https://github.com/viper-framework/viper
# See the file 'LICENSE' for copying permission.
try:
import pyclamd
HAVE_CLAMD = True
except ImportError:
HAVE_CLAMD = False
from viper.common.abstracts import Module
from viper.core.session import __sessions__
class ClamAV(Module):
 """Viper module: scan the currently opened file via a local clamd daemon."""
 cmd = 'clamav'
 description = 'Scan file from local ClamAV daemon'
 authors = ['neriberto']
 def __init__(self):
  super(ClamAV, self).__init__()
  self.parser.add_argument('-s', '--socket', help='Specify an unix socket (default: Clamd Unix Socket)')
 def run(self):
  """Connect to clamd (custom or default socket), scan the session file,
  and log the scan verdict."""
  super(ClamAV, self).run()
  if self.args is None:
   return
  if not HAVE_CLAMD:
   # NOTE(review): message says 'install requests' but the missing
   # dependency is pyclamd -- confirm and fix the wording upstream.
   self.log('error', "Missing dependency, install requests (`pip install pyclamd`)")
   return
  daemon = None
  socket = None
  if self.args.socket is not None:
   socket = self.args.socket
   self.log('info', "Using socket {0} to connect to ClamAV daemon".format(socket))
   try:
    daemon = pyclamd.ClamdUnixSocket(socket)
   except Exception as e:
    self.log('error', "Daemon connection failure, {0}".format(e))
    return
  if not __sessions__.is_set():
   self.log('error', "No session opened")
   return
  try:
   # Fall back to the default clamd unix socket when none was given.
   if not daemon:
    daemon = pyclamd.ClamdUnixSocket()
    socket = 'Clamav'
  except Exception as e:
   self.log('error', "Daemon connection failure, {0}".format(e))
   return
  try:
   if daemon.ping():
    results = daemon.scan_file(__sessions__.current.file.path)
   else:
    # NOTE(review): when ping() is falsy, `results` is never bound but
    # is still read below -- potential UnboundLocalError; confirm.
    self.log('error', "Unable to connect to the daemon")
  except Exception as e:
   self.log('error', "Unable to scan with antivirus daemon, {0}".format(e))
   return
  found = None
  name = 'not found'
  if results:
   # clamd returns {path: (status, signature_name)}.
   for item in results:
    found = results[item][0]
    name = results[item][1]
  if found == 'ERROR':
   self.log('error', "Check permissions of the binary folder, {0}".format(name))
  else:
   self.log('info', "Daemon {0} returns: {1}".format(socket, name))
| bsd-3-clause |
mydongistiny/external_chromium_org | ui/resources/resource_check/resource_scale_factors.py | 81 | 4858 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script for Chromium browser resources.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into gcl/git cl, and see
http://www.chromium.org/developers/web-development-style-guide for the rules
we're checking against here.
"""
import os
import struct
class InvalidPNGException(Exception):
 """Raised when a file lacks a valid PNG signature or IHDR chunk."""
 pass
class ResourceScaleFactors(object):
 """Verifier of image dimensions for Chromium resources.
 This class verifies the image dimensions of resources in the various
 resource subdirectories.
 Attributes:
  paths: An array of tuples giving the folders to check and their
   relevant scale factors. For example:
   [(100, 'default_100_percent'), (200, 'default_200_percent')]
 """
 def __init__(self, input_api, output_api, paths):
  """ Initializes ResourceScaleFactors with paths."""
  self.input_api = input_api
  self.output_api = output_api
  self.paths = paths
 def RunChecks(self):
  """Verifies the scale factors of resources being added or modified.
  Returns:
   An array of presubmit errors if any images were detected not
   having the correct dimensions.
  """
  # NOTE(review): Python 2 semantics throughout -- the PNG signature is
  # compared as a str and '/' below is integer division; a Python 3 port
  # would need b'...' literals and '//'.
  def ImageSize(filename):
   # Read width/height straight from the PNG IHDR header (bytes 16-24).
   with open(filename, 'rb', buffering=0) as f:
    data = f.read(24)
   if data[:8] != '\x89PNG\r\n\x1A\n' or data[12:16] != 'IHDR':
    raise InvalidPNGException
   return struct.unpack('>ii', data[16:24])
  # Returns a list of valid scaled image sizes. The valid sizes are the
  # floor and ceiling of (base_size * scale_percent / 100). This is equivalent
  # to requiring that the actual scaled size is less than one pixel away from
  # the exact scaled size.
  def ValidSizes(base_size, scale_percent):
   return sorted(set([(base_size * scale_percent) / 100,
    (base_size * scale_percent + 99) / 100]))
  repository_path = self.input_api.os_path.relpath(
   self.input_api.PresubmitLocalPath(),
   self.input_api.change.RepositoryRoot())
  results = []
  # Check for affected files in any of the paths specified.
  affected_files = self.input_api.AffectedFiles(include_deletes=False)
  files = []
  for f in affected_files:
   for path_spec in self.paths:
    path_root = self.input_api.os_path.join(
     repository_path, path_spec[1])
    if (f.LocalPath().endswith('.png') and
     f.LocalPath().startswith(path_root)):
     # Only save the relative path from the resource directory.
     relative_path = self.input_api.os_path.relpath(f.LocalPath(),
      path_root)
     if relative_path not in files:
      files.append(relative_path)
  corrupt_png_error = ('Corrupt PNG in file %s. Note that binaries are not '
   'correctly uploaded to the code review tool and must be directly '
   'submitted using the dcommit command.')
  for f in files:
   # paths[0] is the base (100%) directory; every image must exist there.
   base_image = self.input_api.os_path.join(self.paths[0][1], f)
   if not os.path.exists(base_image):
    results.append(self.output_api.PresubmitError(
     'Base image %s does not exist' % self.input_api.os_path.join(
      repository_path, base_image)))
    continue
   try:
    base_dimensions = ImageSize(base_image)
   except InvalidPNGException:
    results.append(self.output_api.PresubmitError(corrupt_png_error %
     self.input_api.os_path.join(repository_path, base_image)))
    continue
   # Find all scaled versions of the base image and verify their sizes.
   for i in range(1, len(self.paths)):
    image_path = self.input_api.os_path.join(self.paths[i][1], f)
    if not os.path.exists(image_path):
     continue
    # Ensure that each image for a particular scale factor is the
    # correct scale of the base image.
    try:
     scaled_dimensions = ImageSize(image_path)
    except InvalidPNGException:
     results.append(self.output_api.PresubmitError(corrupt_png_error %
      self.input_api.os_path.join(repository_path, image_path)))
     continue
    for dimension_name, base_size, scaled_size in zip(
     ('width', 'height'), base_dimensions, scaled_dimensions):
     valid_sizes = ValidSizes(base_size, self.paths[i][0])
     if scaled_size not in valid_sizes:
      results.append(self.output_api.PresubmitError(
       'Image %s has %s %d, expected to be %s' % (
        self.input_api.os_path.join(repository_path, image_path),
        dimension_name,
        scaled_size,
        ' or '.join(map(str, valid_sizes)))))
  return results
| bsd-3-clause |
zchking/odoo | addons/anonymization/anonymization.py | 77 | 28690 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from lxml import etree
import os
import base64
try:
import cPickle as pickle
except ImportError:
import pickle
import random
import datetime
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp.tools.safe_eval import safe_eval as eval
from itertools import groupby
from operator import itemgetter
# Legal values for a single field's anonymization state.
FIELD_STATES = [('clear', 'Clear'), ('anonymized', 'Anonymized'), ('not_existing', 'Not Existing'), ('new', 'New')]
# Database-wide states add 'unstable' (a mix of clear and anonymized fields).
ANONYMIZATION_STATES = FIELD_STATES + [('unstable', 'Unstable')]
WIZARD_ANONYMIZATION_STATES = [('clear', 'Clear'), ('anonymized', 'Anonymized'), ('unstable', 'Unstable')]
ANONYMIZATION_HISTORY_STATE = [('started', 'Started'), ('done', 'Done'), ('in_exception', 'Exception occured')]
ANONYMIZATION_DIRECTION = [('clear -> anonymized', 'clear -> anonymized'), ('anonymized -> clear', 'anonymized -> clear')]
def group(lst, cols):
if isinstance(cols, basestring):
cols = [cols]
return dict((k, [v for v in itr]) for k, itr in groupby(sorted(lst, key=itemgetter(*cols)), itemgetter(*cols)))
class ir_model_fields_anonymization(osv.osv):
 # One record per (model, field) pair registered for anonymization,
 # tracking whether that field's data is currently clear or anonymized.
 _name = 'ir.model.fields.anonymization'
 _rec_name = 'field_id'
 _columns = {
  'model_name': fields.char('Object Name', required=True),
  'model_id': fields.many2one('ir.model', 'Object', ondelete='set null'),
  'field_name': fields.char('Field Name', required=True),
  'field_id': fields.many2one('ir.model.fields', 'Field', ondelete='set null'),
  # NOTE(review): 'String=' is probably a typo for 'string=' -- the label
  # kwarg is silently dropped as written. Confirm before changing.
  'state': fields.selection(selection=FIELD_STATES, String='Status', required=True, readonly=True),
 }
 _sql_constraints = [
  ('model_id_field_id_uniq', 'unique (model_name, field_name)', _("You cannot have two fields with the same name on the same object!")),
 ]
 def _get_global_state(self, cr, uid, context=None):
  # Derive the database-wide state from all existing anonymization fields:
  # 'clear' / 'anonymized' when unanimous, 'unstable' when mixed.
  ids = self.search(cr, uid, [('state', '<>', 'not_existing')], context=context)
  fields = self.browse(cr, uid, ids, context=context)
  if not len(fields) or len(fields) == len([f for f in fields if f.state == 'clear']):
   state = 'clear' # all fields are clear
  elif len(fields) == len([f for f in fields if f.state == 'anonymized']):
   state = 'anonymized' # all fields are anonymized
  else:
   state = 'unstable' # fields are mixed: this should be fixed
  return state
 def _check_write(self, cr, uid, context=None):
  """check that the field is created from the menu and not from an database update
  otherwise the database update can crash:"""
  if context is None:
   context = {}
  if context.get('manual'):
   global_state = self._get_global_state(cr, uid, context=context)
   if global_state == 'anonymized':
    raise osv.except_osv('Error!', "The database is currently anonymized, you cannot create, modify or delete fields.")
   elif global_state == 'unstable':
    msg = _("The database anonymization is currently in an unstable state. Some fields are anonymized," + \
    " while some fields are not anonymized. You should try to solve this problem before trying to create, write or delete fields.")
    raise osv.except_osv('Error!', msg)
  return True
 def _get_model_and_field_ids(self, cr, uid, vals, context=None):
  # Resolve (model_name, field_name) in *vals* to database ids;
  # returns (False, False) when either cannot be found.
  model_and_field_ids = (False, False)
  if 'field_name' in vals and vals['field_name'] and 'model_name' in vals and vals['model_name']:
   ir_model_fields_obj = self.pool.get('ir.model.fields')
   ir_model_obj = self.pool.get('ir.model')
   model_ids = ir_model_obj.search(cr, uid, [('model', '=', vals['model_name'])], context=context)
   if model_ids:
    field_ids = ir_model_fields_obj.search(cr, uid, [('name', '=', vals['field_name']), ('model_id', '=', model_ids[0])], context=context)
    if field_ids:
     field_id = field_ids[0]
     model_and_field_ids = (model_ids[0], field_id)
  return model_and_field_ids
 def create(self, cr, uid, vals, context=None):
  # check field state: all should be clear before we can add a new field to anonymize:
  self._check_write(cr, uid, context=context)
  global_state = self._get_global_state(cr, uid, context=context)
  if 'field_name' in vals and vals['field_name'] and 'model_name' in vals and vals['model_name']:
   vals['model_id'], vals['field_id'] = self._get_model_and_field_ids(cr, uid, vals, context=context)
  # check not existing fields:
  if not vals.get('field_id'):
   vals['state'] = 'not_existing'
  else:
   vals['state'] = global_state
  res = super(ir_model_fields_anonymization, self).create(cr, uid, vals, context=context)
  return res
 def write(self, cr, uid, ids, vals, context=None):
  # check field state: all should be clear before we can modify a field:
  # (the single-key {'state': 'clear'} write is the anonymization process
  # itself resetting state, so it bypasses the guard)
  if not (len(vals.keys()) == 1 and vals.get('state') == 'clear'):
   self._check_write(cr, uid, context=context)
  if 'field_name' in vals and vals['field_name'] and 'model_name' in vals and vals['model_name']:
   vals['model_id'], vals['field_id'] = self._get_model_and_field_ids(cr, uid, vals, context=context)
  # check not existing fields:
  if 'field_id' in vals:
   if not vals.get('field_id'):
    vals['state'] = 'not_existing'
   else:
    global_state = self._get_global_state(cr, uid, context)
    if global_state != 'unstable':
     vals['state'] = global_state
  res = super(ir_model_fields_anonymization, self).write(cr, uid, ids, vals, context=context)
  return res
 def unlink(self, cr, uid, ids, context=None):
  # check field state: all should be clear before we can unlink a field:
  self._check_write(cr, uid, context=context)
  res = super(ir_model_fields_anonymization, self).unlink(cr, uid, ids, context=context)
  return res
 def onchange_model_id(self, cr, uid, ids, model_id, context=None):
  # UI onchange: refresh model_name from the picked model, clear the field.
  res = {'value': {
   'field_name': False,
   'field_id': False,
   'model_name': False,
  }}
  if model_id:
   ir_model_obj = self.pool.get('ir.model')
   model_ids = ir_model_obj.search(cr, uid, [('id', '=', model_id)])
   model_id = model_ids and model_ids[0] or None
   model_name = model_id and ir_model_obj.browse(cr, uid, model_id).model or False
   res['value']['model_name'] = model_name
  return res
 def onchange_model_name(self, cr, uid, ids, model_name, context=None):
  # UI onchange: resolve the typed model name to its id, clear the field.
  res = {'value': {
   'field_name': False,
   'field_id': False,
   'model_id': False,
  }}
  if model_name:
   ir_model_obj = self.pool.get('ir.model')
   model_ids = ir_model_obj.search(cr, uid, [('model', '=', model_name)])
   model_id = model_ids and model_ids[0] or False
   res['value']['model_id'] = model_id
  return res
 def onchange_field_name(self, cr, uid, ids, field_name, model_name):
  # UI onchange: resolve the typed field name (within model_name) to its id.
  res = {'value': {
   'field_id': False,
  }}
  if field_name and model_name:
   ir_model_fields_obj = self.pool.get('ir.model.fields')
   field_ids = ir_model_fields_obj.search(cr, uid, [('name', '=', field_name), ('model', '=', model_name)])
   field_id = field_ids and field_ids[0] or False
   res['value']['field_id'] = field_id
  return res
 def onchange_field_id(self, cr, uid, ids, field_id, model_name):
  # UI onchange: mirror the picked field's technical name.
  res = {'value': {
   'field_name': False,
  }}
  if field_id:
   ir_model_fields_obj = self.pool.get('ir.model.fields')
   field = ir_model_fields_obj.browse(cr, uid, field_id)
   res['value']['field_name'] = field.name
  return res
 _defaults = {
  'state': lambda *a: 'clear',
 }
class ir_model_fields_anonymization_history(osv.osv):
    """Log of anonymization / de-anonymization runs, newest first."""
    _name = 'ir.model.fields.anonymization.history'
    _order = "date desc"
    _columns = {
        # when the run was started
        'date': fields.datetime('Date', required=True, readonly=True),
        # anonymization field definitions touched by this run
        'field_ids': fields.many2many('ir.model.fields.anonymization', 'anonymized_field_to_history_rel', 'field_id', 'history_id', 'Fields', readonly=True),
        # lifecycle of the run: the wizard writes 'started', 'done' or
        # 'in_exception' (see ANONYMIZATION_HISTORY_STATE)
        'state': fields.selection(selection=ANONYMIZATION_HISTORY_STATE, string='Status', required=True, readonly=True),
        # 'clear -> anonymized' or 'anonymized -> clear'
        'direction': fields.selection(selection=ANONYMIZATION_DIRECTION, string='Direction', size=20, required=True, readonly=True),
        # result or error message shown to the user
        'msg': fields.text('Message', readonly=True),
        # absolute path of the pickle export file, if any
        'filepath': fields.char(string='File path', readonly=True),
    }
class ir_model_fields_anonymize_wizard(osv.osv_memory):
    """Wizard driving the anonymization / de-anonymization process."""
    _name = 'ir.model.fields.anonymize.wizard'

    def _get_state(self, cr, uid, ids, name, arg, context=None):
        """Function-field getter for 'state': every requested id gets the
        current global anonymization state."""
        res = {}
        # BUG FIX: the caller's context was previously discarded
        # (context=None was hard-coded instead of forwarding context).
        state = self._get_state_value(cr, uid, context=context)
        for id in ids:
            res[id] = state
        return res
def _get_summary(self, cr, uid, ids, name, arg, context=None):
res = {}
summary = self._get_summary_value(cr, uid, context)
for id in ids:
res[id] = summary
return res
_columns = {
'name': fields.char(string='File Name'),
'summary': fields.function(_get_summary, type='text', string='Summary'),
'file_export': fields.binary(string='Export'),
'file_import': fields.binary(string='Import', help="This is the file created by the anonymization process. It should have the '.pickle' extention."),
'state': fields.function(_get_state, string='Status', type='selection', selection=WIZARD_ANONYMIZATION_STATES, readonly=False),
'msg': fields.text(string='Message'),
}
def _get_state_value(self, cr, uid, context=None):
state = self.pool.get('ir.model.fields.anonymization')._get_global_state(cr, uid, context=context)
return state
def _get_summary_value(self, cr, uid, context=None):
summary = u''
anon_field_obj = self.pool.get('ir.model.fields.anonymization')
ir_model_fields_obj = self.pool.get('ir.model.fields')
anon_field_ids = anon_field_obj.search(cr, uid, [('state', '<>', 'not_existing')], context=context)
anon_fields = anon_field_obj.browse(cr, uid, anon_field_ids, context=context)
field_ids = [anon_field.field_id.id for anon_field in anon_fields if anon_field.field_id]
fields = ir_model_fields_obj.browse(cr, uid, field_ids, context=context)
fields_by_id = dict([(f.id, f) for f in fields])
for anon_field in anon_fields:
field = fields_by_id.get(anon_field.field_id.id)
values = {
'model_name': field.model_id.name,
'model_code': field.model_id.model,
'field_code': field.name,
'field_name': field.field_description,
'state': anon_field.state,
}
summary += u" * %(model_name)s (%(model_code)s) -> %(field_name)s (%(field_code)s): state: (%(state)s)\n" % values
return summary
def default_get(self, cr, uid, fields_list, context=None):
res = {}
res['name'] = '.pickle'
res['summary'] = self._get_summary_value(cr, uid, context)
res['state'] = self._get_state_value(cr, uid, context)
res['msg'] = _("""Before executing the anonymization process, you should make a backup of your database.""")
return res
    def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, *args, **kwargs):
        """Customize the wizard view depending on the current step.

        The base view contains a placeholder group named 'placeholder1'
        which is replaced by different widgets depending on
        context['step'] and on the global anonymization state.
        """
        state = self.pool.get('ir.model.fields.anonymization')._get_global_state(cr, uid, context=context)
        if context is None:
            context = {}
        step = context.get('step', 'new_window')
        res = super(ir_model_fields_anonymize_wizard, self).fields_view_get(cr, uid, view_id, view_type, context, *args, **kwargs)
        eview = etree.fromstring(res['arch'])
        placeholder = eview.xpath("group[@name='placeholder1']")
        if len(placeholder):
            placeholder = placeholder[0]
            # NOTE: addnext() inserts each element directly after the
            # placeholder, so successive calls end up in reverse order in
            # the resulting view (the last addnext() appears first).
            if step == 'new_window' and state == 'clear':
                # clicked in the menu and the fields are not anonymized: warn
                # the admin that backing up the db is very important
                placeholder.addnext(etree.Element('field', {'name': 'msg', 'colspan': '4', 'nolabel': '1'}))
                placeholder.addnext(etree.Element('newline'))
                placeholder.addnext(etree.Element('label', {'string': 'Warning'}))
                eview.remove(placeholder)
            elif step == 'new_window' and state == 'anonymized':
                # clicked in the menu and the fields are already anonymized:
                # ask for the anonymization export file
                placeholder.addnext(etree.Element('newline'))
                placeholder.addnext(etree.Element('field', {'name': 'file_import', 'required': "1"}))
                placeholder.addnext(etree.Element('label', {'string': 'Anonymization file'}))
                eview.remove(placeholder)
            elif step == 'just_anonymized':
                # we just ran the anonymization process, we need the file export field
                placeholder.addnext(etree.Element('newline'))
                placeholder.addnext(etree.Element('field', {'name': 'file_export'}))
                # we need to remove the button:
                buttons = eview.xpath("button")
                for button in buttons:
                    eview.remove(button)
                # and add a message:
                placeholder.addnext(etree.Element('field', {'name': 'msg', 'colspan': '4', 'nolabel': '1'}))
                placeholder.addnext(etree.Element('newline'))
                placeholder.addnext(etree.Element('label', {'string': 'Result'}))
                # remove the placeholder:
                eview.remove(placeholder)
            elif step == 'just_desanonymized':
                # we just reversed the anonymization process, we don't need any field
                # we need to remove the button
                buttons = eview.xpath("button")
                for button in buttons:
                    eview.remove(button)
                # and add a message:
                placeholder.addnext(etree.Element('field', {'name': 'msg', 'colspan': '4', 'nolabel': '1'}))
                placeholder.addnext(etree.Element('newline'))
                placeholder.addnext(etree.Element('label', {'string': 'Result'}))
                # remove the placeholder:
                eview.remove(placeholder)
            else:
                msg = _("The database anonymization is currently in an unstable state. Some fields are anonymized," + \
                  " while some fields are not anonymized. You should try to solve this problem before trying to do anything else.")
                raise osv.except_osv('Error!', msg)
        res['arch'] = etree.tostring(eview)
        return res
def _raise_after_history_update(self, cr, uid, history_id, error_type, error_msg):
self.pool.get('ir.model.fields.anonymization.history').write(cr, uid, history_id, {
'state': 'in_exception',
'msg': error_msg,
})
raise osv.except_osv(error_type, error_msg)
def anonymize_database(self, cr, uid, ids, context=None):
"""Sets the 'anonymized' state to defined fields"""
# create a new history record:
anonymization_history_model = self.pool.get('ir.model.fields.anonymization.history')
vals = {
'date': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
'state': 'started',
'direction': 'clear -> anonymized',
}
history_id = anonymization_history_model.create(cr, uid, vals)
# check that all the defined fields are in the 'clear' state
state = self.pool.get('ir.model.fields.anonymization')._get_global_state(cr, uid, context=context)
if state == 'anonymized':
self._raise_after_history_update(cr, uid, history_id, _('Error !'), _("The database is currently anonymized, you cannot anonymize it again."))
elif state == 'unstable':
msg = _("The database anonymization is currently in an unstable state. Some fields are anonymized," + \
" while some fields are not anonymized. You should try to solve this problem before trying to do anything.")
self._raise_after_history_update(cr, uid, history_id, 'Error !', msg)
# do the anonymization:
dirpath = os.environ.get('HOME') or os.getcwd()
rel_filepath = 'field_anonymization_%s_%s.pickle' % (cr.dbname, history_id)
abs_filepath = os.path.abspath(os.path.join(dirpath, rel_filepath))
ir_model_fields_anonymization_model = self.pool.get('ir.model.fields.anonymization')
field_ids = ir_model_fields_anonymization_model.search(cr, uid, [('state', '<>', 'not_existing')], context=context)
fields = ir_model_fields_anonymization_model.browse(cr, uid, field_ids, context=context)
if not fields:
msg = "No fields are going to be anonymized."
self._raise_after_history_update(cr, uid, history_id, 'Error !', msg)
data = []
for field in fields:
model_name = field.model_id.model
field_name = field.field_id.name
field_type = field.field_id.ttype
table_name = self.pool[model_name]._table
# get the current value
sql = "select id, %s from %s" % (field_name, table_name)
cr.execute(sql)
records = cr.dictfetchall()
for record in records:
data.append({"model_id": model_name, "field_id": field_name, "id": record['id'], "value": record[field_name]})
# anonymize the value:
anonymized_value = None
sid = str(record['id'])
if field_type == 'char':
anonymized_value = 'xxx'+sid
elif field_type == 'selection':
anonymized_value = 'xxx'+sid
elif field_type == 'text':
anonymized_value = 'xxx'+sid
elif field_type == 'boolean':
anonymized_value = random.choice([True, False])
elif field_type == 'date':
anonymized_value = '2011-11-11'
elif field_type == 'datetime':
anonymized_value = '2011-11-11 11:11:11'
elif field_type == 'float':
anonymized_value = 0.0
elif field_type == 'integer':
anonymized_value = 0
elif field_type in ['binary', 'many2many', 'many2one', 'one2many', 'reference']: # cannot anonymize these kind of fields
msg = _("Cannot anonymize fields of these types: binary, many2many, many2one, one2many, reference.")
self._raise_after_history_update(cr, uid, history_id, 'Error !', msg)
if anonymized_value is None:
self._raise_after_history_update(cr, uid, history_id, _('Error !'), _("Anonymized value is None. This cannot happens."))
sql = "update %(table)s set %(field)s = %%(anonymized_value)s where id = %%(id)s" % {
'table': table_name,
'field': field_name,
}
cr.execute(sql, {
'anonymized_value': anonymized_value,
'id': record['id']
})
# save pickle:
fn = open(abs_filepath, 'w')
pickle.dump(data, fn, pickle.HIGHEST_PROTOCOL)
# update the anonymization fields:
values = {
'state': 'anonymized',
}
ir_model_fields_anonymization_model.write(cr, uid, field_ids, values, context=context)
# add a result message in the wizard:
msgs = ["Anonymization successful.",
"",
"Donot forget to save the resulting file to a safe place because you will not be able to revert the anonymization without this file.",
"",
"This file is also stored in the %s directory. The absolute file path is: %s.",
]
msg = '\n'.join(msgs) % (dirpath, abs_filepath)
fn = open(abs_filepath, 'r')
self.write(cr, uid, ids, {
'msg': msg,
'file_export': base64.encodestring(fn.read()),
})
fn.close()
# update the history record:
anonymization_history_model.write(cr, uid, history_id, {
'field_ids': [[6, 0, field_ids]],
'msg': msg,
'filepath': abs_filepath,
'state': 'done',
})
# handle the view:
view_id = self._id_get(cr, uid, 'ir.ui.view', 'view_ir_model_fields_anonymize_wizard_form', 'anonymization')
return {
'res_id': ids[0],
'view_id': [view_id],
'view_type': 'form',
"view_mode": 'form',
'res_model': 'ir.model.fields.anonymize.wizard',
'type': 'ir.actions.act_window',
'context': {'step': 'just_anonymized'},
'target':'new',
}
def reverse_anonymize_database(self, cr, uid, ids, context=None):
"""Set the 'clear' state to defined fields"""
ir_model_fields_anonymization_model = self.pool.get('ir.model.fields.anonymization')
anonymization_history_model = self.pool.get('ir.model.fields.anonymization.history')
# create a new history record:
vals = {
'date': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
'state': 'started',
'direction': 'anonymized -> clear',
}
history_id = anonymization_history_model.create(cr, uid, vals)
# check that all the defined fields are in the 'anonymized' state
state = ir_model_fields_anonymization_model._get_global_state(cr, uid, context=context)
if state == 'clear':
raise osv.except_osv_('Error!', "The database is not currently anonymized, you cannot reverse the anonymization.")
elif state == 'unstable':
msg = _("The database anonymization is currently in an unstable state. Some fields are anonymized," + \
" while some fields are not anonymized. You should try to solve this problem before trying to do anything.")
raise osv.except_osv('Error!', msg)
wizards = self.browse(cr, uid, ids, context=context)
for wizard in wizards:
if not wizard.file_import:
msg = _("It is not possible to reverse the anonymization process without supplying the anonymization export file.")
self._raise_after_history_update(cr, uid, history_id, 'Error !', msg)
# reverse the anonymization:
# load the pickle file content into a data structure:
data = pickle.loads(base64.decodestring(wizard.file_import))
migration_fix_obj = self.pool.get('ir.model.fields.anonymization.migration.fix')
fix_ids = migration_fix_obj.search(cr, uid, [('target_version', '=', '8.0')])
fixes = migration_fix_obj.read(cr, uid, fix_ids, ['model_name', 'field_name', 'query', 'query_type', 'sequence'])
fixes = group(fixes, ('model_name', 'field_name'))
for line in data:
queries = []
table_name = self.pool[line['model_id']]._table if line['model_id'] in self.pool else None
# check if custom sql exists:
key = (line['model_id'], line['field_id'])
custom_updates = fixes.get(key)
if custom_updates:
custom_updates.sort(key=itemgetter('sequence'))
queries = [(record['query'], record['query_type']) for record in custom_updates if record['query_type']]
elif table_name:
queries = [("update %(table)s set %(field)s = %%(value)s where id = %%(id)s" % {
'table': table_name,
'field': line['field_id'],
}, 'sql')]
for query in queries:
if query[1] == 'sql':
sql = query[0]
cr.execute(sql, {
'value': line['value'],
'id': line['id']
})
elif query[1] == 'python':
raw_code = query[0]
code = raw_code % line
eval(code)
else:
raise Exception("Unknown query type '%s'. Valid types are: sql, python." % (query['query_type'], ))
# update the anonymization fields:
ir_model_fields_anonymization_model = self.pool.get('ir.model.fields.anonymization')
field_ids = ir_model_fields_anonymization_model.search(cr, uid, [('state', '<>', 'not_existing')], context=context)
values = {
'state': 'clear',
}
ir_model_fields_anonymization_model.write(cr, uid, field_ids, values, context=context)
# add a result message in the wizard:
msg = '\n'.join(["Successfully reversed the anonymization.",
"",
])
self.write(cr, uid, ids, {'msg': msg})
# update the history record:
anonymization_history_model.write(cr, uid, history_id, {
'field_ids': [[6, 0, field_ids]],
'msg': msg,
'filepath': False,
'state': 'done',
})
# handle the view:
view_id = self._id_get(cr, uid, 'ir.ui.view', 'view_ir_model_fields_anonymize_wizard_form', 'anonymization')
return {
'res_id': ids[0],
'view_id': [view_id],
'view_type': 'form',
"view_mode": 'form',
'res_model': 'ir.model.fields.anonymize.wizard',
'type': 'ir.actions.act_window',
'context': {'step': 'just_desanonymized'},
'target':'new',
}
def _id_get(self, cr, uid, model, id_str, mod):
if '.' in id_str:
mod, id_str = id_str.split('.')
try:
idn = self.pool.get('ir.model.data')._get_id(cr, uid, mod, id_str)
res = int(self.pool.get('ir.model.data').read(cr, uid, [idn], ['res_id'])[0]['res_id'])
except:
res = None
return res
class ir_model_fields_anonymization_migration_fix(osv.osv):
    """Custom per-field queries applied when reversing the anonymization
    after a server migration (see reverse_anonymize_database)."""
    _name = 'ir.model.fields.anonymization.migration.fix'
    _order = "sequence"
    _columns = {
        # server version the fix targets (e.g. '8.0')
        'target_version': fields.char('Target Version'),
        'model_name': fields.char('Model'),
        'field_name': fields.char('Field'),
        # the query text; run as SQL or eval'd as Python depending on
        # 'query_type'
        'query': fields.text('Query'),
        'query_type': fields.selection(string='Query', selection=[('sql', 'sql'), ('python', 'python')]),
        # application order within a (model, field) pair
        'sequence': fields.integer('Sequence'),
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
hbenniou/trunk | examples/not-working/insertion-sort-collider.py | 8 | 1378 |
""" NOTE
Needs yade compiled with CGAL feature
"""
O.engines=[
ForceResetter(),
InsertionSortCollider([Bo1_Sphere_Aabb(),Bo1_Box_Aabb(),Bo1_Facet_Aabb()]),
InteractionLoop([Ig2_Facet_Sphere_ScGeom()],[Ip2_FrictMat_FrictMat_FrictPhys()],[Law2_ScGeom_FrictPhys_CundallStrack()],),
NewtonIntegrator(damping=0.01,gravity=[0,0,-10]),
]
mat=O.materials.append(FrictMat(young=1e3,poisson=.2,density=1000,frictionAngle=20))
O.bodies.append([
facet([[-1,-1,0],[1,-1,0],[0,1,0]],fixed=True,color=[1,0,0],material=mat),
facet([[1,-1,0],[0,1,0,],[1,.5,.5]],fixed=True,material=mat)
])
import random
if 1:
for i in range(0,100):
O.bodies.append(sphere([random.gauss(0,1),random.gauss(0,1),random.uniform(1,2)],random.uniform(.02,.05),material=mat))
O.bodies[len(O.bodies)-1].state.vel=Vector3(random.gauss(0,.1),random.gauss(0,.1),random.gauss(0,.1))
else:
O.bodies.append(sphere([0,0,.6],.5),material=mat)
O.dt=1e-4
O.saveTmp('init')
# compare 2 colliders:
if 1:
O.timingEnabled=True
from yade import timing
for collider in InsertionSortCollider(),PersistentTriangulationCollider(haveDistantTransient=True):
for i in range(2):
O.loadTmp('init')
replaceCollider(collider)
O.run(100,True)
timing.reset()
O.run(50000,True)
timing.stats()
else:
#O.run(100,True)
O.step()
print len(O.interactions)
#O.bodies[2].phys['se3']=[-.6,0,.6,1,0,0,0]
#O.step()
| gpl-2.0 |
jmartinm/invenio | modules/websubmit/lib/functions/Mail_Approval_Request_to_Referee.py | 33 | 20233 | ## This file is part of Invenio.
## Copyright (C) 2008, 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Mail_Approval_Request_to_Referee: A function to send an email to the referee
of a document informing him/her that a request for its approval has been
submitted by the user.
"""
__revision__ = "$Id$"
import os
import re
import sre_constants
from invenio.websubmit_dblayer import get_approval_request_notes
from invenio.websubmit_config import InvenioWebSubmitFunctionError, \
CFG_WEBSUBMIT_COPY_MAILS_TO_ADMIN
from invenio.config import CFG_CERN_SITE, \
CFG_SITE_NAME, \
CFG_SITE_URL, \
CFG_SITE_SUPPORT_EMAIL, \
CFG_SITE_RECORD
from invenio.access_control_admin import acc_get_role_users, acc_get_role_id
from invenio.websubmit_functions.Shared_Functions import ParamFromFile
from invenio.errorlib import register_exception
from invenio.search_engine import print_record
from invenio.mailutils import send_email
CFG_MAIL_BODY = """
A request for the approval of a document in the %(site-name)s has been
made and requires your attention as a referee. The details are as
follows:
Reference Number: [%(report-number)s]
Title: %(title)s
Author(s): %(authors)s
You can see the details of the record at the following address:
<%(site-url)s/%(CFG_SITE_RECORD)s/%(record-id)s>
Please register your decision by following the instructions at the
following address:
<%(site-url)s/submit/direct?%(report-number-fieldname)s=%(report-number)s&sub=%(approval-action)s%(doctype)s&combo%(doctype)s=%(category)s>
Below, you may find some additional information about the approval request:
%(notes)s
"""
def Mail_Approval_Request_to_Referee(parameters, curdir, form, user_info=None):
    """
    This function sends an email to the referee of a document informing
    him/her that a request for its approval has been submitted by the
    user.

    @param categ_file_appreq: (string) - some document types are
        separated into different categories, each of which has its own
        referee(s).
        In such document types, it's necessary to know the document-
        type's category in order to choose the referee.
        This parameter provides a means by which the category information
        can be extracted from a file in the current submission's working
        directory. It should therefore be a filename.

    @param categ_rnseek_appreq: (string) - some document types are
        separated into different categories, each of which has its own
        referee(s).
        In such document types, it's necessary to know the document-
        type's category in order to choose the referee.
        This parameter provides a means by which the category information
        can be extracted from the document's reference number.
        It is infact a string that will be compiled into a regexp and
        an attempt will be made to match it agains the document's reference
        number starting from the left-most position.
        The only pre-requisite is that the segment in which the category is
        sought should be indicated with <CATEGORY>.
        Thus, an example might be as follows:
           ATL(-COM)?-<CATEGORY>-.+
        This would allow "PHYS" in the following reference number to be
        recognised as the category:
           ATL-COM-PHYS-2008-001

    @param edsrn: (string) - the name of the field in which the report
        number should be placed when the referee visits the form for making
        a decision.

    @return: (string) - empty string.
    """
    ## Get the reference number (as global rn - sorry!) and the document type:
    global sysno, rn
    doctype = form['doctype']

    ########
    ## Get the parameters from the list:

    ########
    ## Get the name of the report-number file:
    ########
    try:
        edsrn_file = parameters["edsrn"]
    except KeyError:
        ## No value given for the edsrn file:
        msg = "Error in Mail_Approval_Request_to_Referee function: unable " \
              "to determine the name of the file in which the document's " \
              "report number should be stored."
        raise InvenioWebSubmitFunctionError(msg)
    else:
        edsrn_file = str(edsrn_file)
        edsrn_file = os.path.basename(edsrn_file).strip()
        if edsrn_file == "":
            msg = "Error in Mail_Approval_Request_to_Referee function: " \
                  "unable to determine the name of the file in which " \
                  "the document's report number should be stored."
            raise InvenioWebSubmitFunctionError(msg)
    ########
    ## Get the name of the category file:
    #######
    try:
        ## If it has been provided, get the name of the file in which the
        ## category is stored:
        category_file = parameters["categ_file_appreq"]
    except KeyError:
        ## No value given for the category file:
        category_file = None
    else:
        if category_file is not None:
            category_file = str(category_file)
            category_file = os.path.basename(category_file).strip()
            if category_file == "":
                category_file = None
    ########
    ## Get the regexp that is used to find the category in the report number:
    ########
    try:
        ## If it has been provided, get the regexp used for identifying
        ## a document-type's category from its reference number:
        category_rn_regexp = parameters["categ_rnseek_appreq"]
    except KeyError:
        ## No value given for the category regexp:
        category_rn_regexp = None
    else:
        if category_rn_regexp is not None:
            category_rn_regexp = str(category_rn_regexp).strip()
        if category_rn_regexp == "":
            category_rn_regexp = None
    #######
    ## Resolve the document type's category:
    ##
    ## This is a long process. The end result is that the category is extracted
    ## either from a file in curdir, or from the report number.
    ## If it's taken from the report number, the admin must configure the
    ## function to accept a regular expression that is used to find the
    ## category in the report number.
    ##
    ## NOTE(review): the error messages below mention
    ## "Register_Approval_Request function" - they appear to have been
    ## copy-pasted from that function; confirm before relying on them
    ## for debugging.
    if category_file is not None and category_rn_regexp is not None:
        ## It is not valid to have both a category file and a pattern
        ## describing how to extract the category from a report number.
        ## raise an InvenioWebSubmitFunctionError
        msg = "Error in Register_Approval_Request function: received " \
              "instructions to search for the document's category in " \
              "both its report number AND in a category file. Could " \
              "not determine which to use - please notify the " \
              "administrator."
        raise InvenioWebSubmitFunctionError(msg)
    elif category_file is not None:
        ## Attempt to recover the category information from a file in the
        ## current submission's working directory:
        category = ParamFromFile("%s/%s" % (curdir, category_file))
        if category is not None:
            category = category.strip()
        if category in (None, ""):
            ## The category cannot be resolved.
            msg = "Error in Register_Approval_Request function: received " \
                  "instructions to search for the document's category in " \
                  "a category file, but could not recover the category " \
                  "from that file. An approval request therefore cannot " \
                  "be registered for the document."
            raise InvenioWebSubmitFunctionError(msg)
    elif category_rn_regexp is not None:
        ## Attempt to recover the category information from the document's
        ## reference number using the regexp in category_rn_regexp:
        ##
        ## Does the category regexp contain the key-phrase "<CATEG>"?
        if category_rn_regexp.find("<CATEG>") != -1:
            ## Yes. Replace "<CATEG>" with "(?P<category>.+?)".
            ## For example, this:
            ##    ATL(-COM)?-<CATEG>-
            ## Will be transformed into this:
            ##    ATL(-COM)?-(?P<category>.+?)-
            category_rn_final_regexp = \
                category_rn_regexp.replace("<CATEG>", r"(?P<category>.+?)", 1)
        else:
            ## The regexp for category didn't contain "<CATEG>", but this is
            ## mandatory.
            msg = "Error in Register_Approval_Request function: The " \
                  "[%(doctype)s] submission has been configured to search " \
                  "for the document type's category in its reference number, " \
                  "using a poorly formed search expression (no marker for " \
                  "the category was present.) Since the document's category " \
                  "therefore cannot be retrieved, an approval request cannot " \
                  "be registered for it. Please report this problem to the " \
                  "administrator." \
                  % { 'doctype' : doctype, }
            raise InvenioWebSubmitFunctionError(msg)
        ##
        try:
            ## Attempt to compile the regexp for finding the category:
            re_categ_from_rn = re.compile(category_rn_final_regexp)
        except sre_constants.error:
            ## The expression passed to this function could not be compiled
            ## into a regexp. Register this exception and raise an
            ## InvenioWebSubmitFunctionError:
            exception_prefix = "Error in Register_Approval_Request function: " \
                               "The [%(doctype)s] submission has been " \
                               "configured to search for the document type's " \
                               "category in its reference number, using the " \
                               "following regexp: /%(regexp)s/. This regexp, " \
                               "however, could not be compiled correctly " \
                               "(created it from %(categ-search-term)s.)" \
                               % { 'doctype'           : doctype, \
                                   'regexp'            : category_rn_final_regexp, \
                                   'categ-search-term' : category_rn_regexp, }
            register_exception(prefix=exception_prefix)
            msg = "Error in Register_Approval_Request function: The " \
                  "[%(doctype)s] submission has been configured to search " \
                  "for the document type's category in its reference number, " \
                  "using a poorly formed search expression. Since the " \
                  "document's category therefore cannot be retrieved, an " \
                  "approval request cannot be registered for it. Please " \
                  "report this problem to the administrator." \
                  % { 'doctype' : doctype, }
            raise InvenioWebSubmitFunctionError(msg)
        else:
            ## Now attempt to recover the category from the RN string:
            m_categ_from_rn = re_categ_from_rn.match(rn)
            if m_categ_from_rn is not None:
                ## The pattern matched in the string.
                ## Extract the category from the match:
                try:
                    category = m_categ_from_rn.group("category")
                except IndexError:
                    ## There was no "category" group. That group is mandatory.
                    exception_prefix = \
                       "Error in Register_Approval_Request function: The " \
                       "[%(doctype)s] submission has been configured to " \
                       "search for the document type's category in its " \
                       "reference number using the following regexp: " \
                       "/%(regexp)s/. The search produced a match, but " \
                       "there was no \"category\" group in the match " \
                       "object although this group is mandatory. The " \
                       "regexp was compiled from the following string: " \
                       "[%(categ-search-term)s]." \
                       % { 'doctype'           : doctype, \
                           'regexp'            : category_rn_final_regexp, \
                           'categ-search-term' : category_rn_regexp, }
                    register_exception(prefix=exception_prefix)
                    msg = "Error in Register_Approval_Request function: The " \
                          "[%(doctype)s] submission has been configured to " \
                          "search for the document type's category in its " \
                          "reference number, using a poorly formed search " \
                          "expression (there was no category marker). Since " \
                          "the document's category therefore cannot be " \
                          "retrieved, an approval request cannot be " \
                          "registered for it. Please report this problem to " \
                          "the administrator." \
                          % { 'doctype' : doctype, }
                    raise InvenioWebSubmitFunctionError(msg)
                else:
                    category = category.strip()
                    if category == "":
                        msg = "Error in Register_Approval_Request function: " \
                              "The [%(doctype)s] submission has been " \
                              "configured to search for the document type's " \
                              "category in its reference number, but no " \
                              "category was found. The request for approval " \
                              "cannot be registered. Please report this " \
                              "problem to the administrator." \
                              % { 'doctype' : doctype, }
                        raise InvenioWebSubmitFunctionError(msg)
            else:
                ## No match. Cannot find the category and therefore cannot
                ## continue:
                msg = "Error in Register_Approval_Request function: The " \
                      "[%(doctype)s] submission has been configured to " \
                      "search for the document type's category in its " \
                      "reference number, but no match was made. The request " \
                      "for approval cannot be registered. Please report " \
                      "this problem to the administrator." \
                      % { 'doctype' : doctype, }
                raise InvenioWebSubmitFunctionError(msg)
    else:
        ## The document type has no category.
        category = ""
    ##
    ## End of category recovery
    #######
    #######
    ## Get the title and author(s) from the record:
    #######
    ## Author(s):
    rec_authors = ""
    rec_first_author = print_record(int(sysno), 'tm', "100__a")
    rec_other_authors = print_record(int(sysno), 'tm', "700__a")
    if rec_first_author != "":
        rec_authors += "".join(["%s\n" % author.strip() for \
                                author in rec_first_author.split("\n")])
    if rec_other_authors != "":
        rec_authors += "".join(["%s\n" % author.strip() for \
                                author in rec_other_authors.split("\n")])
    ## Title:
    rec_title = "".join(["%s\n" % title.strip() for title in \
                          print_record(int(sysno), 'tm', "245__a").split("\n")])
    ##
    #######
    ## the normal approval action (overridden for some ATN note types below)
    approve_act = 'APP'
    ## Get notes about the approval request:
    approval_notes = get_approval_request_notes(doctype, rn)
    ## Get the referee email address:
    if CFG_CERN_SITE:
        ## The referees system in CERN now works with listbox membership.
        ## List names should take the format
        ## "service-cds-referee-doctype-category@cern.ch"
        ## Make sure that your list exists!
        ## FIXME - to be replaced by a mailing alias in webaccess in the
        ## future.
        if doctype == 'ATN': ## Special case of 'RPR' action for doctype ATN
            action = ParamFromFile("%s/%s" % (curdir,'act')).strip()
            if action == 'RPR':
                notetype = ParamFromFile("%s/%s" % (curdir,'ATN_NOTETYPE')).strip()
                if notetype not in ('SLIDE','PROC'):
                    raise InvenioWebSubmitFunctionError('ERROR function Mail_Approval_Request_to_Referee:: do not recognize notetype ' + notetype)
                if notetype == 'PROC':
                    approve_act = 'APR' # RPR PROC requires APR action to approve
                    referee_listname = "service-cds-referee-atn-proc@cern.ch"
                elif notetype == 'SLIDE': ## SLIDES approval
                    approve_act = 'APS' # RPR SLIDE requires APS action to approve
                    referee_listname = "atlas-speakers-comm@cern.ch"
                else:
                    raise InvenioWebSubmitFunctionError('ERROR function Mail_Approval_Request_to_Referee:: do not understand notetype: ' +notetype)
        else:
            referee_listname = "service-cds-referee-%s" % doctype.lower()
            if category != "":
                referee_listname += "-%s" % category.lower()
            mailto_addresses = referee_listname + "@cern.ch"
        if category == 'CDSTEST':
            referee_listname = "service-cds-referee-%s" % doctype.lower()
            referee_listname += "-%s" % category.lower()
            mailto_addresses = referee_listname + "@cern.ch"
    else:
        referee_address = ""
        ## Try to retrieve the referee's email from the referee's database:
        for user in \
            acc_get_role_users(acc_get_role_id("referee_%s_%s" \
                                               % (doctype, category))):
            referee_address += user[1] + ","
        ## And if there are general referees:
        for user in \
            acc_get_role_users(acc_get_role_id("referee_%s_*" % doctype)):
            referee_address += user[1] + ","
        referee_address = re.sub(",$", "", referee_address)
        # Creation of the mail for the referee
        mailto_addresses = ""
        ## NOTE(review): when referee_address is non-empty a trailing
        ## comma is re-appended here (and the else branch strips a comma
        ## from an already-empty string) - looks like dead/sloppy logic;
        ## confirm send_email tolerates a trailing comma in the
        ## recipient list.
        if referee_address != "":
            mailto_addresses = referee_address + ","
        else:
            mailto_addresses = re.sub(",$", "", mailto_addresses)
    ##
    ## Send the email:
    mail_subj = "Request for approval of [%s]" % rn
    mail_body = CFG_MAIL_BODY % \
                { 'site-name'               : CFG_SITE_NAME,
                  'CFG_SITE_RECORD'         : CFG_SITE_RECORD,
                  'report-number-fieldname' : edsrn_file,
                  'report-number'           : rn,
                  'title'                   : rec_title,
                  'authors'                 : rec_authors,
                  'site-url'                : CFG_SITE_URL,
                  'record-id'               : sysno,
                  'approval-action'         : approve_act,
                  'doctype'                 : doctype,
                  'notes'                   : approval_notes,
                  'category'                : category,
                }
    send_email(CFG_SITE_SUPPORT_EMAIL,
               mailto_addresses,
               mail_subj,
               mail_body,
               copy_to_admin=CFG_WEBSUBMIT_COPY_MAILS_TO_ADMIN)
    ##
    return ""
| gpl-2.0 |
brakhane/python-mode | pymode/libs2/rope/refactor/move.py | 30 | 26601 | """A module containing classes for move refactoring
`create_move()` is a factory for creating move refactoring objects
based on inputs.
"""
from rope.base import pyobjects, codeanalyze, exceptions, pynames, taskhandle, evaluate, worder
from rope.base.change import ChangeSet, ChangeContents, MoveResource
from rope.refactor import importutils, rename, occurrences, sourceutils, functionutils
def create_move(project, resource, offset=None):
    """A factory for creating Move objects

    Based on `resource` and `offset`, return one of `MoveModule`,
    `MoveGlobal` or `MoveMethod` for performing move refactoring.
    """
    # No offset means the whole module/package is being moved.
    if offset is None:
        return MoveModule(project, resource)
    this_pymodule = project.pycore.resource_to_pyobject(resource)
    name_at_offset = evaluate.eval_location(this_pymodule, offset)
    if name_at_offset is None:
        raise exceptions.RefactoringError(
            'Move only works on classes, functions, modules and methods.')
    pyobject = name_at_offset.get_object()
    # Dispatch on the kind of object found at the offset; the more
    # specific checks (module/package, method) come before the generic
    # "global definition" check.
    if isinstance(pyobject, (pyobjects.PyModule, pyobjects.PyPackage)):
        return MoveModule(project, pyobject.get_resource())
    if isinstance(pyobject, pyobjects.PyFunction) and \
       isinstance(pyobject.parent, pyobjects.PyClass):
        return MoveMethod(project, resource, offset)
    if isinstance(pyobject, pyobjects.PyDefinedObject) and \
       isinstance(pyobject.parent, pyobjects.PyModule):
        return MoveGlobal(project, resource, offset)
    raise exceptions.RefactoringError(
        'Move only works on global classes/functions, modules and methods.')
class MoveMethod(object):
    """For moving methods

    It makes a new method in the destination class and changes
    the body of the old method to call the new method. You can
    inline the old method to change all of its occurrences.
    """

    def __init__(self, project, resource, offset):
        self.project = project
        self.pycore = project.pycore
        this_pymodule = self.pycore.resource_to_pyobject(resource)
        pyname = evaluate.eval_location(this_pymodule, offset)
        self.method_name = worder.get_name_at(resource, offset)
        self.pyfunction = pyname.get_object()
        # Only instance methods are supported (not staticmethods,
        # classmethods or free functions).
        if self.pyfunction.get_kind() != 'method':
            raise exceptions.RefactoringError('Only normal methods'
                                              ' can be moved.')

    def get_changes(self, dest_attr, new_name=None, resources=None,
                    task_handle=taskhandle.NullTaskHandle()):
        """Return the changes needed for this refactoring

        Parameters:

        - `dest_attr`: the name of the destination attribute
        - `new_name`: the name of the new method; if `None` uses
          the old name
        - `resources` can be a list of `rope.base.resources.File`\s to
          apply this refactoring on.  If `None`, the restructuring
          will be applied to all python files.
        """
        changes = ChangeSet('Moving method <%s>' % self.method_name)
        if resources is None:
            resources = self.pycore.get_python_files()
        if new_name is None:
            new_name = self.get_method_name()
        # Change 1: replace the old method's body with a delegating call
        # to the moved method on the destination attribute.
        resource1, start1, end1, new_content1 = \
            self._get_changes_made_by_old_class(dest_attr, new_name)
        collector1 = codeanalyze.ChangeCollector(resource1.read())
        collector1.add_change(start1, end1, new_content1)
        # Change 2: insert the new method at the end of the destination
        # class body.
        resource2, start2, end2, new_content2 = \
            self._get_changes_made_by_new_class(dest_attr, new_name)
        if resource1 == resource2:
            # Both classes live in the same file; merge into one collector.
            collector1.add_change(start2, end2, new_content2)
        else:
            collector2 = codeanalyze.ChangeCollector(resource2.read())
            collector2.add_change(start2, end2, new_content2)
            result = collector2.get_changed()
            # The moved body may rely on imports of the source module, so
            # copy the ones it uses into the destination module.
            import_tools = importutils.ImportTools(self.pycore)
            new_imports = self._get_used_imports(import_tools)
            if new_imports:
                goal_pymodule = self.pycore.get_string_module(result,
                                                              resource2)
                result = _add_imports_to_module(
                    import_tools, goal_pymodule, new_imports)
            if resource2 in resources:
                changes.add_change(ChangeContents(resource2, result))
        if resource1 in resources:
            changes.add_change(ChangeContents(resource1,
                                              collector1.get_changed()))
        return changes

    def get_method_name(self):
        # Default name for the moved method: keep the original name.
        return self.method_name

    def _get_used_imports(self, import_tools):
        # Imports referenced from within the moved method's body.
        return importutils.get_imports(self.pycore, self.pyfunction)

    def _get_changes_made_by_old_class(self, dest_attr, new_name):
        """Return (resource, start, end, text) replacing the old body with
        a `return self.<dest_attr>.<new_name>(...)` delegation."""
        pymodule = self.pyfunction.get_module()
        indents = self._get_scope_indents(self.pyfunction)
        body = 'return self.%s.%s(%s)\n' % (dest_attr, new_name,
                                            self._get_passed_arguments_string())
        region = sourceutils.get_body_region(self.pyfunction)
        return (pymodule.get_resource(), region[0], region[1],
                sourceutils.fix_indentation(body, indents))

    def _get_scope_indents(self, pyobject):
        # Indentation of a scope's body: the scope's own indent plus one
        # project-configured indent level.
        pymodule = pyobject.get_module()
        return sourceutils.get_indents(
            pymodule.lines, pyobject.get_scope().get_start()) + \
            sourceutils.get_indent(self.pycore)

    def _get_changes_made_by_new_class(self, dest_attr, new_name):
        """Return (resource, start, end, text) inserting the moved method
        at the end of the destination class' body.

        Raises `RefactoringError` if `dest_attr` does not exist on the
        source class or is not an instance of a known class.
        """
        old_pyclass = self.pyfunction.parent
        if dest_attr not in old_pyclass:
            raise exceptions.RefactoringError(
                'Destination attribute <%s> not found' % dest_attr)
        pyclass = old_pyclass[dest_attr].get_object().get_type()
        if not isinstance(pyclass, pyobjects.PyClass):
            raise exceptions.RefactoringError(
                'Unknown class type for attribute <%s>' % dest_attr)
        pymodule = pyclass.get_module()
        resource = pyclass.get_module().get_resource()
        start, end = sourceutils.get_body_region(pyclass)
        pre_blanks = '\n'
        # If the class body is just `pass`, overwrite it entirely;
        # otherwise append after the existing body with a blank line.
        if pymodule.source_code[start:end].strip() != 'pass':
            pre_blanks = '\n\n'
            start = end
        indents = self._get_scope_indents(pyclass)
        body = pre_blanks + sourceutils.fix_indentation(
            self.get_new_method(new_name), indents)
        return resource, start, end, body

    def get_new_method(self, name):
        """Return the full source of the moved method named `name`."""
        return '%s\n%s' % (
            self._get_new_header(name),
            sourceutils.fix_indentation(self._get_body(),
                                        sourceutils.get_indent(self.pycore)))

    def _get_unchanged_body(self):
        return sourceutils.get_body(self.pyfunction)

    def _get_body(self, host='host'):
        """Return the old body with references to the old `self`
        renamed to `host` (the parameter holding the original object)."""
        self_name = self._get_self_name()
        # Prepend a definition of the self name so the occurrence finder
        # can resolve it in the synthetic module; the extra first line is
        # stripped off again before returning.
        body = self_name + ' = None\n' + self._get_unchanged_body()
        pymodule = self.pycore.get_string_module(body)
        finder = occurrences.create_finder(
            self.pycore, self_name, pymodule[self_name])
        result = rename.rename_in_module(finder, host, pymodule=pymodule)
        if result is None:
            result = body
        return result[result.index('\n') + 1:]

    def _get_self_name(self):
        # First parameter name of the method (usually 'self').
        return self.pyfunction.get_param_names()[0]

    def _get_new_header(self, name):
        """Build the `def` line of the moved method; a `host` parameter
        is added only when the old body actually uses `self`."""
        header = 'def %s(self' % name
        if self._is_host_used():
            header += ', host'
        definition_info = functionutils.DefinitionInfo.read(self.pyfunction)
        others = definition_info.arguments_to_string(1)
        if others:
            header += ', ' + others
        return header + '):'

    def _get_passed_arguments_string(self):
        # Arguments forwarded by the delegating stub left in the old
        # class; `self` is passed as `host` only when needed.
        result = ''
        if self._is_host_used():
            result = 'self'
        definition_info = functionutils.DefinitionInfo.read(self.pyfunction)
        others = definition_info.arguments_to_string(1)
        if others:
            if result:
                result += ', '
            result += others
        return result

    def _is_host_used(self):
        # If renaming `self` to '__old_self' changes the body, the body
        # references the original object and needs a `host` parameter.
        return self._get_body('__old_self') != self._get_unchanged_body()
class MoveGlobal(object):
    """For moving global function and classes"""

    def __init__(self, project, resource, offset):
        self.pycore = project.pycore
        this_pymodule = self.pycore.resource_to_pyobject(resource)
        self.old_pyname = evaluate.eval_location(this_pymodule, offset)
        self.old_name = self.old_pyname.get_object().get_name()
        pymodule = self.old_pyname.get_object().get_module()
        self.source = pymodule.get_resource()
        self.tools = _MoveTools(self.pycore, self.source,
                                self.old_pyname, self.old_name)
        self.import_tools = self.tools.import_tools
        self._check_exceptional_conditions()

    def _check_exceptional_conditions(self):
        # Fail early unless the offset points at a class/function defined
        # at module level.
        if self.old_pyname is None or \
           not isinstance(self.old_pyname.get_object(), pyobjects.PyDefinedObject):
            raise exceptions.RefactoringError(
                'Move refactoring should be performed on a class/function.')
        moving_pyobject = self.old_pyname.get_object()
        if not self._is_global(moving_pyobject):
            raise exceptions.RefactoringError(
                'Move refactoring should be performed on a global class/function.')

    def _is_global(self, pyobject):
        # Global means: defined directly in the module scope.
        return pyobject.get_scope().parent == pyobject.get_module().get_scope()

    def get_changes(self, dest, resources=None,
                    task_handle=taskhandle.NullTaskHandle()):
        """Return a `ChangeSet` moving the element to module `dest`.

        `dest` may be a module file or a package folder (in which case
        the package's `__init__.py` is used).  Raises `RefactoringError`
        for a missing/invalid destination or a same-module move.
        """
        if resources is None:
            resources = self.pycore.get_python_files()
        if dest is None or not dest.exists():
            raise exceptions.RefactoringError(
                'Move destination does not exist.')
        if dest.is_folder() and dest.has_child('__init__.py'):
            dest = dest.get_child('__init__.py')
        if dest.is_folder():
            raise exceptions.RefactoringError(
                'Move destination for non-modules should not be folders.')
        if self.source == dest:
            raise exceptions.RefactoringError(
                'Moving global elements to the same module.')
        return self._calculate_changes(dest, resources, task_handle)

    def _calculate_changes(self, dest, resources, task_handle):
        # Visit every file once: the source and destination modules get
        # dedicated treatment, every other file only has its references
        # redirected to the new location.
        changes = ChangeSet('Moving global <%s>' % self.old_name)
        job_set = task_handle.create_jobset('Collecting Changes',
                                            len(resources))
        for file_ in resources:
            job_set.started_job(file_.path)
            if file_ == self.source:
                changes.add_change(self._source_module_changes(dest))
            elif file_ == dest:
                changes.add_change(self._dest_module_changes(dest))
            elif self.tools.occurs_in_module(resource=file_):
                pymodule = self.pycore.resource_to_pyobject(file_)
                # Changing occurrences: first rename to a unique
                # placeholder, so the real imported name can be filled in
                # after the new import has been added.
                placeholder = '__rope_renaming_%s_' % self.old_name
                source = self.tools.rename_in_module(placeholder,
                                                     resource=file_)
                should_import = source is not None
                # Removing out of date imports
                pymodule = self.tools.new_pymodule(pymodule, source)
                source = self.tools.remove_old_imports(pymodule)
                # Adding new import
                if should_import:
                    pymodule = self.tools.new_pymodule(pymodule, source)
                    source, imported = importutils.add_import(
                        self.pycore, pymodule, self._new_modname(dest), self.old_name)
                    source = source.replace(placeholder, imported)
                source = self.tools.new_source(pymodule, source)
                if source != file_.read():
                    changes.add_change(ChangeContents(file_, source))
            job_set.finished_job()
        return changes

    def _source_module_changes(self, dest):
        # Cut the moving region out of the source module; occurrences
        # outside the region are rewritten to a placeholder which is then
        # replaced by the freshly imported name.
        placeholder = '__rope_moving_%s_' % self.old_name
        handle = _ChangeMoveOccurrencesHandle(placeholder)
        occurrence_finder = occurrences.create_finder(
            self.pycore, self.old_name, self.old_pyname)
        start, end = self._get_moving_region()
        renamer = ModuleSkipRenamer(occurrence_finder, self.source,
                                    handle, start, end)
        source = renamer.get_changed_module()
        if handle.occurred:
            pymodule = self.pycore.get_string_module(source, self.source)
            # Adding new import
            source, imported = importutils.add_import(
                self.pycore, pymodule, self._new_modname(dest), self.old_name)
            source = source.replace(placeholder, imported)
        return ChangeContents(self.source, source)

    def _new_modname(self, dest):
        # Dotted module name of the destination resource.
        return self.pycore.modname(dest)

    def _dest_module_changes(self, dest):
        # Changing occurrences
        pymodule = self.pycore.resource_to_pyobject(dest)
        source = self.tools.rename_in_module(self.old_name, pymodule)
        pymodule = self.tools.new_pymodule(pymodule, source)
        moving, imports = self._get_moving_element_with_imports()
        source = self.tools.remove_old_imports(pymodule)
        pymodule = self.tools.new_pymodule(pymodule, source)
        pymodule, has_changed = self._add_imports2(pymodule, imports)
        module_with_imports = self.import_tools.module_imports(pymodule)
        source = pymodule.source_code
        # Insertion point: right after the last import, or past any
        # leading comment block when there are no imports.
        lineno = 0
        if module_with_imports.imports:
            lineno = module_with_imports.imports[-1].end_line - 1
        else:
            while lineno < pymodule.lines.length() and \
                  pymodule.lines.get_line(lineno + 1).lstrip().startswith('#'):
                lineno += 1
        if lineno > 0:
            cut = pymodule.lines.get_line_end(lineno) + 1
            result = source[:cut] + '\n\n' + moving + source[cut:]
        else:
            result = moving + source
        # Organizing imports
        source = result
        pymodule = self.pycore.get_string_module(source, dest)
        source = self.import_tools.organize_imports(pymodule, sort=False,
                                                    unused=False)
        return ChangeContents(dest, source)

    def _get_moving_element_with_imports(self):
        return moving_code_with_imports(
            self.pycore, self.source, self._get_moving_element())

    def _get_module_with_imports(self, source_code, resource):
        pymodule = self.pycore.get_string_module(source_code, resource)
        return self.import_tools.module_imports(pymodule)

    def _get_moving_element(self):
        # Source text of the definition being moved, normalized to end
        # with exactly one newline.
        start, end = self._get_moving_region()
        moving = self.source.read()[start:end]
        return moving.rstrip() + '\n'

    def _get_moving_region(self):
        """Return the (start, end) character offsets of the definition
        being moved, including trailing blank lines."""
        pymodule = self.pycore.resource_to_pyobject(self.source)
        lines = pymodule.lines
        scope = self.old_pyname.get_object().get_scope()
        start = lines.get_line_start(scope.get_start())
        end_line = scope.get_end()
        # Swallow trailing blank lines so no gap is left behind.
        while end_line < lines.length() and \
              lines.get_line(end_line + 1).strip() == '':
            end_line += 1
        end = min(lines.get_line_end(end_line) + 1, len(pymodule.source_code))
        return start, end

    def _add_imports2(self, pymodule, new_imports):
        # Like tools.add_imports but reparses and reports whether the
        # module actually changed.
        source = self.tools.add_imports(pymodule, new_imports)
        if source is None:
            return pymodule, False
        else:
            resource = pymodule.get_resource()
            pymodule = self.pycore.get_string_module(source, resource)
            return pymodule, True
class MoveModule(object):
    """For moving modules and packages"""

    def __init__(self, project, resource):
        self.project = project
        self.pycore = project.pycore
        # Moving a package's __init__.py means moving the whole package.
        if not resource.is_folder() and resource.name == '__init__.py':
            resource = resource.parent
        if resource.is_folder() and not resource.has_child('__init__.py'):
            raise exceptions.RefactoringError(
                'Cannot move non-package folder.')
        dummy_pymodule = self.pycore.get_string_module('')
        self.old_pyname = pynames.ImportedModule(dummy_pymodule,
                                                 resource=resource)
        self.source = self.old_pyname.get_object().get_resource()
        if self.source.is_folder():
            self.old_name = self.source.name
        else:
            # Strip the '.py' extension to get the module name.
            self.old_name = self.source.name[:-3]
        self.tools = _MoveTools(self.pycore, self.source,
                                self.old_pyname, self.old_name)
        self.import_tools = self.tools.import_tools

    def get_changes(self, dest, resources=None,
                    task_handle=taskhandle.NullTaskHandle()):
        """Return a `ChangeSet` moving this module/package into the
        destination package folder `dest`."""
        # NOTE(review): `moving_pyobject` appears unused here.
        moving_pyobject = self.old_pyname.get_object()
        if resources is None:
            resources = self.pycore.get_python_files()
        if dest is None or not dest.is_folder():
            raise exceptions.RefactoringError(
                'Move destination for modules should be packages.')
        return self._calculate_changes(dest, resources, task_handle)

    def _calculate_changes(self, dest, resources, task_handle):
        changes = ChangeSet('Moving module <%s>' % self.old_name)
        job_set = task_handle.create_jobset('Collecting changes',
                                            len(resources))
        for module in resources:
            job_set.started_job(module.path)
            if module == self.source:
                # The moved module itself only needs its relative imports
                # fixed and self references updated.
                self._change_moving_module(changes, dest)
            else:
                source = self._change_occurrences_in_module(dest,
                                                            resource=module)
                if source is not None:
                    changes.add_change(ChangeContents(module, source))
            job_set.finished_job()
        if self.project == self.source.project:
            changes.add_change(MoveResource(self.source, dest.path))
        return changes

    def _new_modname(self, dest):
        # Dotted name of the module after the move.
        destname = self.pycore.modname(dest)
        if destname:
            return destname + '.' + self.old_name
        return self.old_name

    def _new_import(self, dest):
        return importutils.NormalImport([(self._new_modname(dest), None)])

    def _change_moving_module(self, changes, dest):
        if not self.source.is_folder():
            pymodule = self.pycore.resource_to_pyobject(self.source)
            # Relative imports would break at the new location.
            source = self.import_tools.relatives_to_absolutes(pymodule)
            pymodule = self.tools.new_pymodule(pymodule, source)
            source = self._change_occurrences_in_module(dest, pymodule)
            source = self.tools.new_source(pymodule, source)
            if source != self.source.read():
                changes.add_change(ChangeContents(self.source, source))

    def _change_occurrences_in_module(self, dest, pymodule=None,
                                      resource=None):
        """Return the new source for a module referencing the moved one,
        or `None` (implicitly) if nothing needs changing."""
        if not self.tools.occurs_in_module(pymodule=pymodule,
                                           resource=resource):
            return
        if pymodule is None:
            pymodule = self.pycore.resource_to_pyobject(resource)
        new_name = self._new_modname(dest)
        new_import = self._new_import(dest)
        source = self.tools.rename_in_module(
            new_name, imports=True, pymodule=pymodule, resource=resource)
        # An import statement is only needed when the name occurs outside
        # of import statements as well.
        should_import = self.tools.occurs_in_module(
            pymodule=pymodule, resource=resource, imports=False)
        pymodule = self.tools.new_pymodule(pymodule, source)
        source = self.tools.remove_old_imports(pymodule)
        if should_import:
            pymodule = self.tools.new_pymodule(pymodule, source)
            source = self.tools.add_imports(pymodule, [new_import])
        source = self.tools.new_source(pymodule, source)
        if source != pymodule.resource.read():
            return source
class _ChangeMoveOccurrencesHandle(object):
def __init__(self, new_name):
self.new_name = new_name
self.occurred = False
def occurred_inside_skip(self, change_collector, occurrence):
pass
def occurred_outside_skip(self, change_collector, occurrence):
start, end = occurrence.get_primary_range()
change_collector.add_change(start, end, self.new_name)
self.occurred = True
class _MoveTools(object):
    """Shared helpers for the Move* refactorings: finding, renaming and
    re-importing occurrences of the moved element."""

    def __init__(self, pycore, source, pyname, old_name):
        self.pycore = pycore
        self.source = source
        self.old_pyname = pyname
        self.old_name = old_name
        self.import_tools = importutils.ImportTools(self.pycore)

    def remove_old_imports(self, pymodule):
        """Drop imports of the moved element from `pymodule`; return the
        new source, or `None` (implicitly) if nothing changed."""
        old_source = pymodule.source_code
        module_with_imports = self.import_tools.module_imports(pymodule)
        # Predicate object used by filter_names: returns False (i.e.
        # "remove") exactly for imported names that resolve to the moved
        # object.  Defined as a class to capture the enclosing state.
        class CanSelect(object):
            changed = False
            old_name = self.old_name
            old_pyname = self.old_pyname
            def __call__(self, name):
                try:
                    if name == self.old_name and \
                       pymodule[name].get_object() == \
                       self.old_pyname.get_object():
                        self.changed = True
                        return False
                except exceptions.AttributeNotFoundError:
                    pass
                return True
        can_select = CanSelect()
        module_with_imports.filter_names(can_select)
        new_source = module_with_imports.get_changed_source()
        if old_source != new_source:
            return new_source

    def rename_in_module(self, new_name, pymodule=None,
                         imports=False, resource=None):
        """Rename all occurrences of the moved element to `new_name`;
        `imports` controls whether import statements are included."""
        occurrence_finder = self._create_finder(imports)
        source = rename.rename_in_module(
            occurrence_finder, new_name, replace_primary=True,
            pymodule=pymodule, resource=resource)
        return source

    def occurs_in_module(self, pymodule=None, resource=None, imports=True):
        # True iff at least one occurrence exists; stops at the first hit.
        finder = self._create_finder(imports)
        for occurrence in finder.find_occurrences(pymodule=pymodule,
                                                  resource=resource):
            return True
        return False

    def _create_finder(self, imports):
        return occurrences.create_finder(self.pycore, self.old_name,
                                         self.old_pyname, imports=imports)

    def new_pymodule(self, pymodule, source):
        # Reparse only when a change was actually made (source not None).
        if source is not None:
            return self.pycore.get_string_module(
                source, pymodule.get_resource())
        return pymodule

    def new_source(self, pymodule, source):
        # Normalize "no change" (None) to the module's current source.
        if source is None:
            return pymodule.source_code
        return source

    def add_imports(self, pymodule, new_imports):
        return _add_imports_to_module(self.import_tools, pymodule, new_imports)
def _add_imports_to_module(import_tools, pymodule, new_imports):
    """Add every import info in `new_imports` to `pymodule` and return
    the resulting module source."""
    imports = import_tools.module_imports(pymodule)
    for import_info in new_imports:
        imports.add_import(import_info)
    return imports.get_changed_source()
def moving_code_with_imports(pycore, resource, source):
    """Prepare `source` (code being moved out of `resource`) for
    insertion into another module.

    Returns a `(moving, imports)` tuple: `moving` is the code with its
    leading import section stripped off, and `imports` is the list of
    import infos the destination needs — the origin module's imports
    plus a from-import for any origin-level names the moved code still
    references.
    """
    import_tools = importutils.ImportTools(pycore)
    pymodule = pycore.get_string_module(source, resource)
    origin = pycore.resource_to_pyobject(resource)
    # Start with every import of the origin module.
    imports = []
    for stmt in import_tools.module_imports(origin).imports:
        imports.append(stmt.import_info)
    # Names defined in the origin but not in the moved code must be
    # imported back from the origin module.
    back_names = []
    for name in origin:
        if name not in pymodule:
            back_names.append(name)
    imports.append(import_tools.get_from_import(resource, back_names))
    # Add the imports, absolutize relatives, and organize — reparsing
    # after each textual transformation.
    source = _add_imports_to_module(import_tools, pymodule, imports)
    pymodule = pycore.get_string_module(source, resource)
    source = import_tools.relatives_to_absolutes(pymodule)
    pymodule = pycore.get_string_module(source, resource)
    source = import_tools.organize_imports(pymodule, selfs=False)
    pymodule = pycore.get_string_module(source, resource)
    # extracting imports after changes
    module_imports = import_tools.module_imports(pymodule)
    imports = [import_stmt.import_info
               for import_stmt in module_imports.imports]
    # Skip past the import section (and any blank lines after it) to get
    # the code that will actually be inserted at the destination.
    start = 1
    if module_imports.imports:
        start = module_imports.imports[-1].end_line
    lines = codeanalyze.SourceLinesAdapter(source)
    while start < lines.length() and not lines.get_line(start).strip():
        start += 1
    moving = source[lines.get_line_start(start):]
    return moving, imports
class ModuleSkipRenamerHandle(object):
    """Default no-op handle for `ModuleSkipRenamer`.

    Subclasses may override either hook to react to occurrences found
    outside or inside the skipped region; the default ignores both.
    """

    def occurred_outside_skip(self, change_collector, occurrence):
        pass

    def occurred_inside_skip(self, change_collector, occurrence):
        pass
class ModuleSkipRenamer(object):
    """Rename occurrences in a module

    This class can be used when you want to treat a region in a file
    separately from other parts when renaming.
    """

    def __init__(self, occurrence_finder, resource, handle=None,
                 skip_start=0, skip_end=0, replacement=''):
        """Constructor

        if replacement is `None` the region is not changed.  Otherwise
        it is replaced with `replacement`.
        """
        self.occurrence_finder = occurrence_finder
        self.resource = resource
        self.skip_start = skip_start
        self.skip_end = skip_end
        self.replacement = replacement
        self.handle = handle
        if self.handle is None:
            # Bug fix: this used to instantiate the undefined name
            # `ModuleSkipHandle`, raising NameError whenever no handle
            # was supplied; the intended no-op default is
            # `ModuleSkipRenamerHandle`.
            self.handle = ModuleSkipRenamerHandle()

    def get_changed_module(self):
        """Return the changed module source, or `None` (implicitly) when
        nothing changed.

        The skip region is replaced with `replacement` (unless it is
        `None`); every occurrence found by the finder is dispatched to
        the handle's inside/outside hook depending on where it lies.
        """
        source = self.resource.read()
        change_collector = codeanalyze.ChangeCollector(source)
        if self.replacement is not None:
            change_collector.add_change(self.skip_start, self.skip_end,
                                        self.replacement)
        for occurrence in self.occurrence_finder.find_occurrences(self.resource):
            start, end = occurrence.get_primary_range()
            if self.skip_start <= start < self.skip_end:
                self.handle.occurred_inside_skip(change_collector, occurrence)
            else:
                self.handle.occurred_outside_skip(change_collector, occurrence)
        result = change_collector.get_changed()
        if result is not None and result != source:
            return result
| lgpl-3.0 |
yhe39/crosswalk-test-suite | webapi/tct-filewriterapi-w3c-tests/inst.apk.py | 1996 | 3186 | #!/usr/bin/env python
import os
import shutil
import glob
import time
import sys
import subprocess
from optparse import OptionParser, make_option
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
PARAMETERS = None
ADB_CMD = "adb"
def doCMD(cmd):
    """Run *cmd* through the shell, echoing its output line by line.

    Blocks until the child process exits and returns a
    ``(return_code, output_lines)`` tuple.
    """
    # Do not need handle timeout in this short script, let tool do it
    print "-->> \"%s\"" % cmd
    output = []
    cmd_return_code = 1
    cmd_proc = subprocess.Popen(
        cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
    while True:
        output_line = cmd_proc.stdout.readline().strip("\r\n")
        cmd_return_code = cmd_proc.poll()
        # Stop once the process has exited and its output is drained
        # (poll() returns None while the child is still running).
        if output_line == '' and cmd_return_code is not None:
            break
        sys.stdout.write("%s\n" % output_line)
        sys.stdout.flush()
        output.append(output_line)
    return (cmd_return_code, output)
def uninstPKGs():
    """Uninstall every package corresponding to an .apk under SCRIPT_DIR.

    The package name is derived as ``org.xwalk.<apk basename>``.
    Returns True only if no uninstall reported "Failure".
    """
    action_status = True
    for root, dirs, files in os.walk(SCRIPT_DIR):
        for file in files:
            if file.endswith(".apk"):
                cmd = "%s -s %s uninstall org.xwalk.%s" % (
                    ADB_CMD, PARAMETERS.device, os.path.basename(os.path.splitext(file)[0]))
                (return_code, output) = doCMD(cmd)
                for line in output:
                    if "Failure" in line:
                        action_status = False
                        # Only stops scanning this command's output;
                        # remaining .apk files are still processed.
                        break
    return action_status
def instPKGs():
    """Install every .apk found under SCRIPT_DIR via adb.

    Returns True only if no install reported "Failure".
    """
    action_status = True
    for root, dirs, files in os.walk(SCRIPT_DIR):
        for file in files:
            if file.endswith(".apk"):
                cmd = "%s -s %s install %s" % (ADB_CMD,
                                               PARAMETERS.device, os.path.join(root, file))
                (return_code, output) = doCMD(cmd)
                for line in output:
                    if "Failure" in line:
                        action_status = False
                        # Only stops scanning this command's output;
                        # remaining .apk files are still processed.
                        break
    return action_status
def main():
    """Parse command-line options and install or uninstall the packages.

    Exits with status 1 on bad options, no device, conflicting flags,
    or a failed adb operation; returns normally on success.
    """
    try:
        usage = "usage: inst.py -i"
        opts_parser = OptionParser(usage=usage)
        opts_parser.add_option(
            "-s", dest="device", action="store", help="Specify device")
        opts_parser.add_option(
            "-i", dest="binstpkg", action="store_true", help="Install package")
        opts_parser.add_option(
            "-u", dest="buninstpkg", action="store_true", help="Uninstall package")
        global PARAMETERS
        (PARAMETERS, args) = opts_parser.parse_args()
    except Exception as e:
        print "Got wrong option: %s, exit ..." % e
        sys.exit(1)
    # No -s given: fall back to the first device reported by `adb devices`.
    if not PARAMETERS.device:
        (return_code, output) = doCMD("adb devices")
        for line in output:
            if str.find(line, "\tdevice") != -1:
                PARAMETERS.device = line.split("\t")[0]
                break
    if not PARAMETERS.device:
        print "No device found"
        sys.exit(1)
    if PARAMETERS.binstpkg and PARAMETERS.buninstpkg:
        print "-i and -u are conflict"
        sys.exit(1)
    if PARAMETERS.buninstpkg:
        if not uninstPKGs():
            sys.exit(1)
    else:
        # Default action (with or without -i) is to install.
        if not instPKGs():
            sys.exit(1)
if __name__ == "__main__":
    main()
    # main() calls sys.exit(1) on any failure, so reaching this line
    # means everything succeeded.
    sys.exit(0)
| bsd-3-clause |
noba3/KoTos | addons/plugin.video.mega/resources/lib/platform_libraries/Xbox/Crypto/Protocol/AllOrNothing.py | 226 | 10952 | """This file implements all-or-nothing package transformations.
An all-or-nothing package transformation is one in which some text is
transformed into message blocks, such that all blocks must be obtained before
the reverse transformation can be applied. Thus, if any blocks are corrupted
or lost, the original message cannot be reproduced.
An all-or-nothing package transformation is not encryption, although a block
cipher algorithm is used. The encryption key is randomly generated and is
extractable from the message blocks.
This class implements the All-Or-Nothing package transformation algorithm
described in:
Ronald L. Rivest. "All-Or-Nothing Encryption and The Package Transform"
http://theory.lcs.mit.edu/~rivest/fusion.pdf
"""
__revision__ = "$Id: AllOrNothing.py,v 1.8 2003/02/28 15:23:20 akuchling Exp $"
import operator
import string
from Crypto.Util.number import bytes_to_long, long_to_bytes
class AllOrNothing:
    """Class implementing the All-or-Nothing package transform.

    Methods for subclassing:

        _inventkey(key_size):
            Returns a randomly generated key.  Subclasses can use this to
            implement better random key generating algorithms.  The default
            algorithm is probably not very cryptographically secure.
    """

    def __init__(self, ciphermodule, mode=None, IV=None):
        """AllOrNothing(ciphermodule, mode=None, IV=None)

        ciphermodule is a module implementing the cipher algorithm to
        use.  It must provide the PEP272 interface.

        Note that the encryption key is randomly generated
        automatically when needed.  Optional arguments mode and IV are
        passed directly through to the ciphermodule.new() method; they
        are the feedback mode and initialization vector to use.  All
        three arguments must be the same for the object used to create
        the digest, and to undigest'ify the message blocks.
        """
        self.__ciphermodule = ciphermodule
        self.__mode = mode
        self.__IV = IV
        self.__key_size = ciphermodule.key_size
        if self.__key_size == 0:
            # key_size == 0 signals a variable-length key cipher;
            # fall back to 16-byte (128-bit) keys in that case.
            self.__key_size = 16

    # Byte repeated key_size times to form K0, the fixed "well-known"
    # key under which the hash blocks are encrypted.
    __K0digit = chr(0x69)

    def digest(self, text):
        """digest(text:string) : [string]

        Perform the All-or-Nothing package transform on the given
        string.  Output is a list of message blocks describing the
        transformed text, where each block is a string of bit length equal
        to the ciphermodule's block_size.
        """
        # generate a random session key and K0, the key used to encrypt the
        # hash blocks.  Rivest calls this a fixed, publically-known encryption
        # key, but says nothing about the security implications of this key or
        # how to choose it.
        key = self._inventkey(self.__key_size)
        K0 = self.__K0digit * self.__key_size
        # we need two cipher objects here, one that is used to encrypt the
        # message blocks and one that is used to encrypt the hashes.  The
        # former uses the randomly generated key, while the latter uses the
        # well-known key.
        mcipher = self.__newcipher(key)
        hcipher = self.__newcipher(K0)
        # Pad the text so that its length is a multiple of the cipher's
        # block_size.  Pad with trailing spaces, which will be eliminated in
        # the undigest() step.
        block_size = self.__ciphermodule.block_size
        padbytes = block_size - (len(text) % block_size)
        text = text + ' ' * padbytes
        # Run through the algorithm:
        # s: number of message blocks (size of text / block_size)
        # input sequence: m1, m2, ... ms
        # random key K' (`key' in the code)
        # Compute output sequence: m'1, m'2, ... m's' for s' = s + 1
        # Let m'i = mi ^ E(K', i) for i = 1, 2, 3, ..., s
        # Let m's' = K' ^ h1 ^ h2 ^ ... hs
        # where hi = E(K0, m'i ^ i) for i = 1, 2, ... s
        #
        # The one complication I add is that the last message block is hard
        # coded to the number of padbytes added, so that these can be stripped
        # during the undigest() step
        # NB: Python 2 integer division.
        s = len(text) / block_size
        blocks = []
        hashes = []
        for i in range(1, s+1):
            start = (i-1) * block_size
            end = start + block_size
            mi = text[start:end]
            assert len(mi) == block_size
            cipherblock = mcipher.encrypt(long_to_bytes(i, block_size))
            mticki = bytes_to_long(mi) ^ bytes_to_long(cipherblock)
            blocks.append(mticki)
            # calculate the hash block for this block
            hi = hcipher.encrypt(long_to_bytes(mticki ^ i, block_size))
            hashes.append(bytes_to_long(hi))
        # Add the padbytes length as a message block
        i = i + 1
        cipherblock = mcipher.encrypt(long_to_bytes(i, block_size))
        mticki = padbytes ^ bytes_to_long(cipherblock)
        blocks.append(mticki)
        # calculate this block's hash
        hi = hcipher.encrypt(long_to_bytes(mticki ^ i, block_size))
        hashes.append(bytes_to_long(hi))
        # Now calculate the last message block of the sequence 1..s'.  This
        # will contain the random session key XOR'd with all the hash blocks,
        # so that for undigest(), once all the hash blocks are calculated, the
        # session key can be trivially extracted.  Calculating all the hash
        # blocks requires that all the message blocks be received, thus the
        # All-or-Nothing algorithm succeeds.
        mtick_stick = bytes_to_long(key) ^ reduce(operator.xor, hashes)
        blocks.append(mtick_stick)
        # we convert the blocks to strings since in Python, byte sequences are
        # always represented as strings.  This is more consistent with the
        # model that encryption and hash algorithms always operate on strings.
        return map(long_to_bytes, blocks)

    def undigest(self, blocks):
        """undigest(blocks : [string]) : string

        Perform the reverse package transformation on a list of message
        blocks.  Note that the ciphermodule used for both transformations
        must be the same.  blocks is a list of strings of bit length
        equal to the ciphermodule's block_size.
        """
        # better have at least 2 blocks, for the padbytes package and the hash
        # block accumulator
        if len(blocks) < 2:
            raise ValueError, "List must be at least length 2."
        # blocks is a list of strings.  We need to deal with them as long
        # integers
        blocks = map(bytes_to_long, blocks)
        # Calculate the well-known key, to which the hash blocks are
        # encrypted, and create the hash cipher.
        K0 = self.__K0digit * self.__key_size
        hcipher = self.__newcipher(K0)
        # Since we have all the blocks (or this method would have been called
        # prematurely), we can calcualte all the hash blocks.
        hashes = []
        for i in range(1, len(blocks)):
            mticki = blocks[i-1] ^ i
            hi = hcipher.encrypt(long_to_bytes(mticki))
            hashes.append(bytes_to_long(hi))
        # now we can calculate K' (key).  remember the last block contains
        # m's' which we don't include here
        key = blocks[-1] ^ reduce(operator.xor, hashes)
        # and now we can create the cipher object
        mcipher = self.__newcipher(long_to_bytes(key))
        block_size = self.__ciphermodule.block_size
        # And we can now decode the original message blocks
        parts = []
        for i in range(1, len(blocks)):
            cipherblock = mcipher.encrypt(long_to_bytes(i, block_size))
            mi = blocks[i-1] ^ bytes_to_long(cipherblock)
            parts.append(mi)
        # The last message block contains the number of pad bytes appended to
        # the original text string, such that its length was an even multiple
        # of the cipher's block_size.  This number should be small enough that
        # the conversion from long integer to integer should never overflow
        padbytes = int(parts[-1])
        text = string.join(map(long_to_bytes, parts[:-1]), '')
        return text[:-padbytes]

    def _inventkey(self, key_size):
        # TBD: Not a very secure algorithm.  Eventually, I'd like to use JHy's
        # kernelrand module
        import time
        from Crypto.Util import randpool
        # TBD: key_size * 2 to work around possible bug in RandomPool?
        pool = randpool.RandomPool(key_size * 2)
        while key_size > pool.entropy:
            pool.add_event()
        # we now have enough entropy in the pool to get a key_size'd key
        return pool.get_bytes(key_size)

    def __newcipher(self, key):
        # Build a cipher object, forwarding mode/IV only when they were
        # supplied to the constructor.
        if self.__mode is None and self.__IV is None:
            return self.__ciphermodule.new(key)
        elif self.__IV is None:
            return self.__ciphermodule.new(key, self.__mode)
        else:
            return self.__ciphermodule.new(key, self.__mode, self.__IV)
if __name__ == '__main__':
    # Self-test: package this module's docstring through the transform,
    # print the message blocks, then unpack and compare.
    import sys
    import getopt
    import base64
    usagemsg = '''\
Test module usage: %(program)s [-c cipher] [-l] [-h]
Where:
    --cipher module
    -c module
        Cipher module to use. Default: %(ciphermodule)s
    --aslong
    -l
        Print the encoded message blocks as long integers instead of base64
        encoded strings
    --help
    -h
        Print this help message
'''
    ciphermodule = 'AES'
    aslong = 0
    def usage(code, msg=None):
        # Print an optional error message, the usage text, and exit.
        if msg:
            print msg
        print usagemsg % {'program': sys.argv[0],
                          'ciphermodule': ciphermodule}
        sys.exit(code)
    try:
        opts, args = getopt.getopt(sys.argv[1:],
                                   'c:l', ['cipher=', 'aslong'])
    except getopt.error, msg:
        usage(1, msg)
    if args:
        usage(1, 'Too many arguments')
    for opt, arg in opts:
        if opt in ('-h', '--help'):
            usage(0)
        elif opt in ('-c', '--cipher'):
            ciphermodule = arg
        elif opt in ('-l', '--aslong'):
            aslong = 1
    # ugly hack to force __import__ to give us the end-path module
    module = __import__('Crypto.Cipher.'+ciphermodule, None, None, ['new'])
    a = AllOrNothing(module)
    print 'Original text:\n=========='
    print __doc__
    print '=========='
    msgblocks = a.digest(__doc__)
    print 'message blocks:'
    # Python 2 idiom: map(None, seq1, seq2) pairs index with block.
    for i, blk in map(None, range(len(msgblocks)), msgblocks):
        # base64 adds a trailing newline
        print ' %3d' % i,
        if aslong:
            print bytes_to_long(blk)
        else:
            print base64.encodestring(blk)[:-1]
    #
    # get a new undigest-only object so there's no leakage
    b = AllOrNothing(module)
    text = b.undigest(msgblocks)
    if text == __doc__:
        print 'They match!'
    else:
        print 'They differ!'
| gpl-2.0 |
tkasp/osmose-backend | analysers/analyser_osmosis_relation_open.py | 4 | 3760 | #!/usr/bin/env python
#-*- coding: utf-8 -*-
###########################################################################
## ##
## Copyrights Frédéric Rodrigo 2020 ##
## ##
## This program is free software: you can redistribute it and/or modify ##
## it under the terms of the GNU General Public License as published by ##
## the Free Software Foundation, either version 3 of the License, or ##
## (at your option) any later version. ##
## ##
## This program is distributed in the hope that it will be useful, ##
## but WITHOUT ANY WARRANTY; without even the implied warranty of ##
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ##
## GNU General Public License for more details. ##
## ##
## You should have received a copy of the GNU General Public License ##
## along with this program. If not, see <http://www.gnu.org/licenses/>. ##
## ##
###########################################################################
from modules.OsmoseTranslation import T_
from .Analyser_Osmosis import Analyser_Osmosis
sql10 = """
SELECT
t.id,
nodes.id AS nid,
ST_AsText(nodes.geom),
CASE
WHEN admin_level IN ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14')
THEN 100 + admin_level::int
WHEN type = 'boundary' THEN 1
WHEN type = 'multipolygon' THEN 5
END
FROM (
SELECT
relations.id,
relations.tags->'type' AS type,
relations.tags->'admin_level' AS admin_level,
ends(ways.nodes) AS nid
FROM
relations
JOIN relation_members ON
relation_members.relation_id = relations.id AND
relation_members.member_type = 'W'
JOIN ways ON
ways.id = relation_members.member_id
WHERE
relations.tags?'type' AND
relations.tags->'type' IN ('boundary', 'multipolygon')
GROUP BY
relations.id,
relations.tags->'type',
relations.tags->'admin_level',
nid
HAVING
count(*) % 2 != 0
) AS t
JOIN nodes ON
nodes.id = t.nid
"""
class Analyser_Osmosis_Relation_Open(Analyser_Osmosis):
    """Report the dangling ends of type=boundary / type=multipolygon
    relations that should form closed polygons (see sql10)."""

    def __init__(self, config, logger = None):
        Analyser_Osmosis.__init__(self, config, logger)
        open_detail = T_(
'''A relation that should be a closed polygon and it is not. An issue is
reported at each end of open part.''')
        # Generic classes: untyped boundary (1) and multipolygon (5).
        self.classs[1] = self.def_class(item = 6010, level = 3, tags = ['geom', 'boundary'],
            title = T_('Open relation type=boundary'),
            detail = open_detail)
        self.classs[5] = self.def_class(item = 1170, level = 2, tags = ['geom'],
            title = T_('Open relation type=multipolygon'))
        # One class per admin_level (keyed 100 + admin_level); severity
        # rises with administrative importance: <=6 -> 1, 7-8 -> 2, else 3.
        for admin_level in range(15):
            severity = 1 if admin_level <= 6 else 2 if admin_level <= 8 else 3
            self.classs[100 + admin_level] = self.def_class(item = 6010, level = severity, tags = ['geom', 'boundary'],
                title = T_('Open relation type=boundary admin_level={0}', admin_level),
                detail = open_detail)

    def analyser_osmosis_common(self):
        # sql10 rows are (relation id, node id, node position, class number).
        def issue(res):
            return {"class": res[3], "data": [self.relation_full, self.node, self.positionAsText]}
        self.run(sql10, issue)
| gpl-3.0 |
PRIMEDesigner15/PRIMEDesigner15 | dependencies/Lib/test/unittests/test_nntplib.py | 23 | 56030 | import io
import socket
import datetime
import textwrap
import unittest
import functools
import contextlib
from test import support
from nntplib import NNTP, GroupInfo, _have_ssl
import nntplib
if _have_ssl:
import ssl
# Generous per-operation timeout (seconds) for the tests below that talk
# to live NNTP servers.
TIMEOUT = 30

# TODO:
# - test the `file` arg to more commands
# - test error conditions
# - test auth and `usenetrc`
class NetworkedNNTPTestsMixin:
    """Tests run against a live NNTP server.

    Concrete subclasses supply NNTP_HOST, GROUP_NAME, GROUP_PAT,
    NNTP_CLASS and a class-level ``server`` connection shared by all
    tests (closed by test_zzquit, which must run last).
    """

    def test_welcome(self):
        welcome = self.server.getwelcome()
        self.assertEqual(str, type(welcome))

    def test_help(self):
        resp, lines = self.server.help()
        self.assertTrue(resp.startswith("100 "), resp)
        for line in lines:
            self.assertEqual(str, type(line))

    def test_list(self):
        resp, groups = self.server.list()
        if len(groups) > 0:
            self.assertEqual(GroupInfo, type(groups[0]))
            self.assertEqual(str, type(groups[0].group))

    def test_list_active(self):
        resp, groups = self.server.list(self.GROUP_PAT)
        if len(groups) > 0:
            self.assertEqual(GroupInfo, type(groups[0]))
            self.assertEqual(str, type(groups[0].group))

    def test_unknown_command(self):
        with self.assertRaises(nntplib.NNTPPermanentError) as cm:
            self.server._shortcmd("XYZZY")
        resp = cm.exception.response
        self.assertTrue(resp.startswith("500 "), resp)

    def test_newgroups(self):
        # gmane gets a constant influx of new groups. In order not to stress
        # the server too much, we choose a recent date in the past.
        dt = datetime.date.today() - datetime.timedelta(days=7)
        resp, groups = self.server.newgroups(dt)
        if len(groups) > 0:
            self.assertIsInstance(groups[0], GroupInfo)
            self.assertIsInstance(groups[0].group, str)

    def test_description(self):
        def _check_desc(desc):
            # Sanity checks
            self.assertIsInstance(desc, str)
            self.assertNotIn(self.GROUP_NAME, desc)
        desc = self.server.description(self.GROUP_NAME)
        _check_desc(desc)
        # Another sanity check
        self.assertIn("Python", desc)
        # With a pattern
        desc = self.server.description(self.GROUP_PAT)
        _check_desc(desc)
        # Shouldn't exist
        desc = self.server.description("zk.brrtt.baz")
        self.assertEqual(desc, '')

    def test_descriptions(self):
        resp, descs = self.server.descriptions(self.GROUP_PAT)
        # 215 for LIST NEWSGROUPS, 282 for XGTITLE
        self.assertTrue(
            resp.startswith("215 ") or resp.startswith("282 "), resp)
        self.assertIsInstance(descs, dict)
        desc = descs[self.GROUP_NAME]
        self.assertEqual(desc, self.server.description(self.GROUP_NAME))

    def test_group(self):
        result = self.server.group(self.GROUP_NAME)
        self.assertEqual(5, len(result))
        resp, count, first, last, group = result
        self.assertEqual(group, self.GROUP_NAME)
        self.assertIsInstance(count, int)
        self.assertIsInstance(first, int)
        self.assertIsInstance(last, int)
        self.assertLessEqual(first, last)
        self.assertTrue(resp.startswith("211 "), resp)

    def test_date(self):
        resp, date = self.server.date()
        self.assertIsInstance(date, datetime.datetime)
        # Sanity check
        self.assertGreaterEqual(date.year, 1995)
        self.assertLessEqual(date.year, 2030)

    def _check_art_dict(self, art_dict):
        # Some sanity checks for a field dictionary returned by OVER / XOVER
        self.assertIsInstance(art_dict, dict)
        # NNTP has 7 mandatory fields
        self.assertGreaterEqual(art_dict.keys(),
                                {"subject", "from", "date", "message-id",
                                 "references", ":bytes", ":lines"}
                                )
        for v in art_dict.values():
            self.assertIsInstance(v, (str, type(None)))

    def test_xover(self):
        resp, count, first, last, name = self.server.group(self.GROUP_NAME)
        resp, lines = self.server.xover(last - 5, last)
        if len(lines) == 0:
            self.skipTest("no articles retrieved")
        # The 'last' article is not necessarily part of the output (cancelled?)
        art_num, art_dict = lines[0]
        self.assertGreaterEqual(art_num, last - 5)
        self.assertLessEqual(art_num, last)
        self._check_art_dict(art_dict)

    def test_over(self):
        resp, count, first, last, name = self.server.group(self.GROUP_NAME)
        start = last - 10
        # The "start-" article range form
        resp, lines = self.server.over((start, None))
        art_num, art_dict = lines[0]
        self._check_art_dict(art_dict)
        # The "start-end" article range form
        resp, lines = self.server.over((start, last))
        art_num, art_dict = lines[-1]
        # The 'last' article is not necessarily part of the output (cancelled?)
        self.assertGreaterEqual(art_num, start)
        self.assertLessEqual(art_num, last)
        self._check_art_dict(art_dict)
        # XXX The "message_id" form is unsupported by gmane
        # 503 Overview by message-ID unsupported

    def test_xhdr(self):
        resp, count, first, last, name = self.server.group(self.GROUP_NAME)
        resp, lines = self.server.xhdr('subject', last)
        for line in lines:
            self.assertEqual(str, type(line[1]))

    def check_article_resp(self, resp, article, art_num=None):
        # Shared sanity checks for ARTICLE / HEAD / BODY results.
        self.assertIsInstance(article, nntplib.ArticleInfo)
        if art_num is not None:
            self.assertEqual(article.number, art_num)
        for line in article.lines:
            self.assertIsInstance(line, bytes)
        # XXX this could exceptionally happen...
        self.assertNotIn(article.lines[-1], (b".", b".\n", b".\r\n"))

    def test_article_head_body(self):
        resp, count, first, last, name = self.server.group(self.GROUP_NAME)
        # Try to find an available article
        for art_num in (last, first, last - 1):
            try:
                resp, head = self.server.head(art_num)
            except nntplib.NNTPTemporaryError as e:
                if not e.response.startswith("423 "):
                    raise
                # "423 No such article" => choose another one
                continue
            break
        else:
            self.skipTest("could not find a suitable article number")
        self.assertTrue(resp.startswith("221 "), resp)
        self.check_article_resp(resp, head, art_num)
        resp, body = self.server.body(art_num)
        self.assertTrue(resp.startswith("222 "), resp)
        self.check_article_resp(resp, body, art_num)
        resp, article = self.server.article(art_num)
        self.assertTrue(resp.startswith("220 "), resp)
        self.check_article_resp(resp, article, art_num)
        # Tolerate running the tests from behind a NNTP virus checker
        blacklist = lambda line: line.startswith(b'X-Antivirus')
        filtered_head_lines = [line for line in head.lines
                               if not blacklist(line)]
        filtered_lines = [line for line in article.lines
                          if not blacklist(line)]
        self.assertEqual(filtered_lines, filtered_head_lines + [b''] + body.lines)

    def test_capabilities(self):
        # The server under test implements NNTP version 2 and has a
        # couple of well-known capabilities. Just sanity check that we
        # got them.
        def _check_caps(caps):
            caps_list = caps['LIST']
            self.assertIsInstance(caps_list, (list, tuple))
            self.assertIn('OVERVIEW.FMT', caps_list)
        self.assertGreaterEqual(self.server.nntp_version, 2)
        _check_caps(self.server.getcapabilities())
        # This re-emits the command
        resp, caps = self.server.capabilities()
        _check_caps(caps)

    if _have_ssl:
        def test_starttls(self):
            file = self.server.file
            sock = self.server.sock
            try:
                self.server.starttls()
            except nntplib.NNTPPermanentError:
                self.skipTest("STARTTLS not supported by server.")
            else:
                # Check that the socket and internal pseudo-file really were
                # changed.
                self.assertNotEqual(file, self.server.file)
                self.assertNotEqual(sock, self.server.sock)
                # Check that the new socket really is an SSL one
                self.assertIsInstance(self.server.sock, ssl.SSLSocket)
                # Check that trying starttls when it's already active fails.
                self.assertRaises(ValueError, self.server.starttls)

    def test_zlogin(self):
        # This test must be the penultimate because further commands will be
        # refused.
        baduser = "notarealuser"
        badpw = "notarealpassword"
        # Check that bogus credentials cause failure
        self.assertRaises(nntplib.NNTPError, self.server.login,
                          user=baduser, password=badpw, usenetrc=False)
        # FIXME: We should check that correct credentials succeed, but that
        # would require valid details for some server somewhere to be in the
        # test suite, I think. Gmane is anonymous, at least as used for the
        # other tests.

    def test_zzquit(self):
        # This test must be called last, hence the name
        cls = type(self)
        try:
            self.server.quit()
        finally:
            cls.server = None

    @classmethod
    def wrap_methods(cls):
        # Wrap all methods in a transient_internet() exception catcher
        # XXX put a generic version in test.support?
        def wrap_meth(meth):
            @functools.wraps(meth)
            def wrapped(self):
                with support.transient_internet(self.NNTP_HOST):
                    meth(self)
            return wrapped
        for name in dir(cls):
            if not name.startswith('test_'):
                continue
            meth = getattr(cls, name)
            if not callable(meth):
                continue
            # Need to use a closure so that meth remains bound to its current
            # value
            setattr(cls, name, wrap_meth(meth))

    def test_with_statement(self):
        # NNTP objects are context managers: exiting the block closes the
        # connection even if quit() was already called inside it.
        def is_connected():
            if not hasattr(server, 'file'):
                return False
            try:
                server.help()
            except (socket.error, EOFError):
                return False
            return True

        with self.NNTP_CLASS(self.NNTP_HOST, timeout=TIMEOUT, usenetrc=False) as server:
            self.assertTrue(is_connected())
            self.assertTrue(server.help())
        self.assertFalse(is_connected())

        with self.NNTP_CLASS(self.NNTP_HOST, timeout=TIMEOUT, usenetrc=False) as server:
            server.quit()
        self.assertFalse(is_connected())
# Decorate every test_* method of the mixin with the transient-network guard.
NetworkedNNTPTestsMixin.wrap_methods()
class NetworkedNNTPTests(NetworkedNNTPTestsMixin, unittest.TestCase):
    # This server supports STARTTLS (gmane doesn't)
    NNTP_HOST = 'news.trigofacile.com'
    GROUP_NAME = 'fr.comp.lang.python'
    GROUP_PAT = 'fr.comp.lang.*'

    NNTP_CLASS = NNTP

    @classmethod
    def setUpClass(cls):
        # One connection is shared by all tests of the class; test_zzquit
        # closes it (tearDownClass covers the case where it never ran).
        support.requires("network")
        with support.transient_internet(cls.NNTP_HOST):
            cls.server = cls.NNTP_CLASS(cls.NNTP_HOST, timeout=TIMEOUT, usenetrc=False)

    @classmethod
    def tearDownClass(cls):
        if cls.server is not None:
            cls.server.quit()
if _have_ssl:
    # Same suite as NetworkedNNTPTests, but over an implicit-TLS connection.
    class NetworkedNNTP_SSLTests(NetworkedNNTPTests):

        # Technical limits for this public NNTP server (see http://www.aioe.org):
        # "Only two concurrent connections per IP address are allowed and
        # 400 connections per day are accepted from each IP address."

        NNTP_HOST = 'nntp.aioe.org'
        GROUP_NAME = 'comp.lang.python'
        GROUP_PAT = 'comp.lang.*'

        NNTP_CLASS = nntplib.NNTP_SSL

        # Disabled as it produces too much data
        test_list = None

        # Disabled as the connection will already be encrypted.
        test_starttls = None
#
# Non-networked tests using a local server (or something mocking it).
#
class _NNTPServerIO(io.RawIOBase):
    """A raw IO object allowing NNTP commands to be received and processed
    by a handler. The handler can push responses which can then be read
    from the IO object."""

    def __init__(self, handler):
        io.RawIOBase.__init__(self)
        # The channel from the client
        self.c2s = io.BytesIO()
        # The channel to the client
        self.s2c = io.BytesIO()
        self.handler = handler
        self.handler.start(self.c2s.readline, self.push_data)

    def readable(self):
        return True

    def writable(self):
        return True

    def push_data(self, data):
        """Push (buffer) some data to send to the client."""
        # Append at the end of the server-to-client buffer, then restore
        # the read position so pending output is not skipped.
        saved_pos = self.s2c.tell()
        self.s2c.seek(0, 2)
        self.s2c.write(data)
        self.s2c.seek(saved_pos)

    def write(self, b):
        """The client sends us some data"""
        # Same save/restore dance on the client-to-server buffer, then let
        # the handler consume whatever complete commands are available.
        saved_pos = self.c2s.tell()
        self.c2s.write(b)
        self.c2s.seek(saved_pos)
        self.handler.process_pending()
        return len(b)

    def readinto(self, buf):
        """The client wants to read a response"""
        self.handler.process_pending()
        chunk = self.s2c.read(len(buf))
        count = len(chunk)
        buf[:count] = chunk
        return count
class MockedNNTPTestsMixin:
    # Override in derived classes: the mock handler class driving the server.
    handler_class = None

    def setUp(self):
        super().setUp()
        self.make_server()

    def tearDown(self):
        super().tearDown()
        del self.server

    def make_server(self, *args, **kwargs):
        # Wire handler <-> raw IO <-> buffered file <-> _NNTPBase so the
        # NNTP client runs entirely in-process against the scripted handler.
        self.handler = self.handler_class()
        self.sio = _NNTPServerIO(self.handler)
        # Using BufferedRWPair instead of BufferedRandom ensures the file
        # isn't seekable.
        file = io.BufferedRWPair(self.sio, self.sio)
        self.server = nntplib._NNTPBase(file, 'test.server', *args, **kwargs)
        return self.server
class MockedNNTPWithReaderModeMixin(MockedNNTPTestsMixin):
    def setUp(self):
        super().setUp()
        # Replace the server built by super().setUp() with one created in
        # reader mode (issues MODE READER during the handshake).
        self.make_server(readermode=True)
class NNTPv1Handler:
    """A handler for RFC 977"""

    welcome = "200 NNTP mock server"

    def start(self, readline, push_data):
        # `readline` pulls a raw client line; `push_data` queues bytes for
        # the client to read back.  Both come from _NNTPServerIO.
        self.in_body = False
        self.allow_posting = True
        self._readline = readline
        self._push_data = push_data
        self._logged_in = False
        self._user_sent = False
        # Our welcome
        self.handle_welcome()

    def _decode(self, data):
        return str(data, "utf-8", "surrogateescape")

    def process_pending(self):
        # First drain a pending request body (after POST / IHAVE), then
        # dispatch complete command lines to handle_<COMMAND> methods.
        if self.in_body:
            while True:
                line = self._readline()
                if not line:
                    return
                self.body.append(line)
                if line == b".\r\n":
                    break
            try:
                meth, tokens = self.body_callback
                meth(*tokens, body=self.body)
            finally:
                self.body_callback = None
                self.body = None
                self.in_body = False
        while True:
            line = self._decode(self._readline())
            if not line:
                return
            if not line.endswith("\r\n"):
                raise ValueError("line doesn't end with \\r\\n: {!r}".format(line))
            line = line[:-2]
            cmd, *tokens = line.split()
            #meth = getattr(self.handler, "handle_" + cmd.upper(), None)
            meth = getattr(self, "handle_" + cmd.upper(), None)
            if meth is None:
                self.handle_unknown()
            else:
                try:
                    meth(*tokens)
                except Exception as e:
                    raise ValueError("command failed: {!r}".format(line)) from e
                else:
                    if self.in_body:
                        self.body_callback = meth, tokens
                        self.body = []

    def expect_body(self):
        """Flag that the client is expected to post a request body"""
        self.in_body = True

    def push_data(self, data):
        """Push some binary data"""
        self._push_data(data)

    def push_lit(self, lit):
        """Push a string literal"""
        # Dedent so the triple-quoted literals below can be indented with
        # the code, then normalize line endings to CRLF per the protocol.
        lit = textwrap.dedent(lit)
        lit = "\r\n".join(lit.splitlines()) + "\r\n"
        lit = lit.encode('utf-8')
        self.push_data(lit)

    def handle_unknown(self):
        self.push_lit("500 What?")

    def handle_welcome(self):
        self.push_lit(self.welcome)

    def handle_QUIT(self):
        self.push_lit("205 Bye!")

    def handle_DATE(self):
        self.push_lit("111 20100914001155")

    def handle_GROUP(self, group):
        if group == "fr.comp.lang.python":
            self.push_lit("211 486 761 1265 fr.comp.lang.python")
        else:
            self.push_lit("411 No such group {}".format(group))

    def handle_HELP(self):
        self.push_lit("""\
            100 Legal commands
              authinfo user Name|pass Password|generic <prog> <args>
              date
              help
            Report problems to <root@example.org>
            .""")

    def handle_STAT(self, message_spec=None):
        if message_spec is None:
            self.push_lit("412 No newsgroup selected")
        elif message_spec == "3000234":
            self.push_lit("223 3000234 <45223423@example.com>")
        elif message_spec == "<45223423@example.com>":
            self.push_lit("223 0 <45223423@example.com>")
        else:
            self.push_lit("430 No Such Article Found")

    def handle_NEXT(self):
        self.push_lit("223 3000237 <668929@example.org> retrieved")

    def handle_LAST(self):
        self.push_lit("223 3000234 <45223423@example.com> retrieved")

    def handle_LIST(self, action=None, param=None):
        # Canned replies for plain LIST, LIST ACTIVE, LIST OVERVIEW.FMT
        # and LIST NEWSGROUPS.
        if action is None:
            self.push_lit("""\
                215 Newsgroups in form "group high low flags".
                comp.lang.python 0000052340 0000002828 y
                comp.lang.python.announce 0000001153 0000000993 m
                free.it.comp.lang.python 0000000002 0000000002 y
                fr.comp.lang.python 0000001254 0000000760 y
                free.it.comp.lang.python.learner 0000000000 0000000001 y
                tw.bbs.comp.lang.python 0000000304 0000000304 y
                .""")
        elif action == "ACTIVE":
            if param == "*distutils*":
                self.push_lit("""\
                    215 Newsgroups in form "group high low flags"
                    gmane.comp.python.distutils.devel 0000014104 0000000001 m
                    gmane.comp.python.distutils.cvs 0000000000 0000000001 m
                    .""")
            else:
                self.push_lit("""\
                    215 Newsgroups in form "group high low flags"
                    .""")
        elif action == "OVERVIEW.FMT":
            self.push_lit("""\
                215 Order of fields in overview database.
                Subject:
                From:
                Date:
                Message-ID:
                References:
                Bytes:
                Lines:
                Xref:full
                .""")
        elif action == "NEWSGROUPS":
            assert param is not None
            if param == "comp.lang.python":
                self.push_lit("""\
                    215 Descriptions in form "group description".
                    comp.lang.python\tThe Python computer language.
                    .""")
            elif param == "comp.lang.python*":
                self.push_lit("""\
                    215 Descriptions in form "group description".
                    comp.lang.python.announce\tAnnouncements about the Python language. (Moderated)
                    comp.lang.python\tThe Python computer language.
                    .""")
            else:
                self.push_lit("""\
                    215 Descriptions in form "group description".
                    .""")
        else:
            self.push_lit('501 Unknown LIST keyword')

    def handle_NEWNEWS(self, group, date_str, time_str):
        # We hard code different return messages depending on passed
        # argument and date syntax.
        if (group == "comp.lang.python" and date_str == "20100913"
                and time_str == "082004"):
            # Date was passed in RFC 3977 format (NNTP "v2")
            self.push_lit("""\
                230 list of newsarticles (NNTP v2) created after Mon Sep 13 08:20:04 2010 follows
                <a4929a40-6328-491a-aaaf-cb79ed7309a2@q2g2000vbk.googlegroups.com>
                <f30c0419-f549-4218-848f-d7d0131da931@y3g2000vbm.googlegroups.com>
                .""")
        elif (group == "comp.lang.python" and date_str == "100913"
                and time_str == "082004"):
            # Date was passed in RFC 977 format (NNTP "v1")
            self.push_lit("""\
                230 list of newsarticles (NNTP v1) created after Mon Sep 13 08:20:04 2010 follows
                <a4929a40-6328-491a-aaaf-cb79ed7309a2@q2g2000vbk.googlegroups.com>
                <f30c0419-f549-4218-848f-d7d0131da931@y3g2000vbm.googlegroups.com>
                .""")
        elif (group == 'comp.lang.python' and
              date_str in ('20100101', '100101') and
              time_str == '090000'):
            # Deliberately over-long line to exercise line-length limits.
            self.push_lit('too long line' * 3000 +
                          '\n.')
        else:
            self.push_lit("""\
                230 An empty list of newsarticles follows
                .""")
        # (Note for experiments: many servers disable NEWNEWS.
        #  As of this writing, sicinfo3.epfl.ch doesn't.)

    def handle_XOVER(self, message_spec):
        if message_spec == "57-59":
            self.push_lit(
                "224 Overview information for 57-58 follows\n"
                "57\tRe: ANN: New Plone book with strong Python (and Zope) themes throughout"
                "\tDoug Hellmann <doug.hellmann-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>"
                "\tSat, 19 Jun 2010 18:04:08 -0400"
                "\t<4FD05F05-F98B-44DC-8111-C6009C925F0C@gmail.com>"
                "\t<hvalf7$ort$1@dough.gmane.org>\t7103\t16"
                "\tXref: news.gmane.org gmane.comp.python.authors:57"
                "\n"
                "58\tLooking for a few good bloggers"
                "\tDoug Hellmann <doug.hellmann-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>"
                "\tThu, 22 Jul 2010 09:14:14 -0400"
                "\t<A29863FA-F388-40C3-AA25-0FD06B09B5BF@gmail.com>"
                "\t\t6683\t16"
                "\t"
                "\n"
                # A UTF-8 overview line from fr.comp.lang.python
                "59\tRe: Message d'erreur incompréhensible (par moi)"
                "\tEric Brunel <eric.brunel@pragmadev.nospam.com>"
                "\tWed, 15 Sep 2010 18:09:15 +0200"
                "\t<eric.brunel-2B8B56.18091515092010@news.wanadoo.fr>"
                "\t<4c90ec87$0$32425$ba4acef3@reader.news.orange.fr>\t1641\t27"
                "\tXref: saria.nerim.net fr.comp.lang.python:1265"
                "\n"
                ".\n")
        else:
            self.push_lit("""\
                224 No articles
                .""")

    def handle_POST(self, *, body=None):
        # Called twice: first without a body (announce 340 and arm
        # expect_body()), then again by process_pending with the body.
        if body is None:
            if self.allow_posting:
                self.push_lit("340 Input article; end with <CR-LF>.<CR-LF>")
                self.expect_body()
            else:
                self.push_lit("440 Posting not permitted")
        else:
            assert self.allow_posting
            self.push_lit("240 Article received OK")
            self.posted_body = body

    def handle_IHAVE(self, message_id, *, body=None):
        # Same two-phase protocol as handle_POST, keyed on the message-id.
        if body is None:
            if (self.allow_posting and
                    message_id == "<i.am.an.article.you.will.want@example.com>"):
                self.push_lit("335 Send it; end with <CR-LF>.<CR-LF>")
                self.expect_body()
            else:
                self.push_lit("435 Article not wanted")
        else:
            assert self.allow_posting
            self.push_lit("235 Article transferred OK")
            self.posted_body = body

    # Canned article served by ARTICLE / HEAD / BODY; push_lit dedents the
    # leading whitespace, and the ".." line is protocol dot-stuffing.
    sample_head = """\
        From: "Demo User" <nobody@example.net>
        Subject: I am just a test article
        Content-Type: text/plain; charset=UTF-8; format=flowed
        Message-ID: <i.am.an.article.you.will.want@example.com>"""

    sample_body = """\
        This is just a test article.
        ..Here is a dot-starting line.

        -- Signed by Andr\xe9."""

    sample_article = sample_head + "\n\n" + sample_body

    def handle_ARTICLE(self, message_spec=None):
        if message_spec is None:
            self.push_lit("220 3000237 <45223423@example.com>")
        elif message_spec == "<45223423@example.com>":
            self.push_lit("220 0 <45223423@example.com>")
        elif message_spec == "3000234":
            self.push_lit("220 3000234 <45223423@example.com>")
        else:
            self.push_lit("430 No Such Article Found")
            return
        self.push_lit(self.sample_article)
        self.push_lit(".")

    def handle_HEAD(self, message_spec=None):
        if message_spec is None:
            self.push_lit("221 3000237 <45223423@example.com>")
        elif message_spec == "<45223423@example.com>":
            self.push_lit("221 0 <45223423@example.com>")
        elif message_spec == "3000234":
            self.push_lit("221 3000234 <45223423@example.com>")
        else:
            self.push_lit("430 No Such Article Found")
            return
        self.push_lit(self.sample_head)
        self.push_lit(".")

    def handle_BODY(self, message_spec=None):
        if message_spec is None:
            self.push_lit("222 3000237 <45223423@example.com>")
        elif message_spec == "<45223423@example.com>":
            self.push_lit("222 0 <45223423@example.com>")
        elif message_spec == "3000234":
            self.push_lit("222 3000234 <45223423@example.com>")
        else:
            self.push_lit("430 No Such Article Found")
            return
        self.push_lit(self.sample_body)
        self.push_lit(".")

    def handle_AUTHINFO(self, cred_type, data):
        # Two-step USER/PASS authentication state machine.
        if self._logged_in:
            self.push_lit('502 Already Logged In')
        elif cred_type == 'user':
            if self._user_sent:
                self.push_lit('482 User Credential Already Sent')
            else:
                self.push_lit('381 Password Required')
                self._user_sent = True
        elif cred_type == 'pass':
            self.push_lit('281 Login Successful')
            self._logged_in = True
        else:
            raise Exception('Unknown cred type {}'.format(cred_type))
class NNTPv2Handler(NNTPv1Handler):
    """A handler for RFC 3977 (NNTP "v2")"""

    def handle_CAPABILITIES(self):
        # '{}' is replaced with an AUTHINFO USER line until the client has
        # logged in; afterwards the capability disappears from the list.
        fmt = """\
            101 Capability list:
            VERSION 2 3
            IMPLEMENTATION INN 2.5.1{}
            HDR
            LIST ACTIVE ACTIVE.TIMES DISTRIB.PATS HEADERS NEWSGROUPS OVERVIEW.FMT
            OVER
            POST
            READER
            ."""

        if not self._logged_in:
            self.push_lit(fmt.format('\n            AUTHINFO USER'))
        else:
            self.push_lit(fmt.format(''))

    def handle_MODE(self, _):
        # The client must not fall back to MODE READER when READER is
        # already advertised in CAPABILITIES.
        raise Exception('MODE READER sent despite READER has been advertised')

    def handle_OVER(self, message_spec=None):
        return self.handle_XOVER(message_spec)
class CapsAfterLoginNNTPv2Handler(NNTPv2Handler):
    """A handler that allows CAPABILITIES only after login"""

    def handle_CAPABILITIES(self):
        # Reject with 480 until AUTHINFO has succeeded.
        if not self._logged_in:
            self.push_lit('480 You must log in.')
        else:
            super().handle_CAPABILITIES()
class ModeSwitchingNNTPv2Handler(NNTPv2Handler):
    """A server that starts in transit mode"""

    def __init__(self):
        self._switched = False

    def handle_CAPABILITIES(self):
        # Before MODE READER the capability is advertised as 'MODE-READER';
        # after switching it becomes plain 'READER'.
        fmt = """\
            101 Capability list:
            VERSION 2 3
            IMPLEMENTATION INN 2.5.1
            HDR
            LIST ACTIVE ACTIVE.TIMES DISTRIB.PATS HEADERS NEWSGROUPS OVERVIEW.FMT
            OVER
            POST
            {}READER
            ."""
        if self._switched:
            self.push_lit(fmt.format(''))
        else:
            self.push_lit(fmt.format('MODE-'))

    def handle_MODE(self, what):
        # Accept exactly one 'MODE reader' and flip to reader mode.
        assert not self._switched and what == 'reader'
        self._switched = True
        self.push_lit('200 Posting allowed')
class NNTPv1v2TestsMixin:
def setUp(self):
super().setUp()
def test_welcome(self):
self.assertEqual(self.server.welcome, self.handler.welcome)
def test_authinfo(self):
if self.nntp_version == 2:
self.assertIn('AUTHINFO', self.server._caps)
self.server.login('testuser', 'testpw')
# if AUTHINFO is gone from _caps we also know that getcapabilities()
# has been called after login as it should
self.assertNotIn('AUTHINFO', self.server._caps)
def test_date(self):
resp, date = self.server.date()
self.assertEqual(resp, "111 20100914001155")
self.assertEqual(date, datetime.datetime(2010, 9, 14, 0, 11, 55))
def test_quit(self):
self.assertFalse(self.sio.closed)
resp = self.server.quit()
self.assertEqual(resp, "205 Bye!")
self.assertTrue(self.sio.closed)
def test_help(self):
resp, help = self.server.help()
self.assertEqual(resp, "100 Legal commands")
self.assertEqual(help, [
' authinfo user Name|pass Password|generic <prog> <args>',
' date',
' help',
'Report problems to <root@example.org>',
])
def test_list(self):
resp, groups = self.server.list()
self.assertEqual(len(groups), 6)
g = groups[1]
self.assertEqual(g,
GroupInfo("comp.lang.python.announce", "0000001153",
"0000000993", "m"))
resp, groups = self.server.list("*distutils*")
self.assertEqual(len(groups), 2)
g = groups[0]
self.assertEqual(g,
GroupInfo("gmane.comp.python.distutils.devel", "0000014104",
"0000000001", "m"))
def test_stat(self):
resp, art_num, message_id = self.server.stat(3000234)
self.assertEqual(resp, "223 3000234 <45223423@example.com>")
self.assertEqual(art_num, 3000234)
self.assertEqual(message_id, "<45223423@example.com>")
resp, art_num, message_id = self.server.stat("<45223423@example.com>")
self.assertEqual(resp, "223 0 <45223423@example.com>")
self.assertEqual(art_num, 0)
self.assertEqual(message_id, "<45223423@example.com>")
with self.assertRaises(nntplib.NNTPTemporaryError) as cm:
self.server.stat("<non.existent.id>")
self.assertEqual(cm.exception.response, "430 No Such Article Found")
with self.assertRaises(nntplib.NNTPTemporaryError) as cm:
self.server.stat()
self.assertEqual(cm.exception.response, "412 No newsgroup selected")
def test_next(self):
resp, art_num, message_id = self.server.next()
self.assertEqual(resp, "223 3000237 <668929@example.org> retrieved")
self.assertEqual(art_num, 3000237)
self.assertEqual(message_id, "<668929@example.org>")
def test_last(self):
resp, art_num, message_id = self.server.last()
self.assertEqual(resp, "223 3000234 <45223423@example.com> retrieved")
self.assertEqual(art_num, 3000234)
self.assertEqual(message_id, "<45223423@example.com>")
def test_description(self):
desc = self.server.description("comp.lang.python")
self.assertEqual(desc, "The Python computer language.")
desc = self.server.description("comp.lang.pythonx")
self.assertEqual(desc, "")
def test_descriptions(self):
resp, groups = self.server.descriptions("comp.lang.python")
self.assertEqual(resp, '215 Descriptions in form "group description".')
self.assertEqual(groups, {
"comp.lang.python": "The Python computer language.",
})
resp, groups = self.server.descriptions("comp.lang.python*")
self.assertEqual(groups, {
"comp.lang.python": "The Python computer language.",
"comp.lang.python.announce": "Announcements about the Python language. (Moderated)",
})
resp, groups = self.server.descriptions("comp.lang.pythonx")
self.assertEqual(groups, {})
def test_group(self):
resp, count, first, last, group = self.server.group("fr.comp.lang.python")
self.assertTrue(resp.startswith("211 "), resp)
self.assertEqual(first, 761)
self.assertEqual(last, 1265)
self.assertEqual(count, 486)
self.assertEqual(group, "fr.comp.lang.python")
with self.assertRaises(nntplib.NNTPTemporaryError) as cm:
self.server.group("comp.lang.python.devel")
exc = cm.exception
self.assertTrue(exc.response.startswith("411 No such group"),
exc.response)
def test_newnews(self):
# NEWNEWS comp.lang.python [20]100913 082004
dt = datetime.datetime(2010, 9, 13, 8, 20, 4)
resp, ids = self.server.newnews("comp.lang.python", dt)
expected = (
"230 list of newsarticles (NNTP v{0}) "
"created after Mon Sep 13 08:20:04 2010 follows"
).format(self.nntp_version)
self.assertEqual(resp, expected)
self.assertEqual(ids, [
"<a4929a40-6328-491a-aaaf-cb79ed7309a2@q2g2000vbk.googlegroups.com>",
"<f30c0419-f549-4218-848f-d7d0131da931@y3g2000vbm.googlegroups.com>",
])
# NEWNEWS fr.comp.lang.python [20]100913 082004
dt = datetime.datetime(2010, 9, 13, 8, 20, 4)
resp, ids = self.server.newnews("fr.comp.lang.python", dt)
self.assertEqual(resp, "230 An empty list of newsarticles follows")
self.assertEqual(ids, [])
def _check_article_body(self, lines):
self.assertEqual(len(lines), 4)
self.assertEqual(lines[-1].decode('utf-8'), "-- Signed by André.")
self.assertEqual(lines[-2], b"")
self.assertEqual(lines[-3], b".Here is a dot-starting line.")
self.assertEqual(lines[-4], b"This is just a test article.")
def _check_article_head(self, lines):
self.assertEqual(len(lines), 4)
self.assertEqual(lines[0], b'From: "Demo User" <nobody@example.net>')
self.assertEqual(lines[3], b"Message-ID: <i.am.an.article.you.will.want@example.com>")
def _check_article_data(self, lines):
self.assertEqual(len(lines), 9)
self._check_article_head(lines[:4])
self._check_article_body(lines[-4:])
self.assertEqual(lines[4], b"")
def test_article(self):
# ARTICLE
resp, info = self.server.article()
self.assertEqual(resp, "220 3000237 <45223423@example.com>")
art_num, message_id, lines = info
self.assertEqual(art_num, 3000237)
self.assertEqual(message_id, "<45223423@example.com>")
self._check_article_data(lines)
# ARTICLE num
resp, info = self.server.article(3000234)
self.assertEqual(resp, "220 3000234 <45223423@example.com>")
art_num, message_id, lines = info
self.assertEqual(art_num, 3000234)
self.assertEqual(message_id, "<45223423@example.com>")
self._check_article_data(lines)
# ARTICLE id
resp, info = self.server.article("<45223423@example.com>")
self.assertEqual(resp, "220 0 <45223423@example.com>")
art_num, message_id, lines = info
self.assertEqual(art_num, 0)
self.assertEqual(message_id, "<45223423@example.com>")
self._check_article_data(lines)
# Non-existent id
with self.assertRaises(nntplib.NNTPTemporaryError) as cm:
self.server.article("<non-existent@example.com>")
self.assertEqual(cm.exception.response, "430 No Such Article Found")
def test_article_file(self):
    """ARTICLE with a file= argument: raw CRLF data goes to the file and
    the returned line list is empty."""
    sink = io.BytesIO()
    resp, info = self.server.article(file=sink)
    self.assertEqual(resp, "220 3000237 <45223423@example.com>")
    art_num, message_id, lines = info
    self.assertEqual(art_num, 3000237)
    self.assertEqual(message_id, "<45223423@example.com>")
    self.assertEqual(lines, [])
    data = sink.getvalue()
    head_prefix = (b'From: "Demo User" <nobody@example.net>\r\n'
                   b'Subject: I am just a test article\r\n')
    body_suffix = (b'This is just a test article.\r\n'
                   b'.Here is a dot-starting line.\r\n'
                   b'\r\n'
                   b'-- Signed by Andr\xc3\xa9.\r\n')
    self.assertTrue(data.startswith(head_prefix), ascii(data))
    self.assertTrue(data.endswith(body_suffix), ascii(data))
def test_head(self):
    """HEAD with no argument, by article number, by message-id, and with
    an unknown message-id (which must raise)."""
    cases = (
        ((), 3000237),                         # HEAD
        ((3000234,), 3000234),                 # HEAD num
        (("<45223423@example.com>", ), 0),     # HEAD id
    )
    for call_args, expected_num in cases:
        resp, info = self.server.head(*call_args)
        self.assertEqual(resp, "221 %s <45223423@example.com>" % expected_num)
        art_num, message_id, lines = info
        self.assertEqual(art_num, expected_num)
        self.assertEqual(message_id, "<45223423@example.com>")
        self._check_article_head(lines)
    # Non-existent id
    with self.assertRaises(nntplib.NNTPTemporaryError) as cm:
        self.server.head("<non-existent@example.com>")
    self.assertEqual(cm.exception.response, "430 No Such Article Found")
def test_head_file(self):
    """HEAD with a file= argument: only the headers are written out."""
    sink = io.BytesIO()
    resp, info = self.server.head(file=sink)
    self.assertEqual(resp, "221 3000237 <45223423@example.com>")
    art_num, message_id, lines = info
    self.assertEqual(art_num, 3000237)
    self.assertEqual(message_id, "<45223423@example.com>")
    self.assertEqual(lines, [])
    data = sink.getvalue()
    head_prefix = (b'From: "Demo User" <nobody@example.net>\r\n'
                   b'Subject: I am just a test article\r\n')
    body_suffix = (b'This is just a test article.\r\n'
                   b'.Here is a dot-starting line.\r\n'
                   b'\r\n'
                   b'-- Signed by Andr\xc3\xa9.\r\n')
    self.assertTrue(data.startswith(head_prefix), ascii(data))
    # HEAD must NOT include the article body.
    self.assertFalse(data.endswith(body_suffix), ascii(data))
def test_body(self):
    """BODY with no argument, by article number, by message-id, and with
    an unknown message-id (which must raise)."""
    cases = (
        ((), 3000237),                         # BODY
        ((3000234,), 3000234),                 # BODY num
        (("<45223423@example.com>", ), 0),     # BODY id
    )
    for call_args, expected_num in cases:
        resp, info = self.server.body(*call_args)
        self.assertEqual(resp, "222 %s <45223423@example.com>" % expected_num)
        art_num, message_id, lines = info
        self.assertEqual(art_num, expected_num)
        self.assertEqual(message_id, "<45223423@example.com>")
        self._check_article_body(lines)
    # Non-existent id
    with self.assertRaises(nntplib.NNTPTemporaryError) as cm:
        self.server.body("<non-existent@example.com>")
    self.assertEqual(cm.exception.response, "430 No Such Article Found")
def test_body_file(self):
    """BODY with a file= argument: only the body is written out."""
    sink = io.BytesIO()
    resp, info = self.server.body(file=sink)
    self.assertEqual(resp, "222 3000237 <45223423@example.com>")
    art_num, message_id, lines = info
    self.assertEqual(art_num, 3000237)
    self.assertEqual(message_id, "<45223423@example.com>")
    self.assertEqual(lines, [])
    data = sink.getvalue()
    head_prefix = (b'From: "Demo User" <nobody@example.net>\r\n'
                   b'Subject: I am just a test article\r\n')
    body_suffix = (b'This is just a test article.\r\n'
                   b'.Here is a dot-starting line.\r\n'
                   b'\r\n'
                   b'-- Signed by Andr\xc3\xa9.\r\n')
    # BODY must NOT include the headers.
    self.assertFalse(data.startswith(head_prefix), ascii(data))
    self.assertTrue(data.endswith(body_suffix), ascii(data))
def check_over_xover_resp(self, resp, overviews):
    """Common assertions for OVER and XOVER results (3 fixture articles).

    Checks the full parsed field dict of the first overview, the
    missing-Xref handling of the second, and the raw-UTF-8 subject of
    the third.
    """
    self.assertTrue(resp.startswith("224 "), resp)
    self.assertEqual(len(overviews), 3)
    # First overview: every parsed field is pinned down exactly.
    art_num, over = overviews[0]
    self.assertEqual(art_num, 57)
    self.assertEqual(over, {
        "from": "Doug Hellmann <doug.hellmann-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>",
        "subject": "Re: ANN: New Plone book with strong Python (and Zope) themes throughout",
        "date": "Sat, 19 Jun 2010 18:04:08 -0400",
        "message-id": "<4FD05F05-F98B-44DC-8111-C6009C925F0C@gmail.com>",
        "references": "<hvalf7$ort$1@dough.gmane.org>",
        ":bytes": "7103",
        ":lines": "16",
        "xref": "news.gmane.org gmane.comp.python.authors:57"
    })
    # Second overview: the Xref header is absent entirely -> parsed as None.
    art_num, over = overviews[1]
    self.assertEqual(over["xref"], None)
    # Third overview: subject arrives as raw (undecoded) UTF-8 text.
    art_num, over = overviews[2]
    self.assertEqual(over["subject"],
                     "Re: Message d'erreur incompréhensible (par moi)")
def test_xover(self):
    """XOVER over an explicit article-number range."""
    self.check_over_xover_resp(*self.server.xover(57, 59))
def test_over(self):
    """OVER with a (start, end) tuple; falls back to XOVER on NNTP v1."""
    self.check_over_xover_resp(*self.server.over((57, 59)))
# Canonical article used by the POST/IHAVE tests: four header lines, an
# empty separator line and a four-line body (including a dot-starting
# line that needs dot-stuffing and a UTF-8 encoded signature), all
# CRLF-terminated.  Kept byte-exact: _check_posted_body() asserts on it.
sample_post = (
    b'From: "Demo User" <nobody@example.net>\r\n'
    b'Subject: I am just a test article\r\n'
    b'Content-Type: text/plain; charset=UTF-8; format=flowed\r\n'
    b'Message-ID: <i.am.an.article.you.will.want@example.com>\r\n'
    b'\r\n'
    b'This is just a test article.\r\n'
    b'.Here is a dot-starting line.\r\n'
    b'\r\n'
    b'-- Signed by Andr\xc3\xa9.\r\n'
)
def _check_posted_body(self):
    """Check the raw body as received by the mock server handler."""
    lines = self.handler.posted_body
    # 9 article lines plus the lone "." terminator line.
    self.assertEqual(len(lines), 10)
    self.assertEqual(lines[0], b'From: "Demo User" <nobody@example.net>\r\n')
    # Tail of the transfer: dot-stuffed line, blank line, signature,
    # and the terminating "." line.
    self.assertEqual(lines[-4:], [b'..Here is a dot-starting line.\r\n',
                                  b'\r\n',
                                  b'-- Signed by Andr\xc3\xa9.\r\n',
                                  b'.\r\n'])
def _check_post_ihave_sub(self, func, *args, file_factory):
    """Send self.sample_post twice through *func* (NNTP.post or NNTP.ihave).

    *args* are extra positional arguments passed before the payload
    (e.g. the message-id for ihave).  *file_factory* (keyword-only)
    wraps the raw bytes into the payload representation under test
    (bytes, bytearray, file object, iterable of lines, ...).
    The post is sent once with CRLF endings and once with bare LF
    endings; the latter must be converted back to CRLF by the client.
    Returns the last response string for the caller to check.
    """
    # First the prepared post with CRLF endings
    post = self.sample_post
    func_args = args + (file_factory(post),)
    # Reset the handler's capture slot so _check_posted_body sees only
    # this transfer.
    self.handler.posted_body = None
    resp = func(*func_args)
    self._check_posted_body()
    # Then the same post with "normal" line endings - they should be
    # converted by NNTP.post and NNTP.ihave.
    post = self.sample_post.replace(b"\r\n", b"\n")
    func_args = args + (file_factory(post),)
    self.handler.posted_body = None
    resp = func(*func_args)
    self._check_posted_body()
    return resp
def check_post_ihave(self, func, success_resp, *args):
    """Exercise *func* with every supported payload representation and
    check each transfer yields *success_resp*."""
    def terminated_lines(b):
        # iterable of lines that keep their line endings
        return iter(b.splitlines(keepends=True))

    def bare_lines(b):
        # iterable of lines without line endings
        return iter(b.splitlines(keepends=False))

    # Same order as the original: bytes, bytearray, binary file object,
    # iterable of terminated lines, iterable of non-terminated lines.
    for factory in (bytes, bytearray, io.BytesIO,
                    terminated_lines, bare_lines):
        resp = self._check_post_ihave_sub(func, *args, file_factory=factory)
        self.assertEqual(resp, success_resp)
def test_post(self):
    """POST succeeds in all payload forms, then fails with 440 once the
    mock server forbids posting."""
    self.check_post_ihave(self.server.post, "240 Article received OK")
    # Flip the mock server to read-only mode.
    self.handler.allow_posting = False
    with self.assertRaises(nntplib.NNTPTemporaryError) as cm:
        self.server.post(self.sample_post)
    self.assertEqual(cm.exception.response, "440 Posting not permitted")
def test_ihave(self):
    """IHAVE succeeds for a wanted article and raises 435 for one the
    server does not want."""
    self.check_post_ihave(self.server.ihave, "235 Article transferred OK",
                          "<i.am.an.article.you.will.want@example.com>")
    with self.assertRaises(nntplib.NNTPTemporaryError) as cm:
        self.server.ihave("<another.message.id>", self.sample_post)
    self.assertEqual(cm.exception.response, "435 Article not wanted")
def test_too_long_lines(self):
    """An over-long response line from the server raises NNTPDataError."""
    dt = datetime.datetime(2010, 1, 1, 9, 0, 0)
    with self.assertRaises(nntplib.NNTPDataError):
        self.server.newnews("comp.lang.python", dt)
class NNTPv1Tests(NNTPv1v2TestsMixin, MockedNNTPTestsMixin, unittest.TestCase):
    """Tests an NNTP v1 server (no capabilities)."""

    nntp_version = 1
    handler_class = NNTPv1Handler

    def test_caps(self):
        server = self.server
        # An RFC 977 (v1) server advertises no capabilities at all.
        self.assertEqual(server.getcapabilities(), {})
        self.assertEqual(server.nntp_version, 1)
        self.assertEqual(server.nntp_implementation, None)
class NNTPv2Tests(NNTPv1v2TestsMixin, MockedNNTPTestsMixin, unittest.TestCase):
    """Tests an NNTP v2 server (with capabilities)."""

    nntp_version = 2
    handler_class = NNTPv2Handler

    def test_caps(self):
        # The mock handler's CAPABILITIES response, parsed into a dict
        # of {capability name: argument list}.
        caps = self.server.getcapabilities()
        self.assertEqual(caps, {
            'VERSION': ['2', '3'],
            'IMPLEMENTATION': ['INN', '2.5.1'],
            'AUTHINFO': ['USER'],
            'HDR': [],
            'LIST': ['ACTIVE', 'ACTIVE.TIMES', 'DISTRIB.PATS',
                     'HEADERS', 'NEWSGROUPS', 'OVERVIEW.FMT'],
            'OVER': [],
            'POST': [],
            'READER': [],
        })
        # The highest advertised VERSION value wins.
        self.assertEqual(self.server.nntp_version, 3)
        self.assertEqual(self.server.nntp_implementation, 'INN 2.5.1')
class CapsAfterLoginNNTPv2Tests(MockedNNTPTestsMixin, unittest.TestCase):
    """Tests a probably NNTP v2 server with capabilities only after login."""

    nntp_version = 2
    handler_class = CapsAfterLoginNNTPv2Handler

    def test_caps_only_after_login(self):
        # Before authentication the server advertises nothing...
        self.assertEqual(self.server._caps, {})
        self.server.login('testuser', 'testpw')
        # ...and capabilities are (re-)fetched after a successful login.
        self.assertIn('VERSION', self.server._caps)
class SendReaderNNTPv2Tests(MockedNNTPWithReaderModeMixin,
                            unittest.TestCase):
    """Same tests as for v2 but we tell NTTP to send MODE READER to a server
    that isn't in READER mode by default."""

    nntp_version = 2
    handler_class = ModeSwitchingNNTPv2Handler

    def test_we_are_in_reader_mode_after_connect(self):
        # MODE READER must have been issued during connect, so READER
        # appears in the parsed capabilities afterwards.
        self.assertIn('READER', self.server._caps)
class MiscTests(unittest.TestCase):
    """Tests for nntplib's module-level helper functions (header decoding,
    overview parsing, date/time (un)parsing) — no server involved."""

    def test_decode_header(self):
        # decode_header() must decode RFC 2047 encoded-words and pass
        # plain text (including already-decoded UTF-8) through unchanged.
        def gives(a, b):
            self.assertEqual(nntplib.decode_header(a), b)
        gives("" , "")
        gives("a plain header", "a plain header")
        gives(" with extra spaces ", " with extra spaces ")
        gives("=?ISO-8859-15?Q?D=E9buter_en_Python?=", "Débuter en Python")
        # Two adjacent encoded-words are joined without the separator.
        gives("=?utf-8?q?Re=3A_=5Bsqlite=5D_probl=C3=A8me_avec_ORDER_BY_sur_des_cha?="
              " =?utf-8?q?=C3=AEnes_de_caract=C3=A8res_accentu=C3=A9es?=",
              "Re: [sqlite] problème avec ORDER BY sur des chaînes de caractères accentuées")
        gives("Re: =?UTF-8?B?cHJvYmzDqG1lIGRlIG1hdHJpY2U=?=",
              "Re: problème de matrice")
        # A natively utf-8 header (found in the real world!)
        gives("Re: Message d'erreur incompréhensible (par moi)",
              "Re: Message d'erreur incompréhensible (par moi)")

    def test_parse_overview_fmt(self):
        # _parse_overview_fmt() normalizes a LIST OVERVIEW.FMT response
        # into lowercase field names, with byte/line counts as ":bytes"
        # and ":lines" and ":full" suffixes stripped.
        # The minimal (default) response
        lines = ["Subject:", "From:", "Date:", "Message-ID:",
                 "References:", ":bytes", ":lines"]
        self.assertEqual(nntplib._parse_overview_fmt(lines),
                         ["subject", "from", "date", "message-id", "references",
                          ":bytes", ":lines"])
        # The minimal response using alternative names
        lines = ["Subject:", "From:", "Date:", "Message-ID:",
                 "References:", "Bytes:", "Lines:"]
        self.assertEqual(nntplib._parse_overview_fmt(lines),
                         ["subject", "from", "date", "message-id", "references",
                          ":bytes", ":lines"])
        # Variations in casing
        lines = ["subject:", "FROM:", "DaTe:", "message-ID:",
                 "References:", "BYTES:", "Lines:"]
        self.assertEqual(nntplib._parse_overview_fmt(lines),
                         ["subject", "from", "date", "message-id", "references",
                          ":bytes", ":lines"])
        # First example from RFC 3977
        lines = ["Subject:", "From:", "Date:", "Message-ID:",
                 "References:", ":bytes", ":lines", "Xref:full",
                 "Distribution:full"]
        self.assertEqual(nntplib._parse_overview_fmt(lines),
                         ["subject", "from", "date", "message-id", "references",
                          ":bytes", ":lines", "xref", "distribution"])
        # Second example from RFC 3977
        lines = ["Subject:", "From:", "Date:", "Message-ID:",
                 "References:", "Bytes:", "Lines:", "Xref:FULL",
                 "Distribution:FULL"]
        self.assertEqual(nntplib._parse_overview_fmt(lines),
                         ["subject", "from", "date", "message-id", "references",
                          ":bytes", ":lines", "xref", "distribution"])
        # A classic response from INN
        lines = ["Subject:", "From:", "Date:", "Message-ID:",
                 "References:", "Bytes:", "Lines:", "Xref:full"]
        self.assertEqual(nntplib._parse_overview_fmt(lines),
                         ["subject", "from", "date", "message-id", "references",
                          ":bytes", ":lines", "xref"])

    def test_parse_overview(self):
        # _parse_overview() turns tab-separated overview lines into
        # (article number, {field: value}) pairs.
        fmt = nntplib._DEFAULT_OVERVIEW_FMT + ["xref"]
        # First example from RFC 3977
        lines = [
            '3000234\tI am just a test article\t"Demo User" '
            '<nobody@example.com>\t6 Oct 1998 04:38:40 -0500\t'
            '<45223423@example.com>\t<45454@example.net>\t1234\t'
            '17\tXref: news.example.com misc.test:3000363',
        ]
        overview = nntplib._parse_overview(lines, fmt)
        (art_num, fields), = overview
        self.assertEqual(art_num, 3000234)
        self.assertEqual(fields, {
            'subject': 'I am just a test article',
            'from': '"Demo User" <nobody@example.com>',
            'date': '6 Oct 1998 04:38:40 -0500',
            'message-id': '<45223423@example.com>',
            'references': '<45454@example.net>',
            ':bytes': '1234',
            ':lines': '17',
            'xref': 'news.example.com misc.test:3000363',
        })
        # Second example; here the "Xref" field is totally absent (including
        # the header name) and comes out as None
        lines = [
            '3000234\tI am just a test article\t"Demo User" '
            '<nobody@example.com>\t6 Oct 1998 04:38:40 -0500\t'
            '<45223423@example.com>\t<45454@example.net>\t1234\t'
            '17\t\t',
        ]
        overview = nntplib._parse_overview(lines, fmt)
        (art_num, fields), = overview
        self.assertEqual(fields['xref'], None)
        # Third example; the "Xref" is an empty string, while "references"
        # is a single space.
        lines = [
            '3000234\tI am just a test article\t"Demo User" '
            '<nobody@example.com>\t6 Oct 1998 04:38:40 -0500\t'
            '<45223423@example.com>\t \t1234\t'
            '17\tXref: \t',
        ]
        overview = nntplib._parse_overview(lines, fmt)
        (art_num, fields), = overview
        self.assertEqual(fields['references'], ' ')
        self.assertEqual(fields['xref'], '')

    def test_parse_datetime(self):
        # _parse_datetime() accepts both the DATE response form and the
        # split date/time form; two-digit years are windowed to 19xx/20xx.
        def gives(a, b, *c):
            self.assertEqual(nntplib._parse_datetime(a, b),
                             datetime.datetime(*c))
        # Output of DATE command
        gives("19990623135624", None, 1999, 6, 23, 13, 56, 24)
        # Variations
        gives("19990623", "135624", 1999, 6, 23, 13, 56, 24)
        gives("990623", "135624", 1999, 6, 23, 13, 56, 24)
        gives("090623", "135624", 2009, 6, 23, 13, 56, 24)

    def test_unparse_datetime(self):
        # _unparse_datetime() formats to (yyyymmdd, hhmmss); a date-only
        # input gets a midnight time component.
        # Test non-legacy mode
        # 1) with a datetime
        def gives(y, M, d, h, m, s, date_str, time_str):
            dt = datetime.datetime(y, M, d, h, m, s)
            self.assertEqual(nntplib._unparse_datetime(dt),
                             (date_str, time_str))
            self.assertEqual(nntplib._unparse_datetime(dt, False),
                             (date_str, time_str))
        gives(1999, 6, 23, 13, 56, 24, "19990623", "135624")
        gives(2000, 6, 23, 13, 56, 24, "20000623", "135624")
        gives(2010, 6, 5, 1, 2, 3, "20100605", "010203")
        # 2) with a date
        def gives(y, M, d, date_str, time_str):
            dt = datetime.date(y, M, d)
            self.assertEqual(nntplib._unparse_datetime(dt),
                             (date_str, time_str))
            self.assertEqual(nntplib._unparse_datetime(dt, False),
                             (date_str, time_str))
        gives(1999, 6, 23, "19990623", "000000")
        gives(2000, 6, 23, "20000623", "000000")
        gives(2010, 6, 5, "20100605", "000000")

    def test_unparse_datetime_legacy(self):
        # Legacy (RFC 977) mode uses two-digit years: (yymmdd, hhmmss).
        # Test legacy mode (RFC 977)
        # 1) with a datetime
        def gives(y, M, d, h, m, s, date_str, time_str):
            dt = datetime.datetime(y, M, d, h, m, s)
            self.assertEqual(nntplib._unparse_datetime(dt, True),
                             (date_str, time_str))
        gives(1999, 6, 23, 13, 56, 24, "990623", "135624")
        gives(2000, 6, 23, 13, 56, 24, "000623", "135624")
        gives(2010, 6, 5, 1, 2, 3, "100605", "010203")
        # 2) with a date
        def gives(y, M, d, date_str, time_str):
            dt = datetime.date(y, M, d)
            self.assertEqual(nntplib._unparse_datetime(dt, True),
                             (date_str, time_str))
        gives(1999, 6, 23, "990623", "000000")
        gives(2000, 6, 23, "000623", "000000")
        gives(2010, 6, 5, "100605", "000000")
def test_main():
    """Run the local (mocked) and networked NNTP test classes."""
    tests = [
        MiscTests,
        NNTPv1Tests,
        NNTPv2Tests,
        CapsAfterLoginNNTPv2Tests,
        SendReaderNNTPv2Tests,
        NetworkedNNTPTests,
    ]
    # The SSL flavour only runs when the ssl module is available.
    if _have_ssl:
        tests.append(NetworkedNNTP_SSLTests)
    support.run_unittest(*tests)
# Allow running this test file directly (outside of regrtest).
if __name__ == "__main__":
    test_main()
| bsd-3-clause |
Ophiuchus1312/enigma2-master | lib/python/Screens/TimerEntry.py | 1 | 22034 | # -*- coding: utf-8 -*-
from Screens.Screen import Screen
import ChannelSelection
from ServiceReference import ServiceReference
from Components.config import config, ConfigSelection, ConfigText, ConfigSubList, ConfigDateTime, ConfigClock, ConfigYesNo, getConfigListEntry
from Components.ActionMap import NumberActionMap
from Components.ConfigList import ConfigListScreen
from Components.MenuList import MenuList
from Components.Button import Button
from Components.Label import Label
from Components.Pixmap import Pixmap
from Components.SystemInfo import SystemInfo
from Components.UsageConfig import defaultMoviePath
from Screens.MovieSelection import getPreferredTagEditor
from Screens.LocationBox import MovieLocationBox
from Screens.ChoiceBox import ChoiceBox
from Screens.MessageBox import MessageBox
from Screens.Setup import SetupSummary
from RecordTimer import AFTEREVENT
from enigma import eEPGCache, eServiceReference
from time import localtime, mktime, time, strftime
from datetime import datetime
class TimerEntry(Screen, ConfigListScreen):
def __init__(self, session, timer):
Screen.__init__(self, session)
self.setup_title = _("Timer entry")
self.timer = timer
self.entryDate = None
self.entryService = None
self["HelpWindow"] = Pixmap()
self["HelpWindow"].hide()
self["description"] = Label("")
self["oktext"] = Label(_("OK"))
self["canceltext"] = Label(_("Cancel"))
self["ok"] = Pixmap()
self["cancel"] = Pixmap()
self.createConfig()
self["actions"] = NumberActionMap(["SetupActions", "GlobalActions", "PiPSetupActions"],
{
"ok": self.keySelect,
"save": self.keyGo,
"cancel": self.keyCancel,
"volumeUp": self.incrementStart,
"volumeDown": self.decrementStart,
"size+": self.incrementEnd,
"size-": self.decrementEnd
}, -2)
self.onChangedEntry = [ ]
self.list = []
ConfigListScreen.__init__(self, self.list, session = session)
self.createSetup("config")
self.onLayoutFinish.append(self.layoutFinished)
if not self.selectionChanged in self["config"].onSelectionChanged:
self["config"].onSelectionChanged.append(self.selectionChanged)
self.selectionChanged()
def createConfig(self):
justplay = self.timer.justplay
always_zap = self.timer.always_zap
afterevent = {
AFTEREVENT.NONE: "nothing",
AFTEREVENT.DEEPSTANDBY: "deepstandby",
AFTEREVENT.STANDBY: "standby",
AFTEREVENT.AUTO: "auto"
}[self.timer.afterEvent]
if self.timer.record_ecm and self.timer.descramble:
recordingtype = "descrambled+ecm"
elif self.timer.record_ecm:
recordingtype = "scrambled+ecm"
elif self.timer.descramble:
recordingtype = "normal"
weekday_table = ("mon", "tue", "wed", "thu", "fri", "sat", "sun")
# calculate default values
day = []
weekday = 0
for x in (0, 1, 2, 3, 4, 5, 6):
day.append(0)
if self.timer.repeated: # repeated
type = "repeated"
if (self.timer.repeated == 31): # Mon-Fri
repeated = "weekdays"
elif (self.timer.repeated == 127): # daily
repeated = "daily"
else:
flags = self.timer.repeated
repeated = "user"
count = 0
for x in (0, 1, 2, 3, 4, 5, 6):
if flags == 1: # weekly
# print "Set to weekday " + str(x)
weekday = x
if flags & 1 == 1: # set user defined flags
day[x] = 1
count += 1
else:
day[x] = 0
flags = flags >> 1
if count == 1:
repeated = "weekly"
else: # once
type = "once"
repeated = None
weekday = int(strftime("%u", localtime(self.timer.begin))) - 1
day[weekday] = 1
self.timerentry_justplay = ConfigSelection(choices = [
("zap", _("zap")), ("record", _("record")), ("zap+record", _("zap and record"))],
default = {0: "record", 1: "zap", 2: "zap+record"}[justplay + 2*always_zap])
if SystemInfo["DeepstandbySupport"]:
shutdownString = _("go to deep standby")
else:
shutdownString = _("shut down")
self.timerentry_afterevent = ConfigSelection(choices = [("nothing", _("do nothing")), ("standby", _("go to standby")), ("deepstandby", shutdownString), ("auto", _("auto"))], default = afterevent)
self.timerentry_recordingtype = ConfigSelection(choices = [("normal", _("normal")), ("descrambled+ecm", _("descramble and record ecm")), ("scrambled+ecm", _("don't descramble, record ecm"))], default = recordingtype)
self.timerentry_type = ConfigSelection(choices = [("once",_("once")), ("repeated", _("repeated"))], default = type)
self.timerentry_name = ConfigText(default = self.timer.name.replace('\xc2\x86', '').replace('\xc2\x87', '').encode("utf-8"), visible_width = 50, fixed_size = False)
self.timerentry_description = ConfigText(default = self.timer.description, visible_width = 50, fixed_size = False)
self.timerentry_tags = self.timer.tags[:]
self.timerentry_tagsset = ConfigSelection(choices = [not self.timerentry_tags and "None" or " ".join(self.timerentry_tags)])
self.timerentry_repeated = ConfigSelection(default = repeated, choices = [("daily", _("daily")), ("weekly", _("weekly")), ("weekdays", _("Mon-Fri")), ("user", _("user defined"))])
self.timerentry_date = ConfigDateTime(default = self.timer.begin, formatstring = _("%d.%B %Y"), increment = 86400)
self.timerentry_starttime = ConfigClock(default = self.timer.begin)
self.timerentry_endtime = ConfigClock(default = self.timer.end)
self.timerentry_showendtime = ConfigSelection(default = ((self.timer.end - self.timer.begin) > 4), choices = [(True, _("yes")), (False, _("no"))])
default = self.timer.dirname or defaultMoviePath()
tmp = config.movielist.videodirs.getValue()
if default not in tmp:
tmp.append(default)
self.timerentry_dirname = ConfigSelection(default = default, choices = tmp)
self.timerentry_repeatedbegindate = ConfigDateTime(default = self.timer.repeatedbegindate, formatstring = _("%d.%B %Y"), increment = 86400)
self.timerentry_weekday = ConfigSelection(default = weekday_table[weekday], choices = [("mon",_("Monday")), ("tue", _("Tuesday")), ("wed",_("Wednesday")), ("thu", _("Thursday")), ("fri", _("Friday")), ("sat", _("Saturday")), ("sun", _("Sunday"))])
self.timerentry_day = ConfigSubList()
for x in (0, 1, 2, 3, 4, 5, 6):
self.timerentry_day.append(ConfigYesNo(default = day[x]))
# FIXME some service-chooser needed here
servicename = "N/A"
try: # no current service available?
servicename = str(self.timer.service_ref.getServiceName())
except:
pass
self.timerentry_service_ref = self.timer.service_ref
self.timerentry_service = ConfigSelection([servicename])
def createSetup(self, widget):
self.list = []
self.list.append(getConfigListEntry(_("Name"), self.timerentry_name, _("Set the name the recording will get.")))
self.list.append(getConfigListEntry(_("Description"), self.timerentry_description, _("Set the description of the recording.")))
self.timerJustplayEntry = getConfigListEntry(_("Timer type"), self.timerentry_justplay, _("Chose between record and ZAP."))
self.list.append(self.timerJustplayEntry)
self.timerTypeEntry = getConfigListEntry(_("Repeat type"), self.timerentry_type, _("A repeating timer or just once?"))
self.list.append(self.timerTypeEntry)
if self.timerentry_type.getValue() == "once":
self.frequencyEntry = None
else: # repeated
self.frequencyEntry = getConfigListEntry(_("Repeats"), self.timerentry_repeated, _("Choose between Daily, Weekly, Weekdays or self defined."))
self.list.append(self.frequencyEntry)
self.repeatedbegindateEntry = getConfigListEntry(_("Starting on"), self.timerentry_repeatedbegindate, _("Set the date the timer must start."))
self.list.append(self.repeatedbegindateEntry)
if self.timerentry_repeated.getValue() == "daily":
pass
if self.timerentry_repeated.getValue() == "weekdays":
pass
if self.timerentry_repeated.getValue() == "weekly":
self.list.append(getConfigListEntry(_("Weekday"), self.timerentry_weekday))
if self.timerentry_repeated.getValue() == "user":
self.list.append(getConfigListEntry(_("Monday"), self.timerentry_day[0]))
self.list.append(getConfigListEntry(_("Tuesday"), self.timerentry_day[1]))
self.list.append(getConfigListEntry(_("Wednesday"), self.timerentry_day[2]))
self.list.append(getConfigListEntry(_("Thursday"), self.timerentry_day[3]))
self.list.append(getConfigListEntry(_("Friday"), self.timerentry_day[4]))
self.list.append(getConfigListEntry(_("Saturday"), self.timerentry_day[5]))
self.list.append(getConfigListEntry(_("Sunday"), self.timerentry_day[6]))
self.entryDate = getConfigListEntry(_("Date"), self.timerentry_date, _("Set the date the timer must start."))
if self.timerentry_type.getValue() == "once":
self.list.append(self.entryDate)
self.entryStartTime = getConfigListEntry(_("Start time"), self.timerentry_starttime, _("Set the time the timer must start."))
self.list.append(self.entryStartTime)
self.entryShowEndTime = getConfigListEntry(_("Set end time"), self.timerentry_showendtime, _("Set the time the timer must stop."))
if self.timerentry_justplay.getValue() == "zap":
self.list.append(self.entryShowEndTime)
self.entryEndTime = getConfigListEntry(_("End time"), self.timerentry_endtime, _("Set the time the timer must stop."))
if self.timerentry_justplay.getValue() != "zap" or self.timerentry_showendtime.getValue():
self.list.append(self.entryEndTime)
self.channelEntry = getConfigListEntry(_("Channel"), self.timerentry_service, _("Set the channel for this timer."))
self.list.append(self.channelEntry)
self.dirname = getConfigListEntry(_("Location"), self.timerentry_dirname, _("Where should the recording be saved?"))
self.tagsSet = getConfigListEntry(_("Tags"), self.timerentry_tagsset, _("Choose a tag for easy finding a recording."))
if self.timerentry_justplay.getValue() != "zap":
if config.usage.setup_level.index >= 2: # expert+
self.list.append(self.dirname)
if getPreferredTagEditor():
self.list.append(self.tagsSet)
self.list.append(getConfigListEntry(_("After event"), self.timerentry_afterevent, _("What action is required on complettion of the timer? 'Auto' lets the box return to the state it had when the timer started. 'Do nothing', 'Go to standby' and 'Go to deep standby' do ecaxtly that.")))
self.list.append(getConfigListEntry(_("Recording type"), self.timerentry_recordingtype, _("Descramble & record ECM' gives the option to descramble afterwards if descrambling on recording failed. 'Don't descramble, record ECM' save a scramble recording that can be descrambled on playback. 'Normal' means descramble the recording and don't record ECM.")))
self[widget].list = self.list
self[widget].l.setList(self.list)
def selectionChanged(self):
if self["config"].getCurrent() and len(self["config"].getCurrent()) > 2 and self["config"].getCurrent()[2]:
self["description"].setText(self["config"].getCurrent()[2])
def layoutFinished(self):
self.setTitle(_(self.setup_title))
def createSummary(self):
return SetupSummary
# for summary:
def changedEntry(self):
for x in self.onChangedEntry:
x()
def getCurrentEntry(self):
return self["config"].getCurrent() and self["config"].getCurrent()[0] or ""
def getCurrentValue(self):
return self["config"].getCurrent() and str(self["config"].getCurrent()[1].getText()) or ""
def newConfig(self):
if self["config"].getCurrent() in (self.timerTypeEntry, self.timerJustplayEntry, self.frequencyEntry, self.entryShowEndTime):
self.createSetup("config")
def keyLeft(self):
if self["config"].getCurrent() in (self.channelEntry, self.tagsSet):
self.keySelect()
else:
ConfigListScreen.keyLeft(self)
self.newConfig()
def keyRight(self):
if self["config"].getCurrent() in (self.channelEntry, self.tagsSet):
self.keySelect()
else:
ConfigListScreen.keyRight(self)
self.newConfig()
def keySelect(self):
cur = self["config"].getCurrent()
if cur == self.channelEntry:
self.session.openWithCallback(
self.finishedChannelSelection,
ChannelSelection.SimpleChannelSelection,
_("Select channel to record from")
)
elif config.usage.setup_level.index >= 2 and cur == self.dirname:
self.session.openWithCallback(
self.pathSelected,
MovieLocationBox,
_("Select target folder"),
self.timerentry_dirname.getValue(),
minFree = 100 # We require at least 100MB free space
)
elif getPreferredTagEditor() and cur == self.tagsSet:
self.session.openWithCallback(
self.tagEditFinished,
getPreferredTagEditor(),
self.timerentry_tags
)
else:
self.keyGo()
def finishedChannelSelection(self, *args):
if args:
self.timerentry_service_ref = ServiceReference(args[0])
self.timerentry_service.setCurrentText(self.timerentry_service_ref.getServiceName())
self["config"].invalidate(self.channelEntry)
def getTimestamp(self, date, mytime):
d = localtime(date)
dt = datetime(d.tm_year, d.tm_mon, d.tm_mday, mytime[0], mytime[1])
return int(mktime(dt.timetuple()))
def getBeginEnd(self):
date = self.timerentry_date.getValue()
endtime = self.timerentry_endtime.getValue()
starttime = self.timerentry_starttime.getValue()
begin = self.getTimestamp(date, starttime)
end = self.getTimestamp(date, endtime)
# if the endtime is less than the starttime, add 1 day.
if end < begin:
end += 86400
# if the timer type is a Zap and no end is set, set duration to 1 second so time is shown in EPG's.
if self.timerentry_justplay.getValue() == "zap":
if not self.timerentry_showendtime.getValue():
end = begin + (config.recording.margin_before.getValue()*60) + 1
return begin, end
def selectChannelSelector(self, *args):
self.session.openWithCallback(
self.finishedChannelSelectionCorrection,
ChannelSelection.SimpleChannelSelection,
_("Select channel to record from")
)
def finishedChannelSelectionCorrection(self, *args):
if args:
self.finishedChannelSelection(*args)
self.keyGo()
def keyGo(self, result = None):
if not self.timerentry_service_ref.isRecordable():
self.session.openWithCallback(self.selectChannelSelector, MessageBox, _("You didn't select a channel to record from."), MessageBox.TYPE_ERROR)
return
self.timer.name = self.timerentry_name.getValue()
self.timer.description = self.timerentry_description.getValue()
self.timer.justplay = self.timerentry_justplay.getValue() == "zap"
self.timer.always_zap = self.timerentry_justplay.value == "zap+record"
if self.timerentry_justplay.getValue() == "zap":
if not self.timerentry_showendtime.getValue():
self.timerentry_endtime.value = self.timerentry_starttime.getValue()
self.timer.resetRepeated()
self.timer.afterEvent = {
"nothing": AFTEREVENT.NONE,
"deepstandby": AFTEREVENT.DEEPSTANDBY,
"standby": AFTEREVENT.STANDBY,
"auto": AFTEREVENT.AUTO
}[self.timerentry_afterevent.value]
self.timer.descramble = {
"normal": True,
"descrambled+ecm": True,
"scrambled+ecm": False,
}[self.timerentry_recordingtype.value]
self.timer.record_ecm = {
"normal": False,
"descrambled+ecm": True,
"scrambled+ecm": True,
}[self.timerentry_recordingtype.value]
self.timer.service_ref = self.timerentry_service_ref
self.timer.tags = self.timerentry_tags
if self.timer.dirname or self.timerentry_dirname.getValue() != defaultMoviePath():
self.timer.dirname = self.timerentry_dirname.getValue()
config.movielist.last_timer_videodir.value = self.timer.dirname
config.movielist.last_timer_videodir.save()
if self.timerentry_type.getValue() == "once":
self.timer.begin, self.timer.end = self.getBeginEnd()
if self.timerentry_type.getValue() == "repeated":
if self.timerentry_repeated.getValue() == "daily":
for x in (0, 1, 2, 3, 4, 5, 6):
self.timer.setRepeated(x)
if self.timerentry_repeated.getValue() == "weekly":
self.timer.setRepeated(self.timerentry_weekday.index)
if self.timerentry_repeated.getValue() == "weekdays":
for x in (0, 1, 2, 3, 4):
self.timer.setRepeated(x)
if self.timerentry_repeated.getValue() == "user":
for x in (0, 1, 2, 3, 4, 5, 6):
if self.timerentry_day[x].getValue():
self.timer.setRepeated(x)
self.timer.repeatedbegindate = self.getTimestamp(self.timerentry_repeatedbegindate.getValue(), self.timerentry_starttime.getValue())
if self.timer.repeated:
self.timer.begin = self.getTimestamp(self.timerentry_repeatedbegindate.getValue(), self.timerentry_starttime.getValue())
self.timer.end = self.getTimestamp(self.timerentry_repeatedbegindate.getValue(), self.timerentry_endtime.getValue())
else:
self.timer.begin = self.getTimestamp(time.time(), self.timerentry_starttime.getValue())
self.timer.end = self.getTimestamp(time.time(), self.timerentry_endtime.getValue())
# when a timer end is set before the start, add 1 day
if self.timer.end < self.timer.begin:
self.timer.end += 86400
if self.timer.eit is not None:
event = eEPGCache.getInstance().lookupEventId(self.timer.service_ref.ref, self.timer.eit)
if event:
n = event.getNumOfLinkageServices()
if n > 1:
tlist = []
ref = self.session.nav.getCurrentlyPlayingServiceOrGroup()
parent = self.timer.service_ref.ref
selection = 0
for x in range(n):
i = event.getLinkageService(parent, x)
if i.toString() == ref.toString():
selection = x
tlist.append((i.getName(), i))
self.session.openWithCallback(self.subserviceSelected, ChoiceBox, title=_("Please select a subservice to record..."), list = tlist, selection = selection)
return
elif n > 0:
parent = self.timer.service_ref.ref
self.timer.service_ref = ServiceReference(event.getLinkageService(parent, 0))
self.saveTimer()
self.close((True, self.timer))
def incrementStart(self):
    """Step the start time one unit forward and refresh its display.

    For single-shot ("once") timers, wrapping past midnight to 00:00
    rolls the date forward by one day.
    """
    self.timerentry_starttime.increment()
    self["config"].invalidate(self.entryStartTime)
    wrapped_to_midnight = (self.timerentry_type.value == "once"
                           and self.timerentry_starttime.value == [0, 0])
    if wrapped_to_midnight:
        self.timerentry_date.value += 86400
        self["config"].invalidate(self.entryDate)
def decrementStart(self):
    """Step the start time one unit backward and refresh its display.

    For single-shot ("once") timers, wrapping back past midnight to 23:59
    rolls the date back by one day.
    """
    self.timerentry_starttime.decrement()
    self["config"].invalidate(self.entryStartTime)
    wrapped_before_midnight = (self.timerentry_type.value == "once"
                               and self.timerentry_starttime.value == [23, 59])
    if wrapped_before_midnight:
        self.timerentry_date.value -= 86400
        self["config"].invalidate(self.entryDate)
def incrementEnd(self):
    """Step the end time one unit forward, if an end-time entry exists."""
    if self.entryEndTime is None:
        return
    self.timerentry_endtime.increment()
    self["config"].invalidate(self.entryEndTime)
def decrementEnd(self):
    """Step the end time one unit backward, if an end-time entry exists."""
    if self.entryEndTime is None:
        return
    self.timerentry_endtime.decrement()
    self["config"].invalidate(self.entryEndTime)
def subserviceSelected(self, service):
    """Callback from the subservice ChoiceBox.

    *service* is a ``(name, eServiceReference)`` tuple, or ``None`` when the
    selection was cancelled; in either case the timer is saved and the
    screen closes with ``(True, timer)``.
    """
    # Fix: use the idiomatic `is not None` instead of `not service is None`
    # (PEP 8 / E714); behavior is identical.
    if service is not None:
        self.timer.service_ref = ServiceReference(service[1])
    self.saveTimer()
    self.close((True, self.timer))
def saveTimer(self):
    # Persist all timers to disk through the session's RecordTimer.
    self.session.nav.RecordTimer.saveTimer()
def keyCancel(self):
    # Close without saving; (False,) signals "no timer change" to the caller.
    self.close((False,))
def pathSelected(self, res):
    """Callback from the directory browser: adopt the chosen record path.

    *res* is the selected directory, or None when the dialog was cancelled.
    """
    if res is None:
        return
    videodirs = config.movielist.videodirs.getValue()
    if videodirs != self.timerentry_dirname.choices:
        self.timerentry_dirname.setChoices(videodirs, default=res)
    self.timerentry_dirname.value = res
def tagEditFinished(self, ret):
    """Callback from the tag editor.

    *ret* is the new list of tags, or None when editing was cancelled.
    """
    if ret is None:
        return
    self.timerentry_tags = ret
    # Display the tags space-separated, or the literal "None" for an empty list.
    label = " ".join(ret) if ret else "None"
    self.timerentry_tagsset.setChoices([label])
    self["config"].invalidate(self.tagsSet)
class TimerLog(Screen):
    """Screen showing a timer's log entries.

    Entries can be deleted individually (red button) or cleared entirely
    (blue button). close() returns (True, timer) when the log was modified,
    (False,) otherwise.
    """

    def __init__(self, session, timer):
        Screen.__init__(self, session)
        self.timer = timer
        # Work on a copy so edits can be discarded when nothing changed.
        self.log_entries = self.timer.log_entries[:]
        self.fillLogList()
        self["loglist"] = MenuList(self.list)
        self["logentry"] = Label()
        self["key_red"] = Button(_("Delete entry"))
        self["key_green"] = Button()
        self["key_yellow"] = Button("")
        self["key_blue"] = Button(_("Clear log"))
        self.onShown.append(self.updateText)
        self["actions"] = NumberActionMap(["OkCancelActions", "DirectionActions", "ColorActions"],
            {
                "ok": self.keyClose,
                "cancel": self.keyClose,
                "up": self.up,
                "down": self.down,
                "left": self.left,
                "right": self.right,
                "red": self.deleteEntry,
                "blue": self.clearLog
            }, -1)
        self.setTitle(_("Timer log"))

    def deleteEntry(self):
        # Remove the currently selected entry from the working copy.
        cur = self["loglist"].getCurrent()
        if cur is None:
            return
        self.log_entries.remove(cur[1])
        self.fillLogList()
        self["loglist"].l.setList(self.list)
        self.updateText()

    def fillLogList(self):
        # Build display rows: "YYYY-MM-DD HH-MM - message" paired with the raw entry.
        self.list = [(str(strftime("%Y-%m-%d %H-%M", localtime(x[0])) + " - " + x[2]), x) for x in self.log_entries]

    def clearLog(self):
        self.log_entries = []
        self.fillLogList()
        self["loglist"].l.setList(self.list)
        self.updateText()

    def keyClose(self):
        # Commit the edited log back to the timer only when it actually changed.
        if self.timer.log_entries != self.log_entries:
            self.timer.log_entries = self.log_entries
            self.close((True, self.timer))
        else:
            self.close((False,))

    def up(self):
        self["loglist"].instance.moveSelection(self["loglist"].instance.moveUp)
        self.updateText()

    def down(self):
        self["loglist"].instance.moveSelection(self["loglist"].instance.moveDown)
        self.updateText()

    def left(self):
        self["loglist"].instance.moveSelection(self["loglist"].instance.pageUp)
        self.updateText()

    def right(self):
        self["loglist"].instance.moveSelection(self["loglist"].instance.pageDown)
        self.updateText()

    def updateText(self):
        # Show the selected entry's message, or nothing when the log is empty.
        if self.list:
            self["logentry"].setText(str(self["loglist"].getCurrent()[1][2]))
        else:
            self["logentry"].setText("")
class InstantRecordTimerEntry(TimerEntry):
    """Headless TimerEntry used for instant records/zaps: applies defaults
    and saves immediately without ever showing the setup dialog."""

    def __init__(self, session, timer, zap):
        # NOTE(review): deliberately bypasses TimerEntry.__init__ (which would
        # build the whole config UI) and calls Screen.__init__ directly —
        # confirm this is intended before refactoring.
        Screen.__init__(self, session)
        self.setup_title = ""
        self.timer = timer
        self.timer.justplay = zap
        # No date/service config entries exist in this headless variant.
        self.entryDate = None
        self.entryService = None
        self.keyGo()

    def keyGo(self, result = None):
        # Zap-only timers need just a short window after the start margin.
        if self.timer.justplay:
            self.timer.end = self.timer.begin + (config.recording.margin_before.getValue() * 60) + 1
        self.timer.resetRepeated()
        self.saveTimer()

    def retval(self):
        # Accessor used by the caller to fetch the configured timer.
        return self.timer

    def saveTimer(self):
        # Persist all timers to disk through the session's RecordTimer.
        self.session.nav.RecordTimer.saveTimer()
| gpl-2.0 |
mistercrunch/airflow | airflow/providers/apache/hive/transfers/hive_to_samba.py | 7 | 2867 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains operator to move data from Hive to Samba."""
from tempfile import NamedTemporaryFile
from airflow.models import BaseOperator
from airflow.providers.apache.hive.hooks.hive import HiveServer2Hook
from airflow.providers.samba.hooks.samba import SambaHook
from airflow.utils.decorators import apply_defaults
from airflow.utils.operator_helpers import context_to_airflow_vars
class HiveToSambaOperator(BaseOperator):
    """
    Executes hql code in a specific Hive database and loads the
    results of the query as a csv to a Samba location.

    :param hql: the hql to be exported. (templated)
    :type hql: str
    :param destination_filepath: the file path to where the file will be pushed onto samba
    :type destination_filepath: str
    :param samba_conn_id: reference to the samba destination
    :type samba_conn_id: str
    :param hiveserver2_conn_id: reference to the hiveserver2 service
    :type hiveserver2_conn_id: str
    """

    template_fields = ('hql', 'destination_filepath')
    template_ext = (
        '.hql',
        '.sql',
    )

    @apply_defaults
    def __init__(
        self,
        *,
        hql: str,
        destination_filepath: str,
        samba_conn_id: str = 'samba_default',
        hiveserver2_conn_id: str = 'hiveserver2_default',
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.hiveserver2_conn_id = hiveserver2_conn_id
        self.samba_conn_id = samba_conn_id
        self.destination_filepath = destination_filepath
        # Strip whitespace and any trailing semicolon from the statement —
        # presumably because HiveServer2 rejects a trailing ';'; confirm.
        self.hql = hql.strip().rstrip(';')

    def execute(self, context):
        # Stage the query result in a local temp file, then push it to Samba;
        # the temp file is removed automatically when the context exits.
        with NamedTemporaryFile() as tmp_file:
            self.log.info("Fetching file from Hive")
            hive = HiveServer2Hook(hiveserver2_conn_id=self.hiveserver2_conn_id)
            hive.to_csv(hql=self.hql, csv_filepath=tmp_file.name, hive_conf=context_to_airflow_vars(context))
            self.log.info("Pushing to samba")
            samba = SambaHook(samba_conn_id=self.samba_conn_id)
            samba.push_from_local(self.destination_filepath, tmp_file.name)
| apache-2.0 |
faribas/RMG-Py | mergeModels.py | 4 | 4047 | #!/usr/bin/env python
# encoding: utf-8
"""
This script enables the automatic merging of two or more Chemkin files (and
associated species dictionaries) into a single unified Chemkin file. Simply
pass the paths of the Chemkin files and species dictionaries on the
command-line, e.g.
$ python mergeModels.py /path/to/chem1.inp /path/to/species_dictionary1.txt /path/to/chem2.inp /path/to/species_dictionary2.txt
The resulting merged files are placed in ``chem.inp`` and
``species_dictionary.txt`` in the execution directory.
"""
import os.path
import argparse
from rmgpy.chemkin import loadChemkinFile, saveChemkinFile, saveSpeciesDictionary, saveTransportFile
from rmgpy.reaction import ReactionModel
################################################################################
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--model1', metavar='FILE', type=str, nargs='+',
help='the Chemkin files and species dictionaries of the first model to merge')
parser.add_argument('--model2', metavar='FILE', type=str, nargs='+',
help='the Chemkin files and species dictionaries of the second model to merge')
parser.add_argument('--model3', metavar='FILE', type=str, nargs='+',
help='the Chemkin files and species dictionaries of the third model to merge')
parser.add_argument('--model4', metavar='FILE', type=str, nargs='+',
help='the Chemkin files and species dictionaries of the fourth model to merge')
parser.add_argument('--model5', metavar='FILE', type=str, nargs='+',
help='the Chemkin files and species dictionaries of the fifth model to merge')
args = parser.parse_args()
transport = False
inputModelFiles = []
for model in [args.model1, args.model2, args.model3, args.model4, args.model5]:
if model is None: continue
if len(model) == 2:
inputModelFiles.append((model[0], model[1], None))
elif len(model) == 3:
transport = True
inputModelFiles.append((model[0], model[1], model[2]))
else:
raise Exception
outputChemkinFile = 'chem.inp'
outputSpeciesDictionary = 'species_dictionary.txt'
outputTransportFile = 'tran.dat' if transport else None
# Load the models to merge
models = []
for chemkin, speciesPath, transportPath in inputModelFiles:
print 'Loading model #{0:d}...'.format(len(models)+1)
model = ReactionModel()
model.species, model.reactions = loadChemkinFile(chemkin, speciesPath, transportPath=transportPath)
models.append(model)
finalModel = ReactionModel()
for i, model in enumerate(models):
print 'Ignoring common species and reactions from model #{0:d}...'.format(i+1)
Nspec0 = len(finalModel.species)
Nrxn0 = len(finalModel.reactions)
finalModel = finalModel.merge(model)
Nspec = len(finalModel.species)
Nrxn = len(finalModel.reactions)
print 'Added {1:d} out of {2:d} ({3:.1f}%) unique species from model #{0:d}.'.format(i+1, Nspec - Nspec0, len(model.species), (Nspec - Nspec0) * 100. / len(model.species))
print 'Added {1:d} out of {2:d} ({3:.1f}%) unique reactions from model #{0:d}.'.format(i+1, Nrxn - Nrxn0, len(model.reactions), (Nrxn - Nrxn0) * 100. / len(model.reactions))
print 'The merged model has {0:d} species and {1:d} reactions'.format(len(finalModel.species), len(finalModel.reactions))
# Save the merged model to disk
saveChemkinFile(outputChemkinFile, finalModel.species, finalModel.reactions)
saveSpeciesDictionary(outputSpeciesDictionary, finalModel.species)
if transport:
saveTransportFile(outputTransportFile, finalModel.species)
print 'Merged Chemkin file saved to {0}'.format(outputChemkinFile)
print 'Merged species dictionary saved to {0}'.format(outputSpeciesDictionary)
if transport:
print 'Merged transport file saved to {0}'.format(outputTransportFile)
| mit |
lixiangning888/whole_project | analyzer/windows/modules/packages/html.py | 1 | 1174 | # Copyright (C) 2010-2015 Cuckoo Foundation.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
import shutil
import logging
from lib.common.abstracts import Package
log = logging.getLogger(__name__)
class HTML(Package):
    """HTML file analysis package: opens the sample in Internet Explorer."""

    PATHS = [
        ("ProgramFiles", "Internet Explorer", "iexplore.exe"),
    ]

    def start(self, path):
        browser = self.get_path("browser")
        # A sample detected as HTML but submitted without a proper extension
        # (hash-named samples, for instance) would be opened by IE as plain
        # text and never rendered, so give it an .html extension first.
        if not path.endswith((".htm", ".html")):
            renamed = path + ".html"
            shutil.copy(path, renamed)
            path = renamed
            log.info("Submitted file is missing extension, adding .html")
        return self.execute(browser, "\"%s\"" % path, path)
| lgpl-3.0 |
AltarBeastiful/qt-creator | tests/system/suite_APTW/tst_APTW03/test.py | 4 | 7796 | #############################################################################
##
## Copyright (C) 2014 Digia Plc and/or its subsidiary(-ies).
## Contact: http://www.qt-project.org/legal
##
## This file is part of Qt Creator.
##
## Commercial License Usage
## Licensees holding valid commercial Qt licenses may use this file in
## accordance with the commercial license agreement provided with the
## Software or, alternatively, in accordance with the terms contained in
## a written agreement between you and Digia. For licensing terms and
## conditions see http://www.qt.io/licensing. For further information
## use the contact form at http://www.qt.io/contact-us.
##
## GNU Lesser General Public License Usage
## Alternatively, this file may be used under the terms of the GNU Lesser
## General Public License version 2.1 or version 3 as published by the Free
## Software Foundation and appearing in the file LICENSE.LGPLv21 and
## LICENSE.LGPLv3 included in the packaging of this file. Please review the
## following information to ensure the GNU Lesser General Public License
## requirements will be met: https://www.gnu.org/licenses/lgpl.html and
# http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
##
## In addition, as a special exception, Digia gives you certain additional
## rights. These rights are described in the Digia Qt LGPL Exception
## version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
##
#############################################################################
source("../../shared/qtcreator.py")
def handleInsertVirtualFunctions(expected):
    """Drive Qt Creator's 'Insert Virtual Functions' dialog.

    For every entry in *expected*, verify it appears check-marked in the
    dialog's function tree (clicking it to check it when it is not), then
    choose 'Insert definitions in implementation file' and accept.
    """
    treeView = waitForObject("{container={title='Functions to insert:' type='QGroupBox' unnamed='1'"
                             " visible='1'} type='QTreeView' unnamed='1' visible='1'}")

    model = treeView.model()
    classIndices = dumpIndices(model, treeView.rootIndex())
    found = set()
    # CheckStateRole holds the check mark; toInt() unwraps the QVariant.
    isChecked = lambda ch: model.data(ch, Qt.CheckStateRole).toInt() == Qt.Checked

    for classIndex in classIndices:
        if model.hasChildren(classIndex):
            for child in dumpIndices(model, classIndex):
                for curr in expected:
                    if str(child.text).startswith(curr):
                        if test.verify(isChecked(child), "Verifying: '%s' is checked." % curr):
                            found.add(curr)
                        else:
                            # Not pre-checked: click the item to check it ourselves.
                            item = "%s.%s" % (str(classIndex.text), str(child.text))
                            test.log("Checking '%s'." % item)
                            # Underscores must be escaped in Squish item paths.
                            clickItem(treeView, item.replace("_", "\\_"), 5, 5, 0, Qt.LeftButton)
                            waitFor("isChecked(child)", 1000)
    test.verify(len(set(expected).difference(found)) == 0,
                "Verifying whether all expected functions have been found.")
    selectFromCombo("{container={title='Insertion options:' type='QGroupBox' unnamed='1' "
                    " visible='1'} type='QComboBox' unnamed='1' visible='1'}",
                    "Insert definitions in implementation file")
    clickButton("{text='OK' type='QPushButton' unnamed='1' visible='1'}")
def checkSimpleCppLib(projectName, static):
    """Create a plain C++ library project and build it in Release for every kit.

    *static* selects a static library instead of a shared one.
    """
    checkedTargets, projectName, className = createNewCPPLib(
        tempDir(), projectName, "MyClass",
        target=Targets.desktopTargetClasses(), isStatic=static)
    kitCount = len(checkedTargets)
    for kit, config in iterateBuildConfigs(kitCount, "Release"):
        verifyBuildConfig(kitCount, kit, config, False, True)
        invokeMenuItem('Build', 'Build Project "%s"' % projectName)
        waitForCompile(10000)
        checkCompile()
def addReturn(editor, toFunction, returnValue):
    """Insert 'return <returnValue>;' as the first statement of the function
    whose definition line matches the regex *toFunction*."""
    placeCursorToLine(editor, toFunction, True)
    for keystroke in ("<Down>", "<Return>"):
        type(editor, keystroke)
    type(editor, "return %s;" % returnValue)
def main():
    """Squish entry point: build C++ libs and a Qt plugin, exercising the
    'Insert Virtual Functions' refactoring on the plugin class."""
    startApplication("qtcreator" + SettingsPath)
    if not startedWithoutPluginError():
        return
    checkSimpleCppLib("SampleApp1", False)
    checkSimpleCppLib("SampleApp2", True)

    # Qt Plugin needs Qt4.8 for QGenericPlugin which is tested by default
    targets = Targets.desktopTargetClasses() ^ Targets.DESKTOP_474_GCC
    checkedTargets, projectName, className = createNewQtPlugin(tempDir(), "SampleApp3", "MyPlugin",
                                                               target=targets)
    is12251Open = JIRA.isBugStillOpen(12251)
    virtualFunctionsAdded = False
    for kit, config in iterateBuildConfigs(len(checkedTargets), "Debug"):
        verifyBuildConfig(len(checkedTargets), kit, config, True, True)
        # Known Windows/Qt4.8 breakage tracked as QTCREATORBUG-12251.
        if (virtualFunctionsAdded and is12251Open and platform.system() in ('Microsoft', 'Windows')
                and "480" in Targets.getStringForTarget(checkedTargets[kit])):
            test.warning("Skipping building of Qt4.8 targets because of QTCREATORBUG-12251.")
            continue
        invokeMenuItem('Build', 'Build Project "%s"' % projectName)
        waitForCompile(10000)
        # Perform the refactoring only once, on the first kit.
        if not virtualFunctionsAdded:
            checkLastBuild(True, False)
            if not openDocument("%s.Headers.%s\.h" % (projectName, className.lower())):
                test.fail("Could not open %s.h - continuing." % className.lower())
                continue
            editor = getEditorForFileSuffix("%s.h" % className.lower())
            oldContent = str(editor.plainText)
            placeCursorToLine(editor, "class %s.*" % className, True)
            snooze(1)  # avoid timing issue with the parser
            invokeContextMenuItem(editor, "Refactor", "Insert Virtual Functions of Base Classes")
            handleInsertVirtualFunctions(["keys() const = 0 : QStringList",
                                          "create(const QString &, const QString &) = 0 : QObject *"])
            waitFor("'keys' in str(editor.plainText)", 2000)
            modifiedContent = str(editor.plainText)
            test.verify(re.search("QStringList keys.*;", modifiedContent, re.MULTILINE),
                        "Verifying whether keys() declaration has been added to the header.")
            test.verify(re.search("QObject \*create.*;", modifiedContent, re.MULTILINE),
                        "Verifying whether create() declaration has been added to the header.")
            if not openDocument("%s.Sources.%s\.cpp" % (projectName, className.lower())):
                test.fail("Could not open %s.cpp - continuing." % className.lower())
                continue
            editor = getEditorForFileSuffix("%s.cpp" % className.lower())
            modifiedContent = str(editor.plainText)
            test.verify("QStringList %s::keys(" % className in modifiedContent,
                        "Verifying whether keys() definition has been added to the source file.")
            test.verify("QObject *%s::create(" % className in modifiedContent,
                        "Verifying whether create() definition has been added to the source file.")
            # add return to not run into build issues of missing return values
            addReturn(editor, "QStringList %s::keys.*" % className, "QStringList()")
            addReturn(editor, "QObject \*%s::create.*" % className, "0")
            virtualFunctionsAdded = True
            invokeMenuItem('File', 'Save All')
            if (is12251Open and platform.system() in ('Microsoft', 'Windows')
                    and "480" in Targets.getStringForTarget(checkedTargets[kit])):
                test.warning("Skipping building of Qt4.8 targets because of QTCREATORBUG-12251.")
                continue
            invokeMenuItem('Build', 'Rebuild Project "%s"' % projectName)
            waitForCompile(10000)
            checkCompile()
    invokeMenuItem("File", "Exit")
| lgpl-2.1 |
MKTCloud/MKTCloud | openstack_dashboard/dashboards/admin/images_and_snapshots/snapshots/views.py | 10 | 2163 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Views for managing instance snapshots.
"""
import logging
from django.core.urlresolvers import reverse, reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from openstack_dashboard import api
from .forms import CreateSnapshot
LOG = logging.getLogger(__name__)
class CreateView(forms.ModalFormView):
    """Modal form view for creating a snapshot of a running instance."""
    form_class = CreateSnapshot
    template_name = 'project/images_and_snapshots/snapshots/create.html'
    success_url = reverse_lazy("horizon:project:images_and_snapshots:index")

    def get_object(self):
        """Fetch and cache the instance being snapshotted.

        On failure, redirects back to the instance list with an error.
        """
        if not hasattr(self, "_object"):
            try:
                self._object = api.nova.server_get(self.request,
                                                   self.kwargs["instance_id"])
            except Exception:
                # Fix: the original bare ``except:`` would also swallow
                # SystemExit/KeyboardInterrupt; catch only real errors.
                redirect = reverse('horizon:project:instances:index')
                exceptions.handle(self.request,
                                  _("Unable to retrieve instance."),
                                  redirect=redirect)
        return self._object

    def get_initial(self):
        return {"instance_id": self.kwargs["instance_id"]}

    def get_context_data(self, **kwargs):
        context = super(CreateView, self).get_context_data(**kwargs)
        context['instance'] = self.get_object()
        return context
| apache-2.0 |
fedorpatlin/ansible | lib/ansible/modules/cloud/openstack/os_user.py | 26 | 9043 | #!/usr/bin/python
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_user
short_description: Manage OpenStack Identity Users
extends_documentation_fragment: openstack
author: David Shrewsbury
version_added: "2.0"
description:
- Manage OpenStack Identity users. Users can be created,
updated or deleted using this module. A user will be updated
if I(name) matches an existing user and I(state) is present.
The value for I(name) cannot be updated without deleting and
re-creating the user.
options:
name:
description:
- Username for the user
required: true
password:
description:
- Password for the user
required: false
default: None
update_password:
required: false
default: always
choices: ['always', 'on_create']
version_added: "2.3"
description:
- C(always) will attempt to update password. C(on_create) will only
set the password for newly created users.
email:
description:
- Email address for the user
required: false
default: None
default_project:
description:
- Project name or ID that the user should be associated with by default
required: false
default: None
domain:
description:
- Domain to create the user in if the cloud supports domains
required: false
default: None
enabled:
description:
- Is the user enabled
required: false
default: True
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
default: present
availability_zone:
description:
- Ignored. Present for backwards compatability
required: false
requirements:
- "python >= 2.6"
- "shade"
'''
EXAMPLES = '''
# Create a user
- os_user:
cloud: mycloud
state: present
name: demouser
password: secret
email: demo@example.com
domain: default
default_project: demo
# Delete a user
- os_user:
cloud: mycloud
state: absent
name: demouser
# Create a user but don't update password if user exists
- os_user:
cloud: mycloud
state: present
name: demouser
password: secret
update_password: on_create
email: demo@example.com
domain: default
default_project: demo
'''
RETURN = '''
user:
description: Dictionary describing the user.
returned: On success when I(state) is 'present'
type: dictionary
contains:
default_project_id:
description: User default project ID. Only present with Keystone >= v3.
type: string
sample: "4427115787be45f08f0ec22a03bfc735"
domain_id:
description: User domain ID. Only present with Keystone >= v3.
type: string
sample: "default"
email:
description: User email address
type: string
sample: "demo@example.com"
id:
description: User ID
type: string
sample: "f59382db809c43139982ca4189404650"
name:
description: User name
type: string
sample: "demouser"
'''
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
def _needs_update(params_dict, user):
for k, v in params_dict.items():
if k not in ('password', 'update_password') and user[k] != v:
return True
# We don't get password back in the user object, so assume any supplied
# password is a change.
if (params_dict['password'] is not None and
params_dict['update_password'] == 'always'):
return True
return False
def _get_domain_id(cloud, domain):
    """Resolve *domain* (a domain ID or unique name) to a domain ID.

    Tries, in order: treat it as an ID, then as a name, and finally fall
    back to returning it unchanged (a non-admin user may only be able to
    supply a literal ID).
    """
    try:
        # We assume admin is passing a domain id.
        domain_id = cloud.get_domain(domain)['id']
    except Exception:
        # Fix: narrowed the original bare ``except:`` (E722), which would
        # also swallow SystemExit/KeyboardInterrupt.
        # If we fail, maybe admin is passing a domain name; note that
        # domain names are unique, just like ids.
        try:
            domain_id = cloud.search_domains(filters={'name': domain})[0]['id']
        except Exception:
            # Ok, let's hope the user is non-admin and passing a sane id.
            domain_id = domain
    return domain_id
def _get_default_project_id(cloud, default_project):
    """Resolve *default_project* (a project name or ID) to a project ID.

    Raises shade.OpenStackCloudException when the project does not exist;
    main()'s exception handler turns that into module.fail_json().
    """
    project = cloud.get_project(default_project)
    if not project:
        # Fix: the original called ``module.fail_json`` here, but ``module``
        # only exists as a local inside main(), so this path raised a
        # NameError instead of failing cleanly. Raising the shade exception
        # is caught by main() and reported properly.
        raise shade.OpenStackCloudException(
            'Default project %s is not valid' % default_project)
    return project['id']
def main():
    """Ansible entry point: create, update, or delete a Keystone user."""
    argument_spec = openstack_full_argument_spec(
        name=dict(required=True),
        password=dict(required=False, default=None, no_log=True),
        email=dict(required=False, default=None),
        default_project=dict(required=False, default=None),
        domain=dict(required=False, default=None),
        enabled=dict(default=True, type='bool'),
        state=dict(default='present', choices=['absent', 'present']),
        update_password=dict(default='always', choices=['always',
                                                        'on_create']),
    )

    module_kwargs = openstack_module_kwargs()
    module = AnsibleModule(
        argument_spec,
        **module_kwargs)

    if not HAS_SHADE:
        module.fail_json(msg='shade is required for this module')

    name = module.params['name']
    # Pop the password so it is not forwarded to shade.openstack_cloud().
    password = module.params.pop('password')
    email = module.params['email']
    default_project = module.params['default_project']
    domain = module.params['domain']
    enabled = module.params['enabled']
    state = module.params['state']
    update_password = module.params['update_password']

    try:
        cloud = shade.openstack_cloud(**module.params)
        user = cloud.get_user(name)

        domain_id = None
        if domain:
            # Domain lookups require operator (admin-level) credentials.
            opcloud = shade.operator_cloud(**module.params)
            domain_id = _get_domain_id(opcloud, domain)

        if state == 'present':
            if update_password in ('always', 'on_create'):
                if not password:
                    msg = ("update_password is %s but a password value is "
                           "missing") % update_password
                    module.fail_json(msg=msg)
            default_project_id = None
            if default_project:
                default_project_id = _get_default_project_id(cloud, default_project)
            if user is None:
                user = cloud.create_user(
                    name=name, password=password, email=email,
                    default_project=default_project_id, domain_id=domain_id,
                    enabled=enabled)
                changed = True
            else:
                # Compare the requested state against the existing user.
                params_dict = {'email': email, 'enabled': enabled,
                               'password': password,
                               'update_password': update_password}
                if domain_id is not None:
                    params_dict['domain_id'] = domain_id
                if default_project_id is not None:
                    params_dict['default_project_id'] = default_project_id
                if _needs_update(params_dict, user):
                    if update_password == 'always':
                        user = cloud.update_user(
                            user['id'], password=password, email=email,
                            default_project=default_project_id,
                            domain_id=domain_id, enabled=enabled)
                    else:
                        # 'on_create': an existing user keeps its password.
                        user = cloud.update_user(
                            user['id'], email=email,
                            default_project=default_project_id,
                            domain_id=domain_id, enabled=enabled)
                    changed = True
                else:
                    changed = False
            module.exit_json(changed=changed, user=user)

        elif state == 'absent':
            if user is None:
                changed=False
            else:
                cloud.delete_user(user['id'])
                changed=True
            module.exit_json(changed=changed)

    except shade.OpenStackCloudException as e:
        module.fail_json(msg=str(e), extra_data=e.extra_data)
| gpl-3.0 |
shepdelacreme/ansible | test/units/modules/network/f5/test_bigip_cli_script.py | 8 | 3465 | # -*- coding: utf-8 -*-
#
# Copyright: (c) 2018, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
from nose.plugins.skip import SkipTest
if sys.version_info < (2, 7):
raise SkipTest("F5 Ansible modules require Python >= 2.7")
from units.compat import unittest
from units.compat.mock import Mock
from units.compat.mock import patch
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_cli_script import ApiParameters
from library.modules.bigip_cli_script import ModuleParameters
from library.modules.bigip_cli_script import ModuleManager
from library.modules.bigip_cli_script import ArgumentSpec
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import iControlUnexpectedHTTPError
from test.unit.modules.utils import set_module_args
except ImportError:
try:
from ansible.modules.network.f5.bigip_cli_script import ApiParameters
from ansible.modules.network.f5.bigip_cli_script import ModuleParameters
from ansible.modules.network.f5.bigip_cli_script import ModuleManager
from ansible.modules.network.f5.bigip_cli_script import ArgumentSpec
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError
from units.modules.utils import set_module_args
except ImportError:
raise SkipTest("F5 Ansible modules require the f5-sdk Python library")
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
    """Return the contents of a fixture file, cached per absolute path.

    The raw text is JSON-decoded when possible; otherwise the raw string
    itself is cached and returned.
    """
    path = os.path.join(fixture_path, name)
    if path not in fixture_data:
        with open(path) as handle:
            raw = handle.read()
        try:
            parsed = json.loads(raw)
        except Exception:
            # Not JSON; keep the raw text.
            parsed = raw
        fixture_data[path] = parsed
    return fixture_data[path]
class TestParameters(unittest.TestCase):
    """Unit tests for the module's parameter adapter classes."""

    def test_module_parameters(self):
        # ModuleParameters adapts values supplied in the Ansible task.
        args = dict(
            name='foo',
            content="my content",
            description="my description"
        )

        p = ModuleParameters(params=args)
        assert p.name == 'foo'
        assert p.content == 'my content'
        assert p.description == 'my description'

    def test_api_parameters(self):
        # ApiParameters adapts values returned by the BIG-IP REST API.
        args = load_fixture('load_tm_cli_script_1.json')

        p = ApiParameters(params=args)
        assert p.name == 'foo'
        assert p.content == 'proc script::run {} {}'
class TestManager(unittest.TestCase):
    """Unit tests for ModuleManager's create path."""

    def setUp(self):
        self.spec = ArgumentSpec()

    def test_create(self, *args):
        # Simulate the task arguments an Ansible user would supply.
        set_module_args(dict(
            name='foo',
            content='asdasds',
            server='localhost',
            password='password',
            user='admin'
        ))

        module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode
        )

        # Override methods in the specific type of manager
        mm = ModuleManager(module=module)
        # exists() reports absent before the create and present afterwards.
        mm.exists = Mock(side_effect=[False, True])
        mm.create_on_device = Mock(return_value=True)

        results = mm.exec_module()

        assert results['changed'] is True
| gpl-3.0 |
astaninger/speakout | venv/lib/python3.6/site-packages/flask_pymongo/tests/test_gridfs.py | 2 | 3039 | from hashlib import md5
from io import BytesIO
from bson.objectid import ObjectId
from gridfs import GridFS
from werkzeug.exceptions import NotFound
import pytest
from flask_pymongo.tests.util import FlaskPyMongoTest
class GridFSCleanupMixin(object):
    """Test mixin that purges every stored GridFS file after each test."""

    def tearDown(self):
        gridfs = GridFS(self.mongo.db)
        for stored in list(gridfs.find()):
            gridfs.delete(stored._id)
        super(GridFSCleanupMixin, self).tearDown()
class TestSaveFile(GridFSCleanupMixin, FlaskPyMongoTest):
    """Tests for PyMongo.save_file (storing files into GridFS)."""

    def test_it_saves_files(self):
        fileobj = BytesIO(b"these are the bytes")

        self.mongo.save_file("my-file", fileobj)

        gridfs = GridFS(self.mongo.db)
        assert gridfs.exists({"filename": "my-file"})

    def test_it_guesses_type_from_filename(self):
        fileobj = BytesIO(b"these are the bytes")

        self.mongo.save_file("my-file.txt", fileobj)

        gridfs = GridFS(self.mongo.db)
        gridfile = gridfs.find_one({"filename": "my-file.txt"})
        # The content type is derived from the .txt extension.
        assert gridfile.content_type == "text/plain"

    def test_it_saves_files_with_props(self):
        fileobj = BytesIO(b"these are the bytes")

        # Extra keyword arguments become custom metadata on the stored file.
        self.mongo.save_file("my-file", fileobj, foo="bar")

        gridfs = GridFS(self.mongo.db)
        gridfile = gridfs.find_one({"filename": "my-file"})
        assert gridfile.foo == "bar"

    def test_it_returns_id(self):
        fileobj = BytesIO(b"these are the bytes")

        _id = self.mongo.save_file("my-file", fileobj, foo="bar")

        assert type(_id) is ObjectId
class TestSendFile(GridFSCleanupMixin, FlaskPyMongoTest):
    """Tests for PyMongo.send_file (serving GridFS files over HTTP)."""

    def setUp(self):
        super(TestSendFile, self).setUp()

        # make it bigger than 1 gridfs chunk
        self.myfile = BytesIO(b"a" * 500 * 1024)
        self.mongo.save_file("myfile.txt", self.myfile)

    def test_it_404s_for_missing_files(self):
        with pytest.raises(NotFound):
            self.mongo.send_file("no-such-file.txt")

    def test_it_sets_content_type(self):
        resp = self.mongo.send_file("myfile.txt")
        assert resp.content_type.startswith("text/plain")

    def test_it_sets_content_length(self):
        resp = self.mongo.send_file("myfile.txt")
        assert resp.content_length == len(self.myfile.getvalue())

    def test_it_sets_supports_conditional_gets(self):
        # a basic conditional GET: the ETag matches, so no body is resent
        environ_args = {
            "method": "GET",
            "headers": {
                "If-None-Match": md5(self.myfile.getvalue()).hexdigest(),
            },
        }

        with self.app.test_request_context(**environ_args):
            resp = self.mongo.send_file("myfile.txt")
            assert resp.status_code == 304

    def test_it_sets_cache_headers(self):
        resp = self.mongo.send_file("myfile.txt", cache_for=60)
        assert resp.cache_control.max_age == 60
        assert resp.cache_control.public is True

    def test_it_streams_results(self):
        resp = self.mongo.send_file("myfile.txt")
        assert resp.is_streamed
| mit |
aitoehigie/gidimagic | venv/lib/python2.7/site-packages/setuptools/archive_util.py | 409 | 6601 | """Utilities for extracting common archive formats"""
__all__ = [
"unpack_archive", "unpack_zipfile", "unpack_tarfile", "default_filter",
"UnrecognizedFormat", "extraction_drivers", "unpack_directory",
]
import zipfile, tarfile, os, shutil, posixpath
from pkg_resources import ensure_directory
from distutils.errors import DistutilsError
class UnrecognizedFormat(DistutilsError):
    """Couldn't recognize the archive type.

    Raised by the unpack_* drivers when a file is not in the format they
    handle; ``unpack_archive()`` treats it as "try the next driver".
    """
def default_filter(src, dst):
    """The default progress/filter callback.

    Accepts every entry by returning ``dst`` (the proposed extraction
    path) unchanged; ``src`` (the archive-internal path) is ignored.
    The previous docstring claimed it "returns True", which was wrong:
    callers use the return value as the extraction path.
    """
    return dst
def unpack_archive(filename, extract_dir, progress_filter=default_filter,
                   drivers=None):
    """Unpack `filename` to `extract_dir`, or raise ``UnrecognizedFormat``.

    `progress_filter` is a callback taking two arguments: the
    '/'-separated path of the entry inside the archive and the filesystem
    path it is about to be extracted to.  It must return the desired
    extraction path (possibly the one passed in) or ``None`` to skip the
    entry, so it can be used for progress reporting, filtering, or
    redirecting items.

    `drivers`, if supplied, must be a non-empty sequence of callables
    with the same signature as this function (minus `drivers`); each
    must raise ``UnrecognizedFormat`` for archive types it cannot
    handle.  The drivers are tried in order until one succeeds; when all
    of them raise, ``UnrecognizedFormat`` is raised here.  Without an
    explicit sequence, the module-level ``extraction_drivers`` constant
    is used.
    """
    candidates = drivers or extraction_drivers
    for driver in candidates:
        try:
            driver(filename, extract_dir, progress_filter)
        except UnrecognizedFormat:
            # Not this driver's format -- try the next one.
            continue
        return
    raise UnrecognizedFormat(
        "Not a recognized archive type: %s" % filename
    )
def unpack_directory(filename, extract_dir, progress_filter=default_filter):
""""Unpack" a directory, using the same interface as for archives
Raises ``UnrecognizedFormat`` if `filename` is not a directory
"""
if not os.path.isdir(filename):
raise UnrecognizedFormat("%s is not a directory" % (filename,))
paths = {filename:('',extract_dir)}
for base, dirs, files in os.walk(filename):
src,dst = paths[base]
for d in dirs:
paths[os.path.join(base,d)] = src+d+'/', os.path.join(dst,d)
for f in files:
name = src+f
target = os.path.join(dst,f)
target = progress_filter(src+f, target)
if not target:
continue # skip non-files
ensure_directory(target)
f = os.path.join(base,f)
shutil.copyfile(f, target)
shutil.copystat(f, target)
def unpack_zipfile(filename, extract_dir, progress_filter=default_filter):
    """Unpack zip `filename` to `extract_dir`.

    Raises ``UnrecognizedFormat`` if `filename` is not a zipfile (as
    determined by ``zipfile.is_zipfile()``).  See ``unpack_archive()``
    for an explanation of the `progress_filter` argument.
    """
    if not zipfile.is_zipfile(filename):
        raise UnrecognizedFormat("%s is not a zip file" % (filename,))

    archive = zipfile.ZipFile(filename)
    try:
        for info in archive.infolist():
            name = info.filename

            # don't extract absolute paths or ones with .. in them
            if name.startswith('/') or '..' in name.split('/'):
                continue

            proposed = os.path.join(extract_dir, *name.split('/'))
            target = progress_filter(name, proposed)
            if not target:
                continue

            if name.endswith('/'):
                # Directory entry -- just make sure it exists.
                ensure_directory(target)
                continue

            # Regular file entry: write out the decompressed bytes.
            ensure_directory(target)
            data = archive.read(info.filename)
            with open(target, 'wb') as out:
                out.write(data)
            del data

            # Restore Unix permission bits stored in the zip entry.
            unix_attributes = info.external_attr >> 16
            if unix_attributes:
                os.chmod(target, unix_attributes)
    finally:
        archive.close()
def unpack_tarfile(filename, extract_dir, progress_filter=default_filter):
    """Unpack tar/tar.gz/tar.bz2 `filename` to `extract_dir`

    Raises ``UnrecognizedFormat`` if `filename` is not a tarfile (as determined
    by ``tarfile.open()``).  See ``unpack_archive()`` for an explanation
    of the `progress_filter` argument.  Returns ``True`` on success.
    """
    try:
        tarobj = tarfile.open(filename)
    except tarfile.TarError:
        raise UnrecognizedFormat(
            "%s is not a compressed or uncompressed tar file" % (filename,)
        )
    try:
        tarobj.chown = lambda *args: None  # don't do any chowning!
        for member in tarobj:
            name = member.name
            # don't extract absolute paths or ones with .. in them
            if not name.startswith('/') and '..' not in name.split('/'):
                prelim_dst = os.path.join(extract_dir, *name.split('/'))

                # resolve any links and to extract the link targets as normal files
                while member is not None and (member.islnk() or member.issym()):
                    linkpath = member.linkname
                    if member.issym():
                        # symlink targets are relative to the link's own directory
                        linkpath = posixpath.join(posixpath.dirname(member.name), linkpath)
                    linkpath = posixpath.normpath(linkpath)
                    # NOTE: reaches into tarfile internals to look up the target
                    member = tarobj._getmember(linkpath)

                if member is not None and (member.isfile() or member.isdir()):
                    final_dst = progress_filter(name, prelim_dst)
                    if final_dst:
                        if final_dst.endswith(os.sep):
                            final_dst = final_dst[:-1]
                        try:
                            tarobj._extract_member(member, final_dst)  # XXX Ugh
                        except tarfile.ExtractError:
                            pass  # chown/chmod/mkfifo/mknode/makedev failed
        return True
    finally:
        tarobj.close()
# Default driver order tried by unpack_archive(): plain directories
# first, then zip archives, then tar archives.
extraction_drivers = unpack_directory, unpack_zipfile, unpack_tarfile
| mit |
ahmetdaglarbas/e-commerce | oscar/apps/analytics/migrations/0002_auto_20140827_1705.py | 49 | 1091 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
    # Adds the relational fields of the analytics models.  Depends on the
    # initial analytics and catalogue migrations and on the (swappable)
    # user model so the referenced tables exist first.

    dependencies = [
        ('analytics', '0001_initial'),
        ('catalogue', '0001_initial'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.AddField(
            model_name='userproductview',
            name='product',
            field=models.ForeignKey(verbose_name='Product', to='catalogue.Product'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='userproductview',
            name='user',
            field=models.ForeignKey(verbose_name='User', to=settings.AUTH_USER_MODEL),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='productrecord',
            name='product',
            field=models.OneToOneField(verbose_name='Product', related_name='stats', to='catalogue.Product'),
            preserve_default=True,
        ),
    ]
| bsd-3-clause |
halexus/Uno | src/main.py | 1 | 4228 | #TODO: KI
"""
Uno: A clone of the cardgame UNO (C)
Copyright (C) 2011 Alexander Thaller <alex.t@gmx.at>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from random import randint
import os
import colorama
from deck import Deck
from discardpile import Discardpile
import player
import kiplayer
def nextId(currentId, noOfPlayers, isClockwise=True):
    """Return the id of the player whose turn follows ``currentId``.

    Play normally advances clockwise (+1); a reverse card flips the
    direction, in which case the previous seat (-1) is next.  The
    result wraps around the table via modulo ``noOfPlayers``.
    """
    step = 1 if isClockwise else -1
    return (currentId + step) % noOfPlayers
def clear_screen():
    """OS independent version to clear the cmd or shell window."""
    # Pick the platform's clear command; do nothing on unknown platforms.
    commands = {'posix': 'clear', 'nt': 'cls'}
    command = commands.get(os.name)
    if command is not None:
        os.system(command)
def printCopyright():
    """Print the GPL copyright/warranty notice followed by a blank line."""
    copy = """
    Uno Copyright (C) 2011 Alexander Thaller
    This program comes with ABSOLUTELY NO WARRANTY;
    This is free software, and you are welcome to redistribute it
    under certain conditions; See LICENSE.txt for details
    """
    print(copy)
    print()
def makePlayers(deck):
    """Ask for the number of human and AI players and create them all.

    Both groups are initialized with `deck`.  The human player count is
    passed to the AI factory -- presumably so AI ids continue after the
    human ids (confirm in kiplayer).  Returns the combined player list.
    """
    noOfHumanPlayers = player.getNoOfPlayers()  # How many human players?
    noOfKiPlayers = kiplayer.getNoOfPlayers()  # How many KI players?
    players = player.makePlayers(noOfHumanPlayers, deck)  # Initialize players with deck
    players = players + kiplayer.makePlayers(noOfKiPlayers, noOfHumanPlayers, deck)
    return players
def showNoOfCardsInHand(players, idCurrentPlayer):
    """Print how many cards every opponent holds, on a single line.

    The player with `idCurrentPlayer` is skipped (they can see their
    own hand).  Entries are separated by "; " and the line is finished
    with a newline.
    """
    # Loop variable renamed from `player`, which shadowed the imported
    # `player` module inside this function.
    for opponent in players:
        if opponent.getId() == idCurrentPlayer:
            continue
        cardsInHand = opponent.getHand().noOfCardsInHand()
        print('%s has %d cards' % (opponent.getName(), cardsInHand), end='; ')
    print()
if __name__ == '__main__':
    colorama.init()  # Initialize color output
    print(colorama.Back.WHITE + colorama.Fore.BLACK)  # Background white, textcolor black
    clear_screen()  # To make background white
    printCopyright()
    discardPile = Discardpile()  # Build the initial (empty) discard pile
    deck = Deck(discardPile)  # Build a deck with 108 cards
    players = makePlayers(deck)  # Initialize players with deck
    noOfPlayers = len(players)
    idCurrentPlayer = randint(0, noOfPlayers-1)  # ID of first player in first round
    discardPile.putCard(deck.drawCard()[0], players[idCurrentPlayer])  # Put first card at discard pile
    isClockwise = True  # Direction of play. Can be changed by reverse card
    while True:  # Game loop
        clear_screen()
        topCard = discardPile.getCardOnTop()
        currentPlayer = players[idCurrentPlayer]
        showNoOfCardsInHand(players, idCurrentPlayer)  # Print how many cards the other players have
        print('Pile: %s\n' % topCard)  # Show top card on discard pile
        cardToPlay = currentPlayer.takeTurn(deck, topCard, noOfPlayers)
        if cardToPlay == None:  # It's None if a card was drawn
            print('%s draws a card.' % currentPlayer.getName())
        elif cardToPlay == 'skip':  # It's 'skip' if player's turn is skipped
            print("%s's turn is skipped." % currentPlayer.getName())
        else:  # A card was chosen to play
            discardPile.putCard(cardToPlay, currentPlayer)  # Put played card on pile
            if cardToPlay.isReverse():  # Did player play a reverse card?
                isClockwise = not isClockwise  # Order of play reversed
        if currentPlayer.isWinner():
            print('%s wins the game!' % currentPlayer.getName())
            break
        idCurrentPlayer = nextId(currentPlayer.getId(), noOfPlayers, isClockwise)  # Id of next player
        input()  # Pause so the output can be read before the screen clears
| gpl-3.0 |
ryfx/modrana | core/platform_detection.py | 1 | 2696 | # modRana current-platform detection
import os
import sys
from core import qrc
DEFAULT_DEVICE_MODULE_ID = "pc"
DEFAULT_GUI_MODULE_ID = "GTK"
import logging
log = logging.getLogger("core.platform_detection")
def getBestDeviceModuleId():
    """Detect the current device and return its device-module id.

    Falls back to ``DEFAULT_DEVICE_MODULE_ID`` ("pc") when no known
    device is detected by ``_check()``.
    """
    log.info("** detecting current device **")
    result = _check()
    if result is not None:
        deviceModuleId = result
    else:
        deviceModuleId = DEFAULT_DEVICE_MODULE_ID  # fall back to the generic PC device module
        log.info("* no known device detected")
    log.info("** selected %s as device module ID **" % deviceModuleId)
    return deviceModuleId
def getBestGUIModuleId():
    """Return the GUI module id to use (currently always the GTK default)."""
    return DEFAULT_GUI_MODULE_ID
def _check():
    """Try to detect the current device.

    Returns a device-module id string ("android", "pc", "bb10", "n900",
    "n9", "neo" or "jolla") or None when no known device was detected.
    """
    # qrc is currently used only on Android, so if we are running with
    # qrc, we are on Android
    if qrc.is_qrc:
        return "android"

    # check CPU architecture
    import subprocess
    proc = subprocess.Popen(['uname', '-m', ], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    # NOTE: communicate() returns bytes on Python 3; str() turns that into
    # "b'x86_64\\n'", which still contains the substrings checked below.
    arch = str(proc.communicate()[0])
    if ("i686" in arch) or ("x86_64" in arch):
        log.info("* PC detected")
        return "pc"  # we are most probably on a PC
    if sys.platform == "qnx6":
        log.info("* BlackBerry 10 device detected")
        return "bb10"

    # check procFS
    if os.path.exists("/proc/cpuinfo"):
        # `with` guarantees the handle is closed even if read() fails.
        with open("/proc/cpuinfo", "r") as f:
            cpuinfo = f.read()
        if "Nokia RX-51" in cpuinfo:  # N900
            log.info("* Nokia N900 detected")
            return "n900"
        # N9 and N950 share the same device module
        elif "Nokia RM-680" in cpuinfo:  # N950
            log.info("* Nokia N950 detected")
            return "n9"
        elif "Nokia RM-696" in cpuinfo:  # N9
            log.info("* Nokia N9 detected")
            return "n9"
        elif "GTA02" in cpuinfo:  # Neo FreeRunner (comment previously said N9)
            log.info("* Neo FreeRunner GTA02 detected")
            return "neo"

    # check lsb_release
    try:
        proc = subprocess.Popen(['lsb_release', '-s', '-i'],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        distributionId = proc.communicate()[0].decode("utf-8").lower().strip()
        log.info(distributionId)
        if distributionId == 'mer':
            # TODO: could be also Nemo mobile or other Mer based distro,
            # we should probably discern those two in the future
            log.info("* Jolla (or other Mer based device) detected")
            return "jolla"
    except Exception:
        # Was a bare `except:`; narrowed so SystemExit/KeyboardInterrupt
        # are no longer swallowed here.
        log.exception("running lsb_release during platform detection failed")
    return None
| gpl-3.0 |
shifter/rekall | rekall-core/rekall/plugins/addrspaces/elfcore.py | 3 | 9619 | # Rekall Memory Forensics
#
# Copyright 2012 Philippe Teuwen, Thorsten Sick, Michael Cohen
# Copyright 2013 Google Inc. All Rights Reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
"""An Address Space for processing ELF64 coredumps."""
# References:
# VirtualBox core format:
# http://www.virtualbox.org/manual/ch12.html#guestcoreformat
# ELF64 format: http://downloads.openwatcom.org/ftp/devel/docs/elf-64-gen.pdf
# Note that as of version 1.6.0 WinPmem also uses ELF64 as the default imaging
# format. Except that WinPmem stores image metadata in a YAML file stored in the
# image. This address space supports both formats.
import os
import yaml
from rekall import addrspace
from rekall.plugins.overlays.linux import elf
PT_PMEM_METADATA = 0x6d656d70 # Spells 'pmem'
class Elf64CoreDump(addrspace.RunBasedAddressSpace):
    """This AS supports ELF64 coredump format, as used by VirtualBox."""
    order = 30

    __name = "elf64"
    __image = True

    def __init__(self, **kwargs):
        super(Elf64CoreDump, self).__init__(**kwargs)

        # Check the file for sanity.
        self.check_file()

        self.offset = 0
        self.fname = ''
        self._metadata = {}

        # Now parse the ELF file.
        elf_profile = elf.ELFProfile(session=self.session)
        self.elf64_hdr = elf_profile.elf64_hdr(vm=self.base, offset=0)

        self.as_assert(self.elf64_hdr.e_type == "ET_CORE",
                       "Elf file is not a core file.")
        self.name = "%s|%s" % (self.__class__.__name__, self.base.name)

        # Iterate over all the program headers and map the runs.
        for segment in self.elf64_hdr.e_phoff:
            if segment.p_type == "PT_LOAD":
                # Some load segments are empty.
                if (segment.p_filesz == 0 or
                        segment.p_filesz != segment.p_memsz):
                    continue

                # Add the run to the memory map.
                virtual_address = int(segment.p_paddr) or int(segment.p_vaddr)
                self.runs.insert((virtual_address,  # Virtual Addr
                                  int(segment.p_offset),  # File Addr
                                  int(segment.p_memsz)))  # Length

            elif segment.p_type == PT_PMEM_METADATA:
                # WinPmem stores its YAML metadata in a dedicated segment.
                self.LoadMetadata(segment.p_offset)

        # Search for the pmem footer signature.
        footer = self.base.read(self.base.end() - 10000, 10000)
        if "...\n" in footer[-6:]:
            header_offset = footer.rfind("# PMEM")
            if header_offset > 0:
                self.LoadMetadata(self.base.end() - 10000 + header_offset)

    def check_file(self):
        """Checks the base file handle for sanity."""
        self.as_assert(self.base,
                       "Must stack on another address space")

        ## Must start with the magic for elf
        self.as_assert((self.base.read(0, 4) == "\177ELF"),
                       "Header signature invalid")

    def LoadMetadata(self, offset):
        """Load the WinPmem metadata from the elf file."""
        try:
            # Metadata is a YAML document terminated by "...\n".
            data = self.base.read(offset, 1024*1024)
            yaml_file = data.split('...\n')[0]

            metadata = yaml.safe_load(yaml_file)
        except (yaml.YAMLError, TypeError) as e:
            self.session.logging.error(
                "Invalid file metadata, skipping: %s" % e)
            return

        # Propagate interesting metadata values into the session.
        for session_param, metadata_key in (("dtb", "CR3"),
                                            ("kernel_base", "KernBase")):
            if metadata_key in metadata:
                self.session.SetParameter(
                    session_param, metadata[metadata_key])

        # Metadata sections may be chained; follow the chain recursively.
        previous_section = metadata.pop("PreviousHeader", None)
        if previous_section is not None:
            self.LoadMetadata(previous_section)

        pagefile_offset = metadata.get("PagefileOffset", None)
        pagefile_size = metadata.get("PagefileSize", None)

        if pagefile_offset is not None and pagefile_size is not None:
            self.LoadPageFile(pagefile_offset, pagefile_size)

        self._metadata.update(metadata)

    # Region where the pagefile is mapped (set by LoadPageFile).
    pagefile_offset = 0
    pagefile_end = 0

    def LoadPageFile(self, pagefile_offset, pagefile_size):
        """We map the page file into the physical address space.

        This allows us to treat all physical addresses equally - regardless if
        they come from the memory or the page file.
        """
        # Map the pagefile after the end of the physical address space.
        vaddr = self.end() + 0x10000
        self.session.logging.info(
            "Loading pagefile into physical offset %#08x", vaddr)
        self.runs.insert((vaddr, pagefile_offset, pagefile_size))

        # Remember the region for the pagefile.
        self.pagefile_offset = vaddr
        self.pagefile_end = vaddr + pagefile_size

    def describe(self, addr):
        # Addresses falling inside the mapped pagefile region are labelled
        # as pagefile offsets rather than plain physical addresses.
        if self.pagefile_offset <= addr <= self.pagefile_end:
            return "%#x@Pagefile" % (
                addr - self.pagefile_offset)

        return "%#x" % addr
class KCoreAddressSpace(Elf64CoreDump):
    """A Linux kernel's /proc/kcore file also maps the entire physical ram.

    http://lxr.free-electrons.com/source/Documentation/x86/x86_64/mm.txt

    ffff880000000000 - ffffc7ffffffffff (=64 TB) direct mapping of all
    physical memory.
    """
    # We must run before the regular Elf64CoreDump address space in the voting
    # order.
    order = Elf64CoreDump.order - 1

    __name = "elf64"
    __image = True
    volatile = True

    def __init__(self, **kwargs):
        super(KCoreAddressSpace, self).__init__(**kwargs)

        # Collect all ranges between ffff880000000000 - ffffc7ffffffffff
        runs = []

        for vaddr, paddr, length in self.runs:
            if 0xffff880000000000 < vaddr < 0xffffc7ffffffffff:
                # Rebase the kernel's direct mapping so it starts at
                # physical address 0.
                runs.append((vaddr - 0xffff880000000000, paddr, length))

        self.as_assert(runs, "No kcore compatible virtual ranges.")
        self.runs.clear()

        # At this point, we think this is a valid, usable kcore file.
        # RHEL, however, disabled read access to /proc/kcore past the ELF
        # headers and the file size reflects this. /proc/kcore usually has a
        # size of at least 64TB (46bits of physical address space in x64).
        # We use the file size to detect cases where kcore will be unusable.

        if getattr(self.base, "fhandle", None):
            try:
                statinfo = os.fstat(self.base.fhandle.fileno())
                if statinfo.st_size < 2**46:
                    # We raise a RuntimeError and not an ASAssertionError
                    # because we need it to be catchable when rekall is used
                    # as a library (i.e GRR). ASAssertionErrors are swallowed
                    # by the address space selection algorithm.
                    raise RuntimeError(
                        "This kcore file is too small (%d bytes) and likely "
                        "invalid for memory analysis. You may want to use pmem "
                        "instead." % statinfo.st_size)
            except (IOError, AttributeError):
                pass

        for x in runs:
            self.runs.insert(x)
def WriteElfFile(address_space, outfd, session=None):
    """Convert the address_space to an ELF Core dump file.

    The Core dump will be written to outfd which is expected to have a .write()
    method.
    """
    runs = list(address_space.get_available_addresses())

    # Template program header shared by all runs; per-run fields are
    # filled in inside the loop below.
    elf_profile = elf.ELFProfile(session=session)
    elf64_pheader = elf_profile.elf64_phdr()
    elf64_pheader.p_type = "PT_LOAD"
    elf64_pheader.p_align = 0x1000
    elf64_pheader.p_flags = "PF_R"

    elf64_header = elf_profile.elf64_hdr()
    elf64_header.e_ident = elf64_header.e_ident.signature
    elf64_header.e_type = 'ET_CORE'
    elf64_header.e_phoff = elf64_header.obj_end
    elf64_header.e_ehsize = elf64_header.obj_size
    elf64_header.e_phentsize = elf64_pheader.obj_size
    elf64_header.e_phnum = len(runs)
    elf64_header.e_shnum = 0  # We don't have any sections.

    # Where we start writing data.
    file_offset = (elf64_header.obj_size +
                   # One Phdr for each run.
                   len(runs) * elf64_pheader.obj_size)

    outfd.write(elf64_header.GetData())

    # Emit one program header per run, recording where its data will live.
    for offset, _, length in runs:
        elf64_pheader.p_paddr = offset
        elf64_pheader.p_memsz = length
        elf64_pheader.p_offset = file_offset
        elf64_pheader.p_filesz = length

        outfd.write(elf64_pheader.GetData())

        file_offset += length

    # Now just copy all the runs
    total_data = 0
    for offset, _, length in runs:
        while length > 0:
            data = address_space.read(offset, min(10000000, length))
            session.report_progress("Writing %sMb", total_data/1024/1024)
            outfd.write(data)

            length -= len(data)
            offset += len(data)
            total_data += len(data)
| gpl-2.0 |
mcella/django | django/contrib/sessions/models.py | 347 | 1298 | from __future__ import unicode_literals
from django.contrib.sessions.base_session import (
AbstractBaseSession, BaseSessionManager,
)
class SessionManager(BaseSessionManager):
    # Keep this manager available to historical migrations.
    use_in_migrations = True
class Session(AbstractBaseSession):
    """
    Django provides full support for anonymous sessions. The session
    framework lets you store and retrieve arbitrary data on a
    per-site-visitor basis. It stores data on the server side and
    abstracts the sending and receiving of cookies. Cookies contain a
    session ID -- not the data itself.

    The Django sessions framework is entirely cookie-based. It does
    not fall back to putting session IDs in URLs. This is an intentional
    design decision. Not only does that behavior make URLs ugly, it makes
    your site vulnerable to session-ID theft via the "Referer" header.

    For complete documentation on using Sessions in your code, consult
    the sessions documentation that is shipped with Django (also available
    on the Django Web site).
    """
    objects = SessionManager()

    @classmethod
    def get_session_store_class(cls):
        # Imported here (not at module level) -- presumably to avoid an
        # import cycle between the model and the db backend; confirm
        # before moving.
        from django.contrib.sessions.backends.db import SessionStore
        return SessionStore

    class Meta(AbstractBaseSession.Meta):
        db_table = 'django_session'
| bsd-3-clause |
drinkssu/YourVoiceAlarmBackend | lib/werkzeug/contrib/atom.py | 311 | 15281 | # -*- coding: utf-8 -*-
"""
werkzeug.contrib.atom
~~~~~~~~~~~~~~~~~~~~~
This module provides a class called :class:`AtomFeed` which can be
used to generate feeds in the Atom syndication format (see :rfc:`4287`).
Example::
def atom_feed(request):
feed = AtomFeed("My Blog", feed_url=request.url,
url=request.host_url,
subtitle="My example blog for a feed test.")
for post in Post.query.limit(10).all():
feed.add(post.title, post.body, content_type='html',
author=post.author, url=post.url, id=post.uid,
updated=post.last_update, published=post.pub_date)
return feed.get_response()
:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from datetime import datetime
from werkzeug.utils import escape
from werkzeug.wrappers import BaseResponse
from werkzeug._compat import implements_to_string, string_types
XHTML_NAMESPACE = 'http://www.w3.org/1999/xhtml'
def _make_text_block(name, content, content_type=None):
    """Render *content* as an XML text-construct element called *name*.

    ``xhtml`` content is embedded unescaped inside an XHTML ``<div>``;
    any other content is escaped, and a ``type`` attribute is emitted
    only when a content type was supplied.
    """
    if content_type == 'xhtml':
        return u'<%s type="xhtml"><div xmlns="%s">%s</div></%s>\n' % (
            name, XHTML_NAMESPACE, content, name)
    if content_type:
        return u'<%s type="%s">%s</%s>\n' % (
            name, content_type, escape(content), name)
    return u'<%s>%s</%s>\n' % (name, escape(content), name)
def format_iso8601(obj):
    """Return *obj* (a datetime) as an ISO 8601 timestamp string,
    e.g. ``2013-01-02T03:04:05Z``."""
    iso_fmt = '%Y-%m-%dT%H:%M:%SZ'
    return obj.strftime(iso_fmt)
@implements_to_string
class AtomFeed(object):
    """A helper class that creates Atom feeds.

    :param title: the title of the feed. Required.
    :param title_type: the type attribute for the title element.  One of
                       ``'html'``, ``'text'`` or ``'xhtml'``.
    :param url: the url for the feed (not the url *of* the feed)
    :param id: a globally unique id for the feed.  Must be an URI.  If
               not present the `feed_url` is used, but one of both is
               required.
    :param updated: the time the feed was modified the last time.  Must
                    be a :class:`datetime.datetime` object.  If not
                    present the latest entry's `updated` is used.
    :param feed_url: the URL to the feed.  Should be the URL that was
                     requested.
    :param author: the author of the feed.  Must be either a string (the
                   name) or a dict with name (required) and uri or
                   email (both optional).  Can be a list of (may be
                   mixed, too) strings and dicts, too, if there are
                   multiple authors. Required if not every entry has an
                   author element.
    :param icon: an icon for the feed.
    :param logo: a logo for the feed.
    :param rights: copyright information for the feed.
    :param rights_type: the type attribute for the rights element.  One of
                        ``'html'``, ``'text'`` or ``'xhtml'``.  Default is
                        ``'text'``.
    :param subtitle: a short description of the feed.
    :param subtitle_type: the type attribute for the subtitle element.
                          One of ``'text'``, ``'html'``, ``'text'``
                          or ``'xhtml'``.  Default is ``'text'``.
    :param links: additional links.  Must be a list of dictionaries with
                  href (required) and rel, type, hreflang, title, length
                  (all optional)
    :param generator: the software that generated this feed.  This must be
                      a tuple in the form ``(name, url, version)``.  If
                      you don't want to specify one of them, set the item
                      to `None`.
    :param entries: a list with the entries for the feed. Entries can also
                    be added later with :meth:`add`.

    For more information on the elements see
    http://www.atomenabled.org/developers/syndication/

    Everywhere where a list is demanded, any iterable can be used.
    """

    default_generator = ('Werkzeug', None, None)

    def __init__(self, title=None, entries=None, **kwargs):
        self.title = title
        self.title_type = kwargs.get('title_type', 'text')
        self.url = kwargs.get('url')
        self.feed_url = kwargs.get('feed_url', self.url)
        self.id = kwargs.get('id', self.feed_url)
        self.updated = kwargs.get('updated')
        self.author = kwargs.get('author', ())
        self.icon = kwargs.get('icon')
        self.logo = kwargs.get('logo')
        self.rights = kwargs.get('rights')
        self.rights_type = kwargs.get('rights_type')
        self.subtitle = kwargs.get('subtitle')
        self.subtitle_type = kwargs.get('subtitle_type', 'text')
        self.generator = kwargs.get('generator')
        if self.generator is None:
            self.generator = self.default_generator
        self.links = kwargs.get('links', [])
        self.entries = entries and list(entries) or []

        # Normalize `author` so that it is always a list of dicts with
        # at least a 'name' key.
        if not hasattr(self.author, '__iter__') \
           or isinstance(self.author, string_types + (dict,)):
            self.author = [self.author]
        for i, author in enumerate(self.author):
            if not isinstance(author, dict):
                self.author[i] = {'name': author}

        # Validate the required fields.
        if not self.title:
            raise ValueError('title is required')
        if not self.id:
            raise ValueError('id is required')
        for author in self.author:
            if 'name' not in author:
                raise TypeError('author must contain at least a name')

    def add(self, *args, **kwargs):
        """Add a new entry to the feed.  This function can either be called
        with a :class:`FeedEntry` or some keyword and positional arguments
        that are forwarded to the :class:`FeedEntry` constructor.
        """
        if len(args) == 1 and not kwargs and isinstance(args[0], FeedEntry):
            self.entries.append(args[0])
        else:
            kwargs['feed_url'] = self.feed_url
            self.entries.append(FeedEntry(*args, **kwargs))

    def __repr__(self):
        return '<%s %r (%d entries)>' % (
            self.__class__.__name__,
            self.title,
            len(self.entries)
        )

    def generate(self):
        """Return a generator that yields pieces of XML."""
        # atom demands either an author element in every entry or a global one
        if not self.author:
            if False in map(lambda e: bool(e.author), self.entries):
                self.author = ({'name': 'Unknown author'},)

        # Default the feed timestamp to the newest entry (or "now").
        if not self.updated:
            dates = sorted([entry.updated for entry in self.entries])
            self.updated = dates and dates[-1] or datetime.utcnow()

        yield u'<?xml version="1.0" encoding="utf-8"?>\n'
        yield u'<feed xmlns="http://www.w3.org/2005/Atom">\n'
        yield '  ' + _make_text_block('title', self.title, self.title_type)
        yield u'  <id>%s</id>\n' % escape(self.id)
        yield u'  <updated>%s</updated>\n' % format_iso8601(self.updated)
        if self.url:
            yield u'  <link href="%s" />\n' % escape(self.url)
        if self.feed_url:
            yield u'  <link href="%s" rel="self" />\n' % \
                escape(self.feed_url)
        for link in self.links:
            yield u'  <link %s/>\n' % ''.join('%s="%s" ' % \
                (k, escape(link[k])) for k in link)
        for author in self.author:
            yield u'  <author>\n'
            yield u'    <name>%s</name>\n' % escape(author['name'])
            if 'uri' in author:
                yield u'    <uri>%s</uri>\n' % escape(author['uri'])
            if 'email' in author:
                yield '    <email>%s</email>\n' % escape(author['email'])
            yield '  </author>\n'
        if self.subtitle:
            yield '  ' + _make_text_block('subtitle', self.subtitle,
                                          self.subtitle_type)
        if self.icon:
            yield u'  <icon>%s</icon>\n' % escape(self.icon)
        if self.logo:
            yield u'  <logo>%s</logo>\n' % escape(self.logo)
        if self.rights:
            yield '  ' + _make_text_block('rights', self.rights,
                                          self.rights_type)
        generator_name, generator_url, generator_version = self.generator
        if generator_name or generator_url or generator_version:
            # Assemble the <generator> element only from the parts given.
            tmp = [u'  <generator']
            if generator_url:
                tmp.append(u' uri="%s"' % escape(generator_url))
            if generator_version:
                tmp.append(u' version="%s"' % escape(generator_version))
            tmp.append(u'>%s</generator>\n' % escape(generator_name))
            yield u''.join(tmp)
        for entry in self.entries:
            for line in entry.generate():
                yield u'  ' + line
        yield u'</feed>\n'

    def to_string(self):
        """Convert the feed into a string."""
        return u''.join(self.generate())

    def get_response(self):
        """Return a response object for the feed."""
        return BaseResponse(self.to_string(), mimetype='application/atom+xml')

    def __call__(self, environ, start_response):
        """Use the class as WSGI response object."""
        return self.get_response()(environ, start_response)

    def __str__(self):
        return self.to_string()
@implements_to_string
class FeedEntry(object):
"""Represents a single entry in a feed.
:param title: the title of the entry. Required.
:param title_type: the type attribute for the title element. One of
``'html'``, ``'text'`` or ``'xhtml'``.
:param content: the content of the entry.
:param content_type: the type attribute for the content element. One
of ``'html'``, ``'text'`` or ``'xhtml'``.
:param summary: a summary of the entry's content.
:param summary_type: the type attribute for the summary element. One
of ``'html'``, ``'text'`` or ``'xhtml'``.
:param url: the url for the entry.
:param id: a globally unique id for the entry. Must be an URI. If
not present the URL is used, but one of both is required.
:param updated: the time the entry was modified the last time. Must
be a :class:`datetime.datetime` object. Required.
:param author: the author of the entry. Must be either a string (the
name) or a dict with name (required) and uri or
email (both optional). Can be a list of (may be
mixed, too) strings and dicts, too, if there are
multiple authors. Required if the feed does not have an
author element.
:param published: the time the entry was initially published. Must
be a :class:`datetime.datetime` object.
:param rights: copyright information for the entry.
:param rights_type: the type attribute for the rights element. One of
``'html'``, ``'text'`` or ``'xhtml'``. Default is
``'text'``.
:param links: additional links. Must be a list of dictionaries with
href (required) and rel, type, hreflang, title, length
(all optional)
:param categories: categories for the entry. Must be a list of dictionaries
with term (required), scheme and label (all optional)
:param xml_base: The xml base (url) for this feed item. If not provided
it will default to the item url.
For more information on the elements see
http://www.atomenabled.org/developers/syndication/
Everywhere where a list is demanded, any iterable can be used.
"""
def __init__(self, title=None, content=None, feed_url=None, **kwargs):
self.title = title
self.title_type = kwargs.get('title_type', 'text')
self.content = content
self.content_type = kwargs.get('content_type', 'html')
self.url = kwargs.get('url')
self.id = kwargs.get('id', self.url)
self.updated = kwargs.get('updated')
self.summary = kwargs.get('summary')
self.summary_type = kwargs.get('summary_type', 'html')
self.author = kwargs.get('author', ())
self.published = kwargs.get('published')
self.rights = kwargs.get('rights')
self.links = kwargs.get('links', [])
self.categories = kwargs.get('categories', [])
self.xml_base = kwargs.get('xml_base', feed_url)
if not hasattr(self.author, '__iter__') \
or isinstance(self.author, string_types + (dict,)):
self.author = [self.author]
for i, author in enumerate(self.author):
if not isinstance(author, dict):
self.author[i] = {'name': author}
if not self.title:
raise ValueError('title is required')
if not self.id:
raise ValueError('id is required')
if not self.updated:
raise ValueError('updated is required')
def __repr__(self):
return '<%s %r>' % (
self.__class__.__name__,
self.title
)
def generate(self):
"""Yields pieces of ATOM XML."""
base = ''
if self.xml_base:
base = ' xml:base="%s"' % escape(self.xml_base)
yield u'<entry%s>\n' % base
yield u' ' + _make_text_block('title', self.title, self.title_type)
yield u' <id>%s</id>\n' % escape(self.id)
yield u' <updated>%s</updated>\n' % format_iso8601(self.updated)
if self.published:
yield u' <published>%s</published>\n' % \
format_iso8601(self.published)
if self.url:
yield u' <link href="%s" />\n' % escape(self.url)
for author in self.author:
yield u' <author>\n'
yield u' <name>%s</name>\n' % escape(author['name'])
if 'uri' in author:
yield u' <uri>%s</uri>\n' % escape(author['uri'])
if 'email' in author:
yield u' <email>%s</email>\n' % escape(author['email'])
yield u' </author>\n'
for link in self.links:
yield u' <link %s/>\n' % ''.join('%s="%s" ' % \
(k, escape(link[k])) for k in link)
for category in self.categories:
yield u' <category %s/>\n' % ''.join('%s="%s" ' % \
(k, escape(category[k])) for k in category)
if self.summary:
yield u' ' + _make_text_block('summary', self.summary,
self.summary_type)
if self.content:
yield u' ' + _make_text_block('content', self.content,
self.content_type)
yield u'</entry>\n'
def to_string(self):
"""Convert the feed item into a unicode object."""
return u''.join(self.generate())
def __str__(self):
return self.to_string()
| apache-2.0 |
citrix-openstack-build/tempest | tempest/services/compute/json/images_client.py | 6 | 5332 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import time
import urllib
from tempest.common.rest_client import RestClient
from tempest import exceptions
class ImagesClientJSON(RestClient):
def __init__(self, config, username, password, auth_url, tenant_name=None):
super(ImagesClientJSON, self).__init__(config, username, password,
auth_url, tenant_name)
self.service = self.config.compute.catalog_type
self.build_interval = self.config.compute.build_interval
self.build_timeout = self.config.compute.build_timeout
def create_image(self, server_id, name, meta=None):
"""Creates an image of the original server."""
post_body = {
'createImage': {
'name': name,
}
}
if meta is not None:
post_body['createImage']['metadata'] = meta
post_body = json.dumps(post_body)
resp, body = self.post('servers/%s/action' % str(server_id),
post_body, self.headers)
return resp, body
def list_images(self, params=None):
"""Returns a list of all images filtered by any parameters."""
url = 'images'
if params:
url += '?%s' % urllib.urlencode(params)
resp, body = self.get(url)
body = json.loads(body)
return resp, body['images']
def list_images_with_detail(self, params=None):
"""Returns a detailed list of images filtered by any parameters."""
url = 'images/detail'
if params:
url += '?%s' % urllib.urlencode(params)
resp, body = self.get(url)
body = json.loads(body)
return resp, body['images']
def get_image(self, image_id):
"""Returns the details of a single image."""
resp, body = self.get("images/%s" % str(image_id))
self.expected_success(200, resp)
body = json.loads(body)
return resp, body['image']
def delete_image(self, image_id):
"""Deletes the provided image."""
return self.delete("images/%s" % str(image_id))
def wait_for_image_status(self, image_id, status):
"""Waits for an image to reach a given status."""
resp, image = self.get_image(image_id)
start = int(time.time())
while image['status'] != status:
time.sleep(self.build_interval)
resp, image = self.get_image(image_id)
if image['status'] == 'ERROR':
raise exceptions.AddImageException(image_id=image_id)
if int(time.time()) - start >= self.build_timeout:
raise exceptions.TimeoutException
def list_image_metadata(self, image_id):
"""Lists all metadata items for an image."""
resp, body = self.get("images/%s/metadata" % str(image_id))
body = json.loads(body)
return resp, body['metadata']
def set_image_metadata(self, image_id, meta):
"""Sets the metadata for an image."""
post_body = json.dumps({'metadata': meta})
resp, body = self.put('images/%s/metadata' % str(image_id),
post_body, self.headers)
body = json.loads(body)
return resp, body['metadata']
def update_image_metadata(self, image_id, meta):
"""Updates the metadata for an image."""
post_body = json.dumps({'metadata': meta})
resp, body = self.post('images/%s/metadata' % str(image_id),
post_body, self.headers)
body = json.loads(body)
return resp, body['metadata']
def get_image_metadata_item(self, image_id, key):
"""Returns the value for a specific image metadata key."""
resp, body = self.get("images/%s/metadata/%s" % (str(image_id), key))
body = json.loads(body)
return resp, body['meta']
def set_image_metadata_item(self, image_id, key, meta):
"""Sets the value for a specific image metadata key."""
post_body = json.dumps({'meta': meta})
resp, body = self.put('images/%s/metadata/%s' % (str(image_id), key),
post_body, self.headers)
body = json.loads(body)
return resp, body['meta']
def delete_image_metadata_item(self, image_id, key):
"""Deletes a single image metadata key/value pair."""
resp, body = self.delete("images/%s/metadata/%s" %
(str(image_id), key))
return resp, body
def is_resource_deleted(self, id):
try:
self.get_image(id)
except exceptions.NotFound:
return True
return False
| apache-2.0 |
rugk/letsencrypt | letsencrypt/constants.py | 10 | 2636 | """Let's Encrypt constants."""
import os
import logging
from acme import challenges
SETUPTOOLS_PLUGINS_ENTRY_POINT = "letsencrypt.plugins"
"""Setuptools entry point group name for plugins."""
CLI_DEFAULTS = dict(
config_files=[
"/etc/letsencrypt/cli.ini",
# http://freedesktop.org/wiki/Software/xdg-user-dirs/
os.path.join(os.environ.get("XDG_CONFIG_HOME", "~/.config"),
"letsencrypt", "cli.ini"),
],
verbose_count=-(logging.WARNING / 10),
server="https://acme-staging.api.letsencrypt.org/acme/new-reg",
rsa_key_size=2048,
rollback_checkpoints=1,
config_dir="/etc/letsencrypt",
work_dir="/var/lib/letsencrypt",
logs_dir="/var/log/letsencrypt",
no_verify_ssl=False,
dvsni_port=challenges.DVSNI.PORT,
auth_cert_path="./cert.pem",
auth_chain_path="./chain.pem",
)
"""Defaults for CLI flags and `.IConfig` attributes."""
RENEWER_DEFAULTS = dict(
renewer_enabled="yes",
renew_before_expiry="30 days",
deploy_before_expiry="20 days",
)
"""Defaults for renewer script."""
EXCLUSIVE_CHALLENGES = frozenset([frozenset([
challenges.DVSNI, challenges.SimpleHTTP])])
"""Mutually exclusive challenges."""
ENHANCEMENTS = ["redirect", "http-header", "ocsp-stapling", "spdy"]
"""List of possible :class:`letsencrypt.interfaces.IInstaller`
enhancements.
List of expected options parameters:
- redirect: None
- http-header: TODO
- ocsp-stapling: TODO
- spdy: TODO
"""
ARCHIVE_DIR = "archive"
"""Archive directory, relative to `IConfig.config_dir`."""
CONFIG_DIRS_MODE = 0o755
"""Directory mode for ``.IConfig.config_dir`` et al."""
ACCOUNTS_DIR = "accounts"
"""Directory where all accounts are saved."""
BACKUP_DIR = "backups"
"""Directory (relative to `IConfig.work_dir`) where backups are kept."""
CERT_DIR = "certs"
"""See `.IConfig.cert_dir`."""
CERT_KEY_BACKUP_DIR = "keys-certs"
"""Directory where all certificates and keys are stored (relative to
`IConfig.work_dir`). Used for easy revocation."""
IN_PROGRESS_DIR = "IN_PROGRESS"
"""Directory used before a permanent checkpoint is finalized (relative to
`IConfig.work_dir`)."""
KEY_DIR = "keys"
"""Directory (relative to `IConfig.config_dir`) where keys are saved."""
LIVE_DIR = "live"
"""Live directory, relative to `IConfig.config_dir`."""
TEMP_CHECKPOINT_DIR = "temp_checkpoint"
"""Temporary checkpoint directory (relative to `IConfig.work_dir`)."""
RENEWAL_CONFIGS_DIR = "configs"
"""Renewal configs directory, relative to `IConfig.config_dir`."""
RENEWER_CONFIG_FILENAME = "renewer.conf"
"""Renewer config file name (relative to `IConfig.config_dir`)."""
| apache-2.0 |
shastikk/youtube-dl | youtube_dl/extractor/mitele.py | 18 | 2942 | from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_urllib_parse
from ..utils import (
encode_dict,
get_element_by_attribute,
int_or_none,
)
class MiTeleIE(InfoExtractor):
IE_DESC = 'mitele.es'
_VALID_URL = r'http://www\.mitele\.es/[^/]+/[^/]+/[^/]+/(?P<id>[^/]+)/'
_TESTS = [{
'url': 'http://www.mitele.es/programas-tv/diario-de/la-redaccion/programa-144/',
'md5': 'ace7635b2a0b286aaa37d3ff192d2a8a',
'info_dict': {
'id': '0NF1jJnxS1Wu3pHrmvFyw2',
'display_id': 'programa-144',
'ext': 'flv',
'title': 'Tor, la web invisible',
'description': 'md5:3b6fce7eaa41b2d97358726378d9369f',
'thumbnail': 're:(?i)^https?://.*\.jpg$',
'duration': 2913,
},
}]
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
config_url = self._search_regex(
r'data-config\s*=\s*"([^"]+)"', webpage, 'data config url')
config = self._download_json(
config_url, display_id, 'Downloading config JSON')
mmc = self._download_json(
config['services']['mmc'], display_id, 'Downloading mmc JSON')
formats = []
for location in mmc['locations']:
gat = self._proto_relative_url(location.get('gat'), 'http:')
bas = location.get('bas')
loc = location.get('loc')
ogn = location.get('ogn')
if None in (gat, bas, loc, ogn):
continue
token_data = {
'bas': bas,
'icd': loc,
'ogn': ogn,
'sta': '0',
}
media = self._download_json(
'%s/?%s' % (gat, compat_urllib_parse.urlencode(encode_dict(token_data)).encode('utf-8')),
display_id, 'Downloading %s JSON' % location['loc'])
file_ = media.get('file')
if not file_:
continue
formats.extend(self._extract_f4m_formats(
file_ + '&hdcore=3.2.0&plugin=aasp-3.2.0.77.18',
display_id, f4m_id=loc))
title = self._search_regex(
r'class="Destacado-text"[^>]*>\s*<strong>([^<]+)</strong>', webpage, 'title')
video_id = self._search_regex(
r'data-media-id\s*=\s*"([^"]+)"', webpage,
'data media id', default=None) or display_id
thumbnail = config.get('poster', {}).get('imageUrl')
duration = int_or_none(mmc.get('duration'))
return {
'id': video_id,
'display_id': display_id,
'title': title,
'description': get_element_by_attribute('class', 'text', webpage),
'thumbnail': thumbnail,
'duration': duration,
'formats': formats,
}
| unlicense |
zstackio/zstack-woodpecker | integrationtest/vm/perf/sg/test_crt_sg_with_max_threads.py | 4 | 5749 | '''
New Perf Test for creating SG and other SG rules related operations.
The created number will depends on the environment variable: ZSTACK_TEST_NUM
The default max threads are 1000. It could be modified by env variable:
ZSTACK_THREAD_THRESHOLD
This case might need to run in KVM simulator environemnt, if there are not
enought resource in real environment.
@author: Youyk
'''
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.operations.config_operations as con_ops
import zstackwoodpecker.operations.account_operations as acc_ops
import zstackwoodpecker.operations.volume_operations as vol_ops
import zstackwoodpecker.operations.net_operations as net_ops
import zstackwoodpecker.zstack_test.zstack_test_volume as test_vol_header
import apibinding.inventory as inventory
import time
import os
import sys
import threading
import random
_config_ = {
'timeout' : 10000,
'noparallel' : True
}
session_uuid = None
session_to = None
session_mc = None
exc_info = []
rules = []
def create_sg(sg_option, l3_uuid, nic_uuid):
try:
#create security group
sg = net_ops.create_security_group(sg_option)
#add rule
net_ops.add_rules_to_security_group(sg.uuid, rules, session_uuid)
#attach to l3
net_ops.attach_security_group_to_l3(sg.uuid, l3_uuid, session_uuid)
#attach to vm
net_ops.add_nic_to_security_group(sg.uuid, [nic_uuid], session_uuid)
except:
exc_info.append(sys.exc_info())
def check_thread_exception():
if exc_info:
info1 = exc_info[0][1]
info2 = exc_info[0][2]
raise info1, None, info2
def create_1k_rule():
num = 1
#while num < 1001:
#while num < 101:
while num < 11:
rule = inventory.SecurityGroupRuleAO()
rule.allowedCidr = '192.168.0.1/32'
rule.endPort = 2*num + 1
rule.startPort = 2*num
rule.protocol = inventory.TCP
rule.type = inventory.EGRESS
rules.append(rule)
num += 1
def test():
global session_uuid
global session_to
global session_mc
thread_threshold = os.environ.get('ZSTACK_THREAD_THRESHOLD')
if not thread_threshold:
thread_threshold = 1000
else:
thread_threshold = int(thread_threshold)
sg_num = os.environ.get('ZSTACK_TEST_NUM')
if not sg_num:
sg_num = 0
else:
sg_num = int(sg_num)
create_1k_rule()
#change account session timeout.
session_to = con_ops.change_global_config('identity', 'session.timeout', '720000', session_uuid)
session_mc = con_ops.change_global_config('identity', 'session.maxConcurrent', '10000', session_uuid)
session_uuid = acc_ops.login_as_admin()
test_util.test_logger('ZSTACK_THREAD_THRESHOLD is %d' % thread_threshold)
test_util.test_logger('ZSTACK_TEST_NUM is %d' % sg_num)
org_num = sg_num
sg_option = test_util.SecurityGroupOption()
l3_name = os.environ.get('l3VlanNetworkName1')
conditions = res_ops.gen_query_conditions('name', '=', l3_name)
l3_uuid = res_ops.query_resource_with_num(res_ops.L3_NETWORK, conditions, \
session_uuid, start = 0, limit = 1)[0].uuid
cond = res_ops.gen_query_conditions('l3NetworkUuid', '=', l3_uuid)
vm_nic_num = res_ops.query_resource_count(res_ops.VM_NIC, cond, \
session_uuid)
if vm_nic_num < thread_threshold:
test_util.test_fail('This test needs: %d vm nics for pf attaching and detaching operations. But there are only %d vm nics. Please use this case: test_crt_basic_vm_with_max_threads.py to create required VMs.' % (thread_threshold, vm_nic_num))
vm_nics = res_ops.query_resource_fields(res_ops.VM_NIC, cond, \
session_uuid, ['uuid'], 0, thread_threshold)
nics = []
for nic in vm_nics:
nics.append(nic.uuid)
random_name = random.random()
sg_name = 'perf_sg_%s' % str(random_name)
sg_option.set_name(sg_name)
sg_option.set_session_uuid(session_uuid)
vm_num = 0
while sg_num > 0:
check_thread_exception()
sg_num -= 1
sg_option.set_description(org_num - sg_num)
if vm_num > (thread_threshold - 1):
vm_num = 0
thread = threading.Thread(target=create_sg, args = (sg_option, l3_uuid, nics[vm_num], ))
vm_num += 1
while threading.active_count() > thread_threshold:
time.sleep(1)
thread.start()
while threading.active_count() > 1:
time.sleep(0.01)
cond = res_ops.gen_query_conditions('name', '=', sg_name)
sgs_num = res_ops.query_resource_count(res_ops.SECURITY_GROUP, cond, session_uuid)
con_ops.change_global_config('identity', 'session.timeout', session_to, session_uuid)
con_ops.change_global_config('identity', 'session.maxConcurrent', session_mc, session_uuid)
acc_ops.logout(session_uuid)
if sgs_num == org_num:
test_util.test_pass('Create %d SGs Test Success' % org_num)
else:
test_util.test_fail('Create %d SGs Test Failed. Only find %d SGs.' % (org_num, sgs_num))
#Will be called only if exception happens in test().
def error_cleanup():
if session_to:
con_ops.change_global_config('identity', 'session.timeout', session_to, session_uuid)
if session_mc:
con_ops.change_global_config('identity', 'session.maxConcurrent', session_mc, session_uuid)
if session_uuid:
acc_ops.logout(session_uuid)
| apache-2.0 |
40223101/2015final2 | static/Brython3.1.1-20150328-091302/Lib/browser/markdown.py | 623 | 13060 | # -*- coding: utf-8 -*-
try:
import _jsre as re
except:
import re
import random
import time
letters = 'abcdefghijklmnopqrstuvwxyz'
letters += letters.upper()+'0123456789'
class URL:
def __init__(self,src):
elts = src.split(maxsplit=1)
self.href = elts[0]
self.alt = ''
if len(elts)==2:
alt = elts[1]
if alt[0]=='"' and alt[-1]=='"':self.alt=alt[1:-1]
elif alt[0]=="'" and alt[-1]=="'":self.alt=alt[1:-1]
elif alt[0]=="(" and alt[-1]==")":self.alt=alt[1:-1]
class CodeBlock:
def __init__(self,line):
self.lines = [line]
if line.startswith("```") and len(line)>3:
self.info = line[3:]
else:
self.info = None
def to_html(self):
if self.lines[0].startswith("`"):
self.lines.pop(0)
res = escape('\n'.join(self.lines))
res = unmark(res)
_class = self.info or "marked"
res = '<pre class="%s">%s</pre>\n' %(_class, res)
return res,[]
class HtmlBlock:
def __init__(self, src):
self.src = src
def to_html(self):
return self.src
class Marked:
def __init__(self, line=''):
self.line = line
self.children = []
def to_html(self):
return apply_markdown(self.line)
# get references
refs = {}
ref_pattern = r"^\[(.*)\]:\s+(.*)"
def mark(src):
global refs
t0 = time.time()
refs = {}
# split source in sections
# sections can be :
# - a block-level HTML element (markdown syntax will not be processed)
# - a script
# - a span-level HTML tag (markdown syntax will be processed)
# - a code block
# normalise line feeds
src = src.replace('\r\n','\n')
# lines followed by dashes
src = re.sub(r'(.*?)\n=+\n', '\n# \\1\n', src)
src = re.sub(r'(.*?)\n-+\n', '\n## \\1\n', src)
lines = src.split('\n')+['']
i = bq = 0
ul = ol = 0
while i<len(lines):
# enclose lines starting by > in a blockquote
if lines[i].startswith('>'):
nb = 1
while nb<len(lines[i]) and lines[i][nb]=='>':
nb += 1
lines[i] = lines[i][nb:]
if nb>bq:
lines.insert(i,'<blockquote>'*(nb-bq))
i += 1
bq = nb
elif nb<bq:
lines.insert(i,'</blockquote>'*(bq-nb))
i += 1
bq = nb
elif bq>0:
lines.insert(i,'</blockquote>'*bq)
i += 1
bq = 0
# unordered lists
if lines[i].strip() and lines[i].lstrip()[0] in '-+*' \
and len(lines[i].lstrip())>1 \
and lines[i].lstrip()[1]==' ' \
and (i==0 or ul or not lines[i-1].strip()):
# line indentation indicates nesting level
nb = 1+len(lines[i])-len(lines[i].lstrip())
lines[i] = '<li>'+lines[i][nb:]
if nb>ul:
lines.insert(i,'<ul>'*(nb-ul))
i += 1
elif nb<ul:
lines.insert(i,'</ul>'*(ul-nb))
i += 1
ul = nb
elif ul and not lines[i].strip():
if i<len(lines)-1 and lines[i+1].strip() \
and not lines[i+1].startswith(' '):
nline = lines[i+1].lstrip()
if nline[0] in '-+*' and len(nline)>1 and nline[1]==' ':
pass
else:
lines.insert(i,'</ul>'*ul)
i += 1
ul = 0
# ordered lists
mo = re.search(r'^(\d+\.)',lines[i])
if mo:
if not ol:
lines.insert(i,'<ol>')
i += 1
lines[i] = '<li>'+lines[i][len(mo.groups()[0]):]
ol = 1
elif ol and not lines[i].strip() and i<len(lines)-1 \
and not lines[i+1].startswith(' ') \
and not re.search(r'^(\d+\.)',lines[i+1]):
lines.insert(i,'</ol>')
i += 1
ol = 0
i += 1
if ul:
lines.append('</ul>'*ul)
if ol:
lines.append('</ol>'*ol)
if bq:
lines.append('</blockquote>'*bq)
t1 = time.time()
#print('part 1', t1-t0)
sections = []
scripts = []
section = Marked()
i = 0
while i<len(lines):
line = lines[i]
if line.strip() and line.startswith(' '):
if isinstance(section,Marked) and section.line:
sections.append(section)
section = CodeBlock(line[4:])
j = i+1
while j<len(lines) and lines[j].startswith(' '):
section.lines.append(lines[j][4:])
j += 1
sections.append(section)
section = Marked()
i = j
continue
elif line.strip() and line.startswith("```"):
# fenced code blocks à la Github Flavoured Markdown
if isinstance(section,Marked) and section.line:
sections.append(section)
section = CodeBlock(line)
j = i+1
while j<len(lines) and not lines[j].startswith("```"):
section.lines.append(lines[j])
j += 1
sections.append(section)
section = Marked()
i = j+1
continue
elif line.lower().startswith('<script'):
if isinstance(section,Marked) and section.line:
sections.append(section)
section = Marked()
j = i+1
while j<len(lines):
if lines[j].lower().startswith('</script>'):
scripts.append('\n'.join(lines[i+1:j]))
for k in range(i,j+1):
lines[k] = ''
break
j += 1
i = j
continue
# atext header
elif line.startswith('#'):
level = 1
line = lines[i]
while level<len(line) and line[level]=='#' and level<=6:
level += 1
if not line[level+1:].strip():
if level==1:
i += 1
continue
else:
lines[i] = '<H%s>%s</H%s>\n' %(level-1,'#',level-1)
else:
lines[i] = '<H%s>%s</H%s>\n' %(level,line[level+1:],level)
else:
mo = re.search(ref_pattern,line)
if mo is not None:
if isinstance(section,Marked) and section.line:
sections.append(section)
section = Marked()
key = mo.groups()[0]
value = URL(mo.groups()[1])
refs[key.lower()] = value
else:
if not line.strip():
line = '<p></p>'
if section.line:
section.line += '\n'
section.line += line
i += 1
t2 = time.time()
#print('section 2', t2-t1)
if isinstance(section,Marked) and section.line:
sections.append(section)
res = ''
for section in sections:
mk,_scripts = section.to_html()
res += mk
scripts += _scripts
#print('end mark', time.time()-t2)
return res,scripts
def escape(czone):
czone = czone.replace('&','&')
czone = czone.replace('<','<')
czone = czone.replace('>','>')
czone = czone.replace('_','_')
czone = czone.replace('*','*')
return czone
def s_escape(mo):
# used in re.sub
czone = mo.string[mo.start():mo.end()]
return escape(czone)
def unmark(code_zone):
# convert _ to _ inside inline code
code_zone = code_zone.replace('_','_')
return code_zone
def s_unmark(mo):
# convert _ to _ inside inline code
code_zone = mo.string[mo.start():mo.end()]
code_zone = code_zone.replace('_','_')
return code_zone
def apply_markdown(src):
scripts = []
key = None
t0 = time.time()
i = 0
while i<len(src):
if src[i]=='[':
start_a = i+1
while True:
end_a = src.find(']',i)
if end_a == -1:
break
if src[end_a-1]=='\\':
i = end_a+1
else:
break
if end_a>-1 and src[start_a:end_a].find('\n')==-1:
link = src[start_a:end_a]
rest = src[end_a+1:].lstrip()
if rest and rest[0]=='(':
j = 0
while True:
end_href = rest.find(')',j)
if end_href == -1:
break
if rest[end_href-1]=='\\':
j = end_href+1
else:
break
if end_href>-1 and rest[:end_href].find('\n')==-1:
tag = '<a href="'+rest[1:end_href]+'">'+link+'</a>'
src = src[:start_a-1]+tag+rest[end_href+1:]
i = start_a+len(tag)
elif rest and rest[0]=='[':
j = 0
while True:
end_key = rest.find(']',j)
if end_key == -1:
break
if rest[end_key-1]=='\\':
j = end_key+1
else:
break
if end_key>-1 and rest[:end_key].find('\n')==-1:
if not key:
key = link
if key.lower() not in refs:
raise KeyError('unknown reference %s' %key)
url = refs[key.lower()]
tag = '<a href="'+url+'">'+link+'</a>'
src = src[:start_a-1]+tag+rest[end_key+1:]
i = start_a+len(tag)
i += 1
t1 = time.time()
#print('apply markdown 1', t1-t0)
# before applying the markup with _ and *, isolate HTML tags because
# they can contain these characters
# We replace them temporarily by a random string
rstr = ''.join(random.choice(letters) for i in range(16))
i = 0
state = None
start = -1
data = ''
tags = []
while i<len(src):
if src[i]=='<':
j = i+1
while j<len(src):
if src[j]=='"' or src[j]=="'":
if state==src[j] and src[j-1]!='\\':
state = None
j = start+len(data)+1
data = ''
elif state==None:
state = src[j]
start = j
else:
data += src[j]
elif src[j]=='>' and state is None:
tags.append(src[i:j+1])
src = src[:i]+rstr+src[j+1:]
i += len(rstr)
break
elif state=='"' or state=="'":
data += src[j]
elif src[j]=='\n':
# if a sign < is not followed by > in the same ligne, it
# is the sign "lesser than"
src = src[:i]+'<'+src[i+1:]
j=i+4
break
j += 1
elif src[i]=='`' and i>0 and src[i-1]!='\\':
# ignore the content of inline code
j = i+1
while j<len(src):
if src[j]=='`' and src[j-1]!='\\':
break
j += 1
i = j
i += 1
t2 = time.time()
#print('apply markdown 2', len(src), t2-t1)
# escape "<", ">", "&" and "_" in inline code
code_pattern = r'\`(.*?)\`'
src = re.sub(code_pattern,s_escape,src)
# replace escaped ` _ * by HTML characters
src = src.replace(r'\\`','`')
src = src.replace(r'\_','_')
src = src.replace(r'\*','*')
# emphasis
strong_patterns = [('STRONG',r'\*\*(.*?)\*\*'),('B',r'__(.*?)__')]
for tag,strong_pattern in strong_patterns:
src = re.sub(strong_pattern,r'<%s>\1</%s>' %(tag,tag),src)
em_patterns = [('EM',r'\*(.*?)\*'),('I',r'\_(.*?)\_')]
for tag,em_pattern in em_patterns:
src = re.sub(em_pattern,r'<%s>\1</%s>' %(tag,tag),src)
# inline code
code_pattern = r'\`(.*?)\`'
src = re.sub(code_pattern,r'<code>\1</code>',src)
# restore tags
while True:
pos = src.rfind(rstr)
if pos==-1:
break
repl = tags.pop()
src = src[:pos]+repl+src[pos+len(rstr):]
src = '<p>'+src+'</p>'
t3 = time.time()
#print('apply markdown 3', t3-t2)
return src,scripts
| gpl-3.0 |
zakuro9715/lettuce | tests/integration/lib/Django-1.2.5/tests/regressiontests/admin_widgets/tests.py | 38 | 15241 | # encoding: utf-8
from datetime import datetime
from unittest import TestCase
from django import forms
from django.conf import settings
from django.contrib import admin
from django.contrib.admin import widgets
from django.contrib.admin.widgets import FilteredSelectMultiple, AdminSplitDateTime
from django.contrib.admin.widgets import (AdminFileWidget, ForeignKeyRawIdWidget,
ManyToManyRawIdWidget)
from django.core.files.storage import default_storage
from django.core.files.uploadedfile import SimpleUploadedFile
from django.db.models import DateField
from django.test import TestCase as DjangoTestCase
from django.utils.html import conditional_escape
from django.utils.translation import activate, deactivate
import models
class AdminFormfieldForDBFieldTests(TestCase):
"""
Tests for correct behavior of ModelAdmin.formfield_for_dbfield
"""
def assertFormfield(self, model, fieldname, widgetclass, **admin_overrides):
"""
Helper to call formfield_for_dbfield for a given model and field name
and verify that the returned formfield is appropriate.
"""
# Override any settings on the model admin
class MyModelAdmin(admin.ModelAdmin): pass
for k in admin_overrides:
setattr(MyModelAdmin, k, admin_overrides[k])
# Construct the admin, and ask it for a formfield
ma = MyModelAdmin(model, admin.site)
ff = ma.formfield_for_dbfield(model._meta.get_field(fieldname), request=None)
# "unwrap" the widget wrapper, if needed
if isinstance(ff.widget, widgets.RelatedFieldWidgetWrapper):
widget = ff.widget.widget
else:
widget = ff.widget
# Check that we got a field of the right type
self.assert_(
isinstance(widget, widgetclass),
"Wrong widget for %s.%s: expected %s, got %s" % \
(model.__class__.__name__, fieldname, widgetclass, type(widget))
)
# Return the formfield so that other tests can continue
return ff
def testDateField(self):
self.assertFormfield(models.Event, 'start_date', widgets.AdminDateWidget)
def testDateTimeField(self):
self.assertFormfield(models.Member, 'birthdate', widgets.AdminSplitDateTime)
def testTimeField(self):
self.assertFormfield(models.Event, 'start_time', widgets.AdminTimeWidget)
def testTextField(self):
self.assertFormfield(models.Event, 'description', widgets.AdminTextareaWidget)
def testURLField(self):
self.assertFormfield(models.Event, 'link', widgets.AdminURLFieldWidget)
def testIntegerField(self):
self.assertFormfield(models.Event, 'min_age', widgets.AdminIntegerFieldWidget)
def testCharField(self):
self.assertFormfield(models.Member, 'name', widgets.AdminTextInputWidget)
def testFileField(self):
self.assertFormfield(models.Album, 'cover_art', widgets.AdminFileWidget)
def testForeignKey(self):
self.assertFormfield(models.Event, 'band', forms.Select)
def testRawIDForeignKey(self):
self.assertFormfield(models.Event, 'band', widgets.ForeignKeyRawIdWidget,
raw_id_fields=['band'])
def testRadioFieldsForeignKey(self):
ff = self.assertFormfield(models.Event, 'band', widgets.AdminRadioSelect,
radio_fields={'band':admin.VERTICAL})
self.assertEqual(ff.empty_label, None)
def testManyToMany(self):
self.assertFormfield(models.Band, 'members', forms.SelectMultiple)
def testRawIDManyTOMany(self):
self.assertFormfield(models.Band, 'members', widgets.ManyToManyRawIdWidget,
raw_id_fields=['members'])
def testFilteredManyToMany(self):
self.assertFormfield(models.Band, 'members', widgets.FilteredSelectMultiple,
filter_vertical=['members'])
def testFormfieldOverrides(self):
self.assertFormfield(models.Event, 'start_date', forms.TextInput,
formfield_overrides={DateField: {'widget': forms.TextInput}})
def testFieldWithChoices(self):
self.assertFormfield(models.Member, 'gender', forms.Select)
def testChoicesWithRadioFields(self):
self.assertFormfield(models.Member, 'gender', widgets.AdminRadioSelect,
radio_fields={'gender':admin.VERTICAL})
def testInheritance(self):
self.assertFormfield(models.Album, 'backside_art', widgets.AdminFileWidget)
class AdminFormfieldForDBFieldWithRequestTests(DjangoTestCase):
fixtures = ["admin-widgets-users.xml"]
def testFilterChoicesByRequestUser(self):
"""
Ensure the user can only see their own cars in the foreign key dropdown.
"""
self.client.login(username="super", password="secret")
response = self.client.get("/widget_admin/admin_widgets/cartire/add/")
self.assert_("BMW M3" not in response.content)
self.assert_("Volkswagon Passat" in response.content)
class AdminForeignKeyWidgetChangeList(DjangoTestCase):
fixtures = ["admin-widgets-users.xml"]
admin_root = '/widget_admin'
def setUp(self):
self.client.login(username="super", password="secret")
def tearDown(self):
self.client.logout()
def test_changelist_foreignkey(self):
response = self.client.get('%s/admin_widgets/car/' % self.admin_root)
self.assertTrue('%s/auth/user/add/' % self.admin_root in response.content)
class AdminForeignKeyRawIdWidget(DjangoTestCase):
fixtures = ["admin-widgets-users.xml"]
admin_root = '/widget_admin'
def setUp(self):
self.client.login(username="super", password="secret")
def tearDown(self):
self.client.logout()
def test_nonexistent_target_id(self):
band = models.Band.objects.create(name='Bogey Blues')
pk = band.pk
band.delete()
post_data = {
"band": u'%s' % pk,
}
# Try posting with a non-existent pk in a raw id field: this
# should result in an error message, not a server exception.
response = self.client.post('%s/admin_widgets/event/add/' % self.admin_root,
post_data)
self.assertContains(response,
'Select a valid choice. That choice is not one of the available choices.')
def test_invalid_target_id(self):
for test_str in ('Iñtërnâtiônàlizætiøn', "1234'", -1234):
# This should result in an error message, not a server exception.
response = self.client.post('%s/admin_widgets/event/add/' % self.admin_root,
{"band": test_str})
self.assertContains(response,
'Select a valid choice. That choice is not one of the available choices.')
class FilteredSelectMultipleWidgetTest(TestCase):
    """Rendering of the two-pane JavaScript multi-select widget."""

    def test_render(self):
        # Horizontal (non-stacked) variant: class "selectfilter" and
        # SelectFilter.init is called with 0 as the stacked flag.
        w = FilteredSelectMultiple('test', False)
        self.assertEqual(
            conditional_escape(w.render('test', 'test')),
            '<select multiple="multiple" name="test" class="selectfilter">\n</select><script type="text/javascript">addEvent(window, "load", function(e) {SelectFilter.init("id_test", "test", 0, "%(ADMIN_MEDIA_PREFIX)s"); });</script>\n' % {"ADMIN_MEDIA_PREFIX": settings.ADMIN_MEDIA_PREFIX}
        )

    def test_stacked_render(self):
        # Vertical (stacked) variant: class "selectfilterstacked" and the
        # stacked flag becomes 1.
        w = FilteredSelectMultiple('test', True)
        self.assertEqual(
            conditional_escape(w.render('test', 'test')),
            '<select multiple="multiple" name="test" class="selectfilterstacked">\n</select><script type="text/javascript">addEvent(window, "load", function(e) {SelectFilter.init("id_test", "test", 1, "%(ADMIN_MEDIA_PREFIX)s"); });</script>\n' % {"ADMIN_MEDIA_PREFIX": settings.ADMIN_MEDIA_PREFIX}
        )
class AdminSplitDateTimeWidgetTest(TestCase):
    """AdminSplitDateTime renders separate, localizable date/time inputs."""

    def test_render(self):
        # Default (English) rendering: ISO date format and English labels.
        w = AdminSplitDateTime()
        self.assertEqual(
            conditional_escape(w.render('test', datetime(2007, 12, 1, 9, 30))),
            '<p class="datetime">Date: <input value="2007-12-01" type="text" class="vDateField" name="test_0" size="10" /><br />Time: <input value="09:30:00" type="text" class="vTimeField" name="test_1" size="8" /></p>',
        )

    def test_localization(self):
        w = AdminSplitDateTime()
        # Switch to Austrian German so both the labels and the date format
        # are localized (dd.mm.yyyy).
        activate('de-at')
        old_USE_L10N = settings.USE_L10N
        try:
            settings.USE_L10N = True
            w.is_localized = True
            self.assertEqual(
                conditional_escape(w.render('test', datetime(2007, 12, 1, 9, 30))),
                '<p class="datetime">Datum: <input value="01.12.2007" type="text" class="vDateField" name="test_0" size="10" /><br />Zeit: <input value="09:30:00" type="text" class="vTimeField" name="test_1" size="8" /></p>',
            )
        finally:
            # Restore the active language and the L10N setting so later
            # tests are unaffected.
            deactivate()
            settings.USE_L10N = old_USE_L10N
class AdminFileWidgetTest(DjangoTestCase):
    """Rendering behaviour of AdminFileWidget, including HTML escaping.

    NOTE: the expected strings in ``test_render_escapes_html`` were
    HTML-unescaped by a bad extraction (``&amp;`` -> ``&``, ``&#39;`` -> ``'``,
    ``&sect;`` swallowed into ``§``), which both broke the syntax of the
    final assertion and inverted the meaning of the escaping checks.  They
    are restored here to the escaped entity forms the widget must emit.
    """

    def test_render(self):
        band = models.Band.objects.create(name='Linkin Park')
        album = band.album_set.create(
            name='Hybrid Theory', cover_art=r'albums\hybrid_theory.jpg'
        )
        w = AdminFileWidget()
        # An existing file renders a "Currently" link plus a change input.
        self.assertEqual(
            conditional_escape(w.render('test', album.cover_art)),
            'Currently: <a target="_blank" href="%(STORAGE_URL)salbums/hybrid_theory.jpg">albums\hybrid_theory.jpg</a> <br />Change: <input type="file" name="test" />' % {'STORAGE_URL': default_storage.url('')},
        )
        # A freshly uploaded file (not yet stored) renders a bare input.
        self.assertEqual(
            conditional_escape(w.render('test', SimpleUploadedFile('test', 'content'))),
            '<input type="file" name="test" />',
        )

    def test_render_escapes_html(self):
        class StrangeFieldFile(object):
            # A url/name crafted so that un-escaped output would inject
            # entities and a script handler into the admin page.
            url = "something?chapter=1&sect=2&copy=3&lang=en"
            def __unicode__(self):
                return u'''something<div onclick="alert('oops')">.jpg'''
        widget = AdminFileWidget()
        field = StrangeFieldFile()
        output = widget.render('myfile', field)
        # The raw url must not appear verbatim; only its escaped form may.
        self.assertFalse(field.url in output)
        self.assertTrue(u'href="something?chapter=1&amp;sect=2&amp;copy=3&amp;lang=en"' in output)
        # Likewise the raw file name must be escaped in the output.
        self.assertFalse(unicode(field) in output)
        self.assertTrue(u'something&lt;div onclick=&quot;alert(&#39;oops&#39;)&quot;&gt;.jpg' in output)
class ForeignKeyRawIdWidgetTest(DjangoTestCase):
    """Rendering of ForeignKeyRawIdWidget, including non-pk target fields."""

    def test_render(self):
        band = models.Band.objects.create(name='Linkin Park')
        band.album_set.create(
            name='Hybrid Theory', cover_art=r'albums\hybrid_theory.jpg'
        )
        rel = models.Album._meta.get_field('band').rel
        w = ForeignKeyRawIdWidget(rel)
        # The widget renders the pk input, a popup lookup link and the
        # related object's label.
        self.assertEqual(
            conditional_escape(w.render('test', band.pk, attrs={})),
            '<input type="text" name="test" value="%(bandpk)s" class="vForeignKeyRawIdAdminField" /><a href="../../../admin_widgets/band/?t=id" class="related-lookup" id="lookup_id_test" onclick="return showRelatedObjectLookupPopup(this);"> <img src="%(ADMIN_MEDIA_PREFIX)simg/admin/selector-search.gif" width="16" height="16" alt="Lookup" /></a> <strong>Linkin Park</strong>' % {"ADMIN_MEDIA_PREFIX": settings.ADMIN_MEDIA_PREFIX, "bandpk": band.pk},
        )

    def test_relations_to_non_primary_key(self):
        # Check that ForeignKeyRawIdWidget works with fields which aren't
        # related to the model's primary key.
        apple = models.Inventory.objects.create(barcode=86, name='Apple')
        models.Inventory.objects.create(barcode=22, name='Pear')
        core = models.Inventory.objects.create(
            barcode=87, name='Core', parent=apple
        )
        rel = models.Inventory._meta.get_field('parent').rel
        w = ForeignKeyRawIdWidget(rel)
        # Both the rendered value ("86") and the ?t= lookup parameter use
        # the target field (barcode), not the hidden integer pk.
        self.assertEqual(
            w.render('test', core.parent_id, attrs={}),
            '<input type="text" name="test" value="86" class="vForeignKeyRawIdAdminField" /><a href="../../../admin_widgets/inventory/?t=barcode" class="related-lookup" id="lookup_id_test" onclick="return showRelatedObjectLookupPopup(this);"> <img src="%(ADMIN_MEDIA_PREFIX)simg/admin/selector-search.gif" width="16" height="16" alt="Lookup" /></a> <strong>Apple</strong>' % {"ADMIN_MEDIA_PREFIX": settings.ADMIN_MEDIA_PREFIX},
        )

    def test_proper_manager_for_label_lookup(self):
        # see #9258
        rel = models.Inventory._meta.get_field('parent').rel
        w = ForeignKeyRawIdWidget(rel)
        hidden = models.Inventory.objects.create(
            barcode=93, name='Hidden', hidden=True
        )
        child_of_hidden = models.Inventory.objects.create(
            barcode=94, name='Child of hidden', parent=hidden
        )
        # The label lookup must use a manager that can still see the
        # "hidden" instance, so its name renders in the <strong> label.
        self.assertEqual(
            w.render('test', child_of_hidden.parent_id, attrs={}),
            '<input type="text" name="test" value="93" class="vForeignKeyRawIdAdminField" /><a href="../../../admin_widgets/inventory/?t=barcode" class="related-lookup" id="lookup_id_test" onclick="return showRelatedObjectLookupPopup(this);"> <img src="%(ADMIN_MEDIA_PREFIX)simg/admin/selector-search.gif" width="16" height="16" alt="Lookup" /></a> <strong>Hidden</strong>' % {"ADMIN_MEDIA_PREFIX": settings.ADMIN_MEDIA_PREFIX},
        )
class ManyToManyRawIdWidgetTest(DjangoTestCase):
    """Rendering and change detection for ManyToManyRawIdWidget."""

    def test_render(self):
        band = models.Band.objects.create(name='Linkin Park')
        band.album_set.create(
            name='Hybrid Theory', cover_art=r'albums\hybrid_theory.jpg'
        )
        m1 = models.Member.objects.create(name='Chester')
        m2 = models.Member.objects.create(name='Mike')
        band.members.add(m1, m2)
        rel = models.Band._meta.get_field('members').rel
        w = ManyToManyRawIdWidget(rel)
        # Multiple selected pks render as a comma-separated input value.
        self.assertEqual(
            conditional_escape(w.render('test', [m1.pk, m2.pk], attrs={})),
            '<input type="text" name="test" value="%(m1pk)s,%(m2pk)s" class="vManyToManyRawIdAdminField" /><a href="../../../admin_widgets/member/" class="related-lookup" id="lookup_id_test" onclick="return showRelatedObjectLookupPopup(this);"> <img src="%(ADMIN_MEDIA_PREFIX)simg/admin/selector-search.gif" width="16" height="16" alt="Lookup" /></a>' % {"ADMIN_MEDIA_PREFIX": settings.ADMIN_MEDIA_PREFIX, "m1pk": m1.pk, "m2pk": m2.pk},
        )
        # A single selected pk renders without a separator.
        self.assertEqual(
            conditional_escape(w.render('test', [m1.pk])),
            '<input type="text" name="test" value="%(m1pk)s" class="vManyToManyRawIdAdminField" /><a href="../../../admin_widgets/member/" class="related-lookup" id="lookup_id_test" onclick="return showRelatedObjectLookupPopup(this);"> <img src="%(ADMIN_MEDIA_PREFIX)simg/admin/selector-search.gif" width="16" height="16" alt="Lookup" /></a>' % {"ADMIN_MEDIA_PREFIX": settings.ADMIN_MEDIA_PREFIX, "m1pk": m1.pk},
        )
        # _has_changed compares initial pk lists against the submitted
        # string values; order matters, None/[] count as "no selection".
        self.assertEqual(w._has_changed(None, None), False)
        self.assertEqual(w._has_changed([], None), False)
        self.assertEqual(w._has_changed(None, [u'1']), True)
        self.assertEqual(w._has_changed([1, 2], [u'1', u'2']), False)
        self.assertEqual(w._has_changed([1, 2], [u'1']), True)
        self.assertEqual(w._has_changed([1, 2], [u'1', u'3']), True)
| gpl-3.0 |
Mercy-Nekesa/sokoapp | sokoapp/trending/managers.py | 2 | 1033 | import datetime
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from django.db.models import Sum
from django.contrib.contenttypes.models import ContentType
class TrendingManager(models.Manager):
    """Manager computing "most viewed" rankings from recorded view counts."""

    def trending(self, model, days=30, kind=""):
        """Return aggregated view rows for ``model`` over the last ``days`` days.

        Each row is a dict carrying ``viewed_content_type``,
        ``viewed_object_id``, ``kind``, ``num_views`` and ``object`` (the
        resolved instance, or ``None`` when it has since been deleted),
        ordered by ``num_views`` descending.
        """
        cutoff = datetime.date.today() - datetime.timedelta(days=days)
        target_type = ContentType.objects.get_for_model(model)
        rows = self.filter(
            viewed_content_type=target_type,
            views_on__gte=cutoff,
            kind=kind,
        ).values(
            "viewed_content_type",
            "viewed_object_id",
            "kind",
        ).annotate(
            num_views=Sum("count"),
        ).order_by("-num_views")
        for row in rows:
            # Resolve the generic foreign key; the target may be gone.
            try:
                row["object"] = ContentType.objects.get_for_id(
                    row["viewed_content_type"]
                ).get_object_for_this_type(pk=row["viewed_object_id"])
            except ObjectDoesNotExist:
                row["object"] = None
        return rows
| mit |
40223137/w1717 | wsgi.py | 1 | 50394 | # coding=utf-8
# 上面的程式內容編碼必須在程式的第一或者第二行才會有作用
################# (1) 模組導入區
# 導入 cherrypy 模組, 為了在 OpenShift 平台上使用 cherrypy 模組, 必須透過 setup.py 安裝
import cherrypy
# 導入 Python 內建的 os 模組, 因為 os 模組為 Python 內建, 所以無需透過 setup.py 安裝
import os
# 導入 random 模組
import random
import math
from cherrypy.lib.static import serve_file
# 導入 gear 模組
#import gear
import man
import man2
################# (2) Global variable setup
# Directory containing this program file (on Windows it ends with a
# trailing backslash).
_curdir = os.path.join(os.getcwd(), os.path.dirname(__file__))
# Pick the data storage directory for cloud vs. local execution.
if 'OPENSHIFT_REPO_DIR' in os.environ.keys():
    # Running on the OpenShift cloud platform.
    download_root_dir = os.environ['OPENSHIFT_DATA_DIR']
    data_dir = os.environ['OPENSHIFT_DATA_DIR']
else:
    # Running locally.
    download_root_dir = _curdir + "/local_data/"
    data_dir = _curdir + "/local_data/"
'''以下為近端 input() 與 for 迴圈應用的程式碼, 若要將程式送到 OpenShift 執行, 除了採用 CherryPy 網際框架外, 還要轉為 html 列印
# 利用 input() 取得的資料型別為字串
toprint = input("要印甚麼內容?")
# 若要將 input() 取得的字串轉為整數使用, 必須利用 int() 轉換
repeat_no = int(input("重複列印幾次?"))
for i in range(repeat_no):
print(toprint)
'''
################# (3) Program class definition section
# The CherryPy web-framework application structure starts here.
# Inheriting from ``object`` gives ``Hello`` all of its methods/attributes
# (new-style class).
class Hello(object):
    # CherryPy per-application configuration for this handler class.
    _cp_config = {
        'tools.encode.encoding': 'utf-8',
        'tools.sessions.on' : True,
        'tools.sessions.storage_type' : 'file',
        #'tools.sessions.locking' : 'explicit',
        # Sessions are stored as files under the tmp directory in data_dir.
        'tools.sessions.storage_path' : data_dir+'/tmp',
        # Session lifetime set to 60 minutes.
        'tools.sessions.timeout' : 60
    }
def __init__(self):
    """Create the working directories the application needs at start-up.

    Replaces the original three check-then-``mkdir`` pairs with
    ``os.makedirs(..., exist_ok=True)``: one loop instead of triplicated
    code, and no TOCTOU race when two workers start simultaneously.
    """
    for subdir in ('tmp', 'downloads', 'images'):
        # data_dir is a module-level global chosen for cloud/local runs.
        os.makedirs(os.path.join(data_dir, subdir), exist_ok=True)
# Methods decorated with @cherrypy.expose become directly URL-routable.
@cherrypy.expose
# This is the original plain-text default handler, kept under another name
# after ``index`` was replaced by the HTML landing page.
def index_orig(self, toprint="Hello World!"):
    """Echo ``toprint`` back to the client unchanged."""
    return toprint
@cherrypy.expose
def hello(self, toprint="Hello World!"):
    """Return ``toprint`` verbatim as the response body."""
    return toprint
@cherrypy.expose
def index(self, guess=None):
    """Serve the landing page linking to each gear-drawing demo.

    ``guess`` is accepted for URL compatibility but unused here.
    """
    # Return the page literal directly instead of staging it in a local.
    return '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
</head>
<body>
第七組齒輪部分<br />
<a href="mygeartest2">mygeartest2</a><br />
以下為40223137(黃柏學)的齒輪<br />
<a href="mygeartest3">mygeartest3</a><br />
以下為40223137(黃柏學)與40223120(林家豪)的協同齒輪<br />
<a href="mygeartest4">mygeartest4</a><br />
<a href="man">man(自動組立)</a><br />
<a href="man2">man2(全組分工組立)</a><br />
</body>
</html>
'''
@cherrypy.expose
def index2(self, guess=None):
    """Serve a number-guessing form plus two embedded Brython demos.

    The Brython code inside the page runs in the browser, not here.
    """
    # Store the standard answer in the "answer" session slot.
    theanswer = random.randint(1, 100)
    thecount = 0
    # Persist the answer and the attempt counter into the session.
    cherrypy.session['answer'] = theanswer
    cherrypy.session['count'] = thecount
    # Emit the hypertext form for the user's input.
    outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<!-- 載入 brython.js -->
<script type="text/javascript" src="/static/Brython3.1.0-20150301-090019/brython.js"></script>
<script src="/static/Cango2D.js" type="text/javascript"></script>
<script src="/static/gearUtils-04.js" type="text/javascript"></script>
</head>
<!-- 啟動 brython() -->
<body onload="brython()">
<form method=POST action=doCheck>
請輸入您所猜的整數:<input type=text name=guess><br />
<input type=submit value=send>
</form>
<hr>
<!-- 以下在網頁內嵌 Brython 程式 -->
<script type="text/python">
from browser import document, alert
def echo(ev):
alert(document["zone"].value)
# 將文件中名稱為 mybutton 的物件, 透過 click 事件與 echo 函式 bind 在一起
document['mybutton'].bind('click',echo)
</script>
<input id="zone"><button id="mybutton">click !</button>
<hr>
<!-- 以下為 canvas 畫圖程式 -->
<script type="text/python">
# 從 browser 導入 document
from browser import document
import math
# 畫布指定在名稱為 plotarea 的 canvas 上
canvas = document["plotarea"]
ctx = canvas.getContext("2d")
# 用紅色畫一條直線
ctx.beginPath()
ctx.lineWidth = 3
ctx.moveTo(0, 0)
ctx.lineTo(0, 500)
ctx.strokeStyle = "red"
ctx.stroke()
# 用藍色再畫一條直線
ctx.beginPath()
ctx.lineWidth = 3
ctx.moveTo(0, 0)
ctx.lineTo(500, 0)
ctx.strokeStyle = "blue"
ctx.stroke()
# 用綠色再畫一條直線
ctx.beginPath()
ctx.lineWidth = 3
ctx.moveTo(0, 0)
ctx.lineTo(500, 500)
ctx.strokeStyle = "green"
ctx.stroke()
# 用黑色畫一個圓
ctx.beginPath()
ctx.lineWidth = 3
ctx.strokeStyle = "black"
ctx.arc(250,250,50,0,2*math.pi)
ctx.stroke()
</script>
<canvas id="plotarea" width="800" height="600"></canvas>
</body>
</html>
'''
    return outstring
@cherrypy.expose
# N: tooth count, M: module, P: pressure angle.
def twoDgear(self, N=20, M=5, P=15):
    """Serve the input form that posts gear parameters to ``do2Dgear``."""
    outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<!-- 載入 brython.js -->
<script type="text/javascript" src="/static/Brython3.1.0-20150301-090019/brython.js"></script>
<script src="/static/Cango2D.js" type="text/javascript"></script>
<script src="/static/gearUtils-04.js" type="text/javascript"></script>
</head>
<!-- 啟動 brython() -->
<body onload="brython()">
<form method=POST action=do2Dgear>
齒數:<input type=text name=N><br />
模數:<input type=text name=M><br />
壓力角:<input type=text name=P><br />
<input type=submit value=send>
</form>
</body>
</html>
'''
    return outstring
@cherrypy.expose
# N: tooth count, M: module, P: pressure angle.
def threeDgear(self, N=20, M=5, P=15):
    """Serve the input form that posts gear parameters to ``do3Dgear``."""
    outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<!-- 載入 brython.js -->
<script type="text/javascript" src="/static/Brython3.1.0-20150301-090019/brython.js"></script>
<script src="/static/Cango2D.js" type="text/javascript"></script>
<script src="/static/gearUtils-04.js" type="text/javascript"></script>
</head>
<!-- 啟動 brython() -->
<body onload="brython()">
<form method=POST action=do3Dgear>
齒數:<input type=text name=N><br />
模數:<input type=text name=M><br />
壓力角:<input type=text name=P><br />
<input type=submit value=send>
</form>
</body>
</html>
'''
    return outstring
@cherrypy.expose
# N: tooth count, M: module, P: pressure angle.
def do2Dgear(self, N=20, M=5, P=15):
    """Serve a canvas demo page; N and M are spliced into the embedded
    Brython script as the moveTo() start coordinates.

    NOTE(review): despite the gear-parameter names, this handler only
    draws lines/a circle; P is accepted but never used.
    """
    outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<!-- 載入 brython.js -->
<script type="text/javascript" src="/static/Brython3.1.0-20150301-090019/brython.js"></script>
<script src="/static/Cango2D.js" type="text/javascript"></script>
<script src="/static/gearUtils-04.js" type="text/javascript"></script>
</head>
<!-- 啟動 brython() -->
<body onload="brython()">
<!-- 以下為 canvas 畫圖程式 -->
<script type="text/python">
# 從 browser 導入 document
from browser import document
import math
# 畫布指定在名稱為 plotarea 的 canvas 上
canvas = document["plotarea"]
ctx = canvas.getContext("2d")
# 用紅色畫一條直線
ctx.beginPath()
ctx.lineWidth = 3
'''
    # Splice the request parameters into the served script text.
    outstring += '''
ctx.moveTo('''+str(N)+","+str(M)+")"
    outstring += '''
ctx.lineTo(0, 500)
ctx.strokeStyle = "red"
ctx.stroke()
# 用藍色再畫一條直線
ctx.beginPath()
ctx.lineWidth = 3
ctx.moveTo(0, 0)
ctx.lineTo(500, 0)
ctx.strokeStyle = "blue"
ctx.stroke()
# 用綠色再畫一條直線
ctx.beginPath()
ctx.lineWidth = 3
ctx.moveTo(0, 0)
ctx.lineTo(500, 500)
ctx.strokeStyle = "green"
ctx.stroke()
# 用黑色畫一個圓
ctx.beginPath()
ctx.lineWidth = 3
ctx.strokeStyle = "black"
ctx.arc(250,250,50,0,2*math.pi)
ctx.stroke()
</script>
<canvas id="plotarea" width="800" height="600"></canvas>
</body>
</html>
'''
    return outstring
@cherrypy.expose
# N: tooth count, M: module, P: pressure angle.
def do3Dgear(self, N=20, M=5, P=15):
    """Serve a canvas demo page; N and M become the moveTo() coordinates.

    NOTE(review): this is currently an exact duplicate of ``do2Dgear``
    (no 3D drawing yet); P is accepted but never used.
    """
    outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<!-- 載入 brython.js -->
<script type="text/javascript" src="/static/Brython3.1.0-20150301-090019/brython.js"></script>
<script src="/static/Cango2D.js" type="text/javascript"></script>
<script src="/static/gearUtils-04.js" type="text/javascript"></script>
</head>
<!-- 啟動 brython() -->
<body onload="brython()">
<!-- 以下為 canvas 畫圖程式 -->
<script type="text/python">
# 從 browser 導入 document
from browser import document
import math
# 畫布指定在名稱為 plotarea 的 canvas 上
canvas = document["plotarea"]
ctx = canvas.getContext("2d")
# 用紅色畫一條直線
ctx.beginPath()
ctx.lineWidth = 3
'''
    # Splice the request parameters into the served script text.
    outstring += '''
ctx.moveTo('''+str(N)+","+str(M)+")"
    outstring += '''
ctx.lineTo(0, 500)
ctx.strokeStyle = "red"
ctx.stroke()
# 用藍色再畫一條直線
ctx.beginPath()
ctx.lineWidth = 3
ctx.moveTo(0, 0)
ctx.lineTo(500, 0)
ctx.strokeStyle = "blue"
ctx.stroke()
# 用綠色再畫一條直線
ctx.beginPath()
ctx.lineWidth = 3
ctx.moveTo(0, 0)
ctx.lineTo(500, 500)
ctx.strokeStyle = "green"
ctx.stroke()
# 用黑色畫一個圓
ctx.beginPath()
ctx.lineWidth = 3
ctx.strokeStyle = "black"
ctx.arc(250,250,50,0,2*math.pi)
ctx.stroke()
</script>
<canvas id="plotarea" width="800" height="600"></canvas>
</body>
</html>
'''
    return outstring
@cherrypy.expose
# N: tooth count, M: module, P: pressure angle.
def mygeartest(self, N=20, M=5, P=15):
    """Serve a page whose embedded Brython script draws one involute
    spur gear (hard-coded: pitch radius 300, 41 teeth, blue) on a canvas.

    NOTE(review): N, M and P are accepted but not spliced into the page;
    the drawing parameters are the hard-coded gear(400,400,300,41,...) call.
    """
    outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<!-- 載入 brython.js -->
<script type="text/javascript" src="/static/Brython3.1.0-20150301-090019/brython.js"></script>
<script src="/static/Cango2D.js" type="text/javascript"></script>
<script src="/static/gearUtils-04.js" type="text/javascript"></script>
</head>
<!-- 啟動 brython() -->
<body onload="brython()">
<!-- 以下為 canvas 畫圖程式 -->
<script type="text/python">
# 從 browser 導入 document
from browser import document
from math import *
# 準備在 id="plotarea" 的 canvas 中繪圖
canvas = document["plotarea"]
ctx = canvas.getContext("2d")
def create_line(x1, y1, x2, y2, width=3, fill="red"):
ctx.beginPath()
ctx.lineWidth = width
ctx.moveTo(x1, y1)
ctx.lineTo(x2, y2)
ctx.strokeStyle = fill
ctx.stroke()
# 導入數學函式後, 圓周率為 pi
# deg 為角度轉為徑度的轉換因子
deg = pi/180.
#
# 以下分別為正齒輪繪圖與主 tkinter 畫布繪圖
#
# 定義一個繪正齒輪的繪圖函式
# midx 為齒輪圓心 x 座標
# midy 為齒輪圓心 y 座標
# rp 為節圓半徑, n 為齒數
def gear(midx, midy, rp, n, 顏色):
# 將角度轉換因子設為全域變數
global deg
# 齒輪漸開線分成 15 線段繪製
imax = 15
# 在輸入的畫布上繪製直線, 由圓心到節圓 y 軸頂點畫一直線
create_line(midx, midy, midx, midy-rp)
# 畫出 rp 圓, 畫圓函式尚未定義
#create_oval(midx-rp, midy-rp, midx+rp, midy+rp, width=2)
# a 為模數 (代表公制中齒的大小), 模數為節圓直徑(稱為節徑)除以齒數
# 模數也就是齒冠大小
a=2*rp/n
# d 為齒根大小, 為模數的 1.157 或 1.25倍, 這裡採 1.25 倍
d=2.5*rp/n
# ra 為齒輪的外圍半徑
ra=rp+a
print("ra:", ra)
# 畫出 ra 圓, 畫圓函式尚未定義
#create_oval(midx-ra, midy-ra, midx+ra, midy+ra, width=1)
# rb 則為齒輪的基圓半徑
# 基圓為漸開線長齒之基準圓
rb=rp*cos(20*deg)
print("rp:", rp)
print("rb:", rb)
# 畫出 rb 圓 (基圓), 畫圓函式尚未定義
#create_oval(midx-rb, midy-rb, midx+rb, midy+rb, width=1)
# rd 為齒根圓半徑
rd=rp-d
# 當 rd 大於 rb 時
print("rd:", rd)
# 畫出 rd 圓 (齒根圓), 畫圓函式尚未定義
#create_oval(midx-rd, midy-rd, midx+rd, midy+rd, width=1)
# dr 則為基圓到齒頂圓半徑分成 imax 段後的每段半徑增量大小
# 將圓弧分成 imax 段來繪製漸開線
dr=(ra-rb)/imax
# tan(20*deg)-20*deg 為漸開線函數
sigma=pi/(2*n)+tan(20*deg)-20*deg
for j in range(n):
ang=-2.*j*pi/n+sigma
ang2=2.*j*pi/n+sigma
lxd=midx+rd*sin(ang2-2.*pi/n)
lyd=midy-rd*cos(ang2-2.*pi/n)
#for(i=0;i<=imax;i++):
for i in range(imax+1):
r=rb+i*dr
theta=sqrt((r*r)/(rb*rb)-1.)
alpha=theta-atan(theta)
xpt=r*sin(alpha-ang)
ypt=r*cos(alpha-ang)
xd=rd*sin(-ang)
yd=rd*cos(-ang)
# i=0 時, 繪線起點由齒根圓上的點, 作為起點
if(i==0):
last_x = midx+xd
last_y = midy-yd
# 由左側齒根圓作為起點, 除第一點 (xd,yd) 齒根圓上的起點外, 其餘的 (xpt,ypt)則為漸開線上的分段點
create_line((midx+xpt),(midy-ypt),(last_x),(last_y),fill=顏色)
# 最後一點, 則為齒頂圓
if(i==imax):
lfx=midx+xpt
lfy=midy-ypt
last_x = midx+xpt
last_y = midy-ypt
# the line from last end of dedendum point to the recent
# end of dedendum point
# lxd 為齒根圓上的左側 x 座標, lyd 則為 y 座標
# 下列為齒根圓上用來近似圓弧的直線
create_line((lxd),(lyd),(midx+xd),(midy-yd),fill=顏色)
#for(i=0;i<=imax;i++):
for i in range(imax+1):
r=rb+i*dr
theta=sqrt((r*r)/(rb*rb)-1.)
alpha=theta-atan(theta)
xpt=r*sin(ang2-alpha)
ypt=r*cos(ang2-alpha)
xd=rd*sin(ang2)
yd=rd*cos(ang2)
# i=0 時, 繪線起點由齒根圓上的點, 作為起點
if(i==0):
last_x = midx+xd
last_y = midy-yd
# 由右側齒根圓作為起點, 除第一點 (xd,yd) 齒根圓上的起點外, 其餘的 (xpt,ypt)則為漸開線上的分段點
create_line((midx+xpt),(midy-ypt),(last_x),(last_y),fill=顏色)
# 最後一點, 則為齒頂圓
if(i==imax):
rfx=midx+xpt
rfy=midy-ypt
last_x = midx+xpt
last_y = midy-ypt
# lfx 為齒頂圓上的左側 x 座標, lfy 則為 y 座標
# 下列為齒頂圓上用來近似圓弧的直線
create_line(lfx,lfy,rfx,rfy,fill=顏色)
gear(400,400,300,41,"blue")
</script>
<canvas id="plotarea" width="800" height="800"></canvas>
</body>
</html>
'''
    return outstring
@cherrypy.expose
# M: module, P: pressure angle (degrees), N1..N6: tooth counts of gears 1-6.
def mygeartest2(self , M=15, P=15,N1=7, N2=9,N3=11,N4=13,N5=15 ,N6=17):
    """Serve a page whose embedded Brython script draws a meshed train of
    six spur gears (via the client-side ``spur`` module) on a canvas.

    The form parameters are spliced into the served script as the module,
    pressure angle and per-gear tooth counts.

    Fix: the stroke colour of gear 6 was "pruple", which is not a valid
    CSS colour keyword (an invalid canvas strokeStyle assignment is
    ignored, so the gear drew in the previous colour); corrected to
    "purple".
    """
    outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<!-- 載入 brython.js -->
<script type="text/javascript" src="/static/Brython3.1.1-20150328-091302/brython.js"></script>
<script src="/static/Cango2D.js" type="text/javascript"></script>
<script src="/static/gearUtils-04.js" type="text/javascript"></script>
</head>
<!-- 啟動 brython() -->
<body onload="brython()">
<form method=POST action=mygeartest2>
模數:<input type=text name=M><br />
壓力角:<input type=text name=P><br />
齒輪1齒數:<input type=text name=N1><br />
齒輪2齒數:<input type=text name=N2><br />
齒輪3齒數:<input type=text name=N3><br />
齒輪4齒數:<input type=text name=N4><br />
齒輪5齒數:<input type=text name=N5><br />
齒輪6齒數:<input type=text name=N6><br />
<input type=submit value=send>
</form>
<!-- 以下為 canvas 畫圖程式 -->
<script type="text/python">
# 從 browser 導入 document
from browser import document
from math import *
# 請注意, 這裡導入位於 Lib/site-packages 目錄下的 spur.py 檔案
import spur
# 準備在 id="plotarea" 的 canvas 中繪圖
canvas = document["plotarea"]
ctx = canvas.getContext("2d")
# 以下利用 spur.py 程式進行繪圖, 接下來的協同設計運算必須要配合使用者的需求進行設計運算與繪圖
# 其中並將工作分配給其他組員建立類似 spur.py 的相關零件繪圖模組
# midx, midy 為齒輪圓心座標, rp 為節圓半徑, n 為齒數, pa 為壓力角, color 為線的顏色
# Gear(midx, midy, rp, n=20, pa=20, color="black"):
# 模數決定齒的尺寸大小, 囓合齒輪組必須有相同的模數與壓力角
# 壓力角 pa 單位為角度
pa ='''+str(P)+'''
# m 為模數
m = '''+str(M)+'''
# 第1齒輪齒數
n_g1 = '''+str(N1)+'''
# 第2齒輪齒數
n_g2 = '''+str(N2)+'''
# 第3齒輪齒數
n_g3 = '''+str(N3)+'''
# 第4齒輪齒數
n_g4 = '''+str(N4)+'''
# 第5齒輪齒數
n_g5 = '''+str(N5)+'''
# 第5齒輪齒數
n_g6 = '''+str(N6)+'''
# 計算兩齒輪的節圓半徑
rp_g1 = m*n_g1/2
rp_g2 = m*n_g2/2
rp_g3 = m*n_g3/2
rp_g4 = m*n_g4/2
rp_g5 = m*n_g5/2
rp_g6 = m*n_g6/2
# 繪圖第1齒輪的圓心座標
x_g1 = 200
y_g1 = 200
# 第2齒輪的圓心座標, 假設排列成水平, 表示各齒輪圓心 y 座標相同
x_g2 = x_g1 + rp_g1 + rp_g2
y_g2 = y_g1
# 第3齒輪的圓心座標
x_g3 = x_g1 + rp_g1 + 2*rp_g2 + rp_g3
y_g3 = y_g1
# 第4齒輪的圓心座標
x_g4 = x_g1 + rp_g1 + 2*rp_g2 +2* rp_g3+rp_g4
y_g4= y_g1
# 第5齒輪的圓心座標
x_g5 = x_g1+ rp_g1 + 2*rp_g2 +2* rp_g3+2*rp_g4+rp_g5
y_g5= y_g1
# 第6齒輪的圓心座標
x_g6 = x_g1+ rp_g1 + 2*rp_g2 +2* rp_g3+2*rp_g4+2*rp_g5+rp_g6
y_g6= y_g1
# 將第1齒輪順時鐘轉 90 度
# 使用 ctx.save() 與 ctx.restore() 以確保各齒輪以相對座標進行旋轉繪圖
ctx.save()
# translate to the origin of second gear
ctx.translate(x_g1, y_g1)
# rotate to engage
ctx.rotate(pi/2)
# put it back
ctx.translate(-x_g1, -y_g1)
spur.Spur(ctx).Gear(x_g1, y_g1, rp_g1, n_g1, pa, "blue")
ctx.restore()
# 將第2齒輪逆時鐘轉 90 度之後, 再多轉一齒, 以便與第1齒輪進行囓合
ctx.save()
# translate to the origin of second gear
ctx.translate(x_g2, y_g2)
# rotate to engage
ctx.rotate(-pi/2-pi/n_g2)
# put it back
ctx.translate(-x_g2, -y_g2)
spur.Spur(ctx).Gear(x_g2, y_g2, rp_g2, n_g2, pa, "black")
ctx.restore()
# 將第3齒輪逆時鐘轉 90 度之後, 再往回轉第2齒輪定位帶動轉角, 然後再逆時鐘多轉一齒, 以便與第2齒輪進行囓合
ctx.save()
# translate to the origin of second gear
ctx.translate(x_g3, y_g3)
# rotate to engage
# pi+pi/n_g2 為第2齒輪從順時鐘轉 90 度之後, 必須配合目前的標記線所作的齒輪 2 轉動角度, 要轉換到齒輪3 的轉動角度
# 必須乘上兩齒輪齒數的比例, 若齒輪2 大, 則齒輪3 會轉動較快
# 第1個 -pi/2 為將原先垂直的第3齒輪定位線逆時鐘旋轉 90 度
# -pi/n_g3 則是第3齒與第2齒定位線重合後, 必須再逆時鐘多轉一齒的轉角, 以便進行囓合
# (pi+pi/n_g2)*n_g2/n_g3 則是第2齒原定位線為順時鐘轉動 90 度,
# 但是第2齒輪為了與第1齒輪囓合, 已經距離定位線, 多轉了 180 度, 再加上第2齒輪的一齒角度, 因為要帶動第3齒輪定位,
# 這個修正角度必須要再配合第2齒與第3齒的轉速比加以轉換成第3齒輪的轉角, 因此乘上 n_g2/n_g3
ctx.rotate(-pi/2-pi/n_g3+(pi+pi/n_g2)*n_g2/n_g3)
# put it back
ctx.translate(-x_g3, -y_g3)
spur.Spur(ctx).Gear(x_g3, y_g3, rp_g3, n_g3, pa, "red")
ctx.restore()
#齒輪4
ctx.save()
# translate to the origin of second gear
ctx.translate(x_g4, y_g4)
# rotate to engage
ctx.rotate(-pi/2-pi/n_g4+(pi+pi/n_g3)*n_g3/n_g4-(pi+pi/n_g2)*n_g2/n_g4)
# put it back
ctx.translate(-x_g4, -y_g4)
spur.Spur(ctx).Gear(x_g4, y_g4, rp_g4, n_g4, pa, "pink")
ctx.restore()
#齒輪5
ctx.save()
# translate to the origin of second gear
ctx.translate(x_g5, y_g5)
# rotate to engage
ctx.rotate(-pi/2-pi/n_g5+(pi+pi/n_g4)*n_g4/n_g5-(pi+pi/n_g3)*n_g3/n_g5+(pi+pi/n_g2)*n_g2/n_g5)
# put it back
ctx.translate(-x_g5, -y_g5)
spur.Spur(ctx).Gear(x_g5, y_g5, rp_g5, n_g5, pa, "yellow")
ctx.restore()
#齒輪6
ctx.save()
# translate to the origin of second gear
ctx.translate(x_g6, y_g6)
# rotate to engage
ctx.rotate(-pi/2-pi/n_g6+(pi+pi/n_g5)*n_g5/n_g6-
(pi+pi/n_g4)*n_g4/n_g6+(pi+pi/n_g3)*n_g3/n_g6- (pi+pi/n_g2)*n_g2/n_g6)
# put it back
ctx.translate(-x_g6, -y_g6)
spur.Spur(ctx).Gear(x_g6, y_g6, rp_g6, n_g6, pa, "purple")
ctx.restore()
# 按照上面三個正齒輪的囓合轉角運算, 隨後的傳動齒輪轉角便可依此類推, 完成6個齒輪的囓合繪圖
</script>
<canvas id="plotarea" width="2500" height="1500"></canvas>
</body>
</html>
'''
    return outstring
@cherrypy.expose
# M: module, P: pressure angle, N1/N2: tooth counts of the two gears.
def mygeartest3(self , M=10, P=20,N1=15, N2=24):
    """Serve a page whose embedded Brython script draws two vertically
    stacked meshed spur gears; tooth counts are chosen from dropdowns
    (15-80) and spliced into the served script together with M and P.
    """
    outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<!-- 載入 brython.js -->
<script type="text/javascript" src="/static/Brython3.1.1-20150328-091302/brython.js"></script>
<script src="/static/Cango2D.js" type="text/javascript"></script>
<script src="/static/gearUtils-04.js" type="text/javascript"></script>
</head>
<!-- 啟動 brython() -->
<body onload="brython()">
<form method=POST action=mygeartest3>
模數:<input type=text name=M><br />
壓力角:<input type=text name=P><br />
齒輪1齒數:<select name=N1><br />
<option>15</option>
<option>16</option>
<option>17</option>
<option>18</option>
<option>19</option>
<option>20</option>
<option>21</option>
<option>22</option>
<option>23</option>
<option>24</option>
<option>25</option>
<option>26</option>
<option>27</option>
<option>28</option>
<option>29</option>
<option>30</option>
<option>31</option>
<option>32</option>
<option>33</option>
<option>34</option>
<option>35</option>
<option>36</option>
<option>37</option>
<option>38</option>
<option>39</option>
<option>40</option>
<option>41</option>
<option>42</option>
<option>43</option>
<option>44</option>
<option>45</option>
<option>46</option>
<option>47</option>
<option>48</option>
<option>49</option>
<option>50</option>
<option>51</option>
<option>52</option>
<option>53</option>
<option>54</option>
<option>55</option>
<option>56</option>
<option>57</option>
<option>58</option>
<option>59</option>
<option>60</option>
<option>61</option>
<option>62</option>
<option>63</option>
<option>64</option>
<option>65</option>
<option>66</option>
<option>67</option>
<option>68</option>
<option>69</option>
<option>70</option>
<option>71</option>
<option>72</option>
<option>73</option>
<option>74</option>
<option>75</option>
<option>76</option>
<option>77</option>
<option>78</option>
<option>79</option>
<option>80</option>
</select>
齒輪2齒數:<select name=N2>
<option>15</option>
<option>16</option>
<option>17</option>
<option>18</option>
<option>19</option>
<option>20</option>
<option>21</option>
<option>22</option>
<option>23</option>
<option>24</option>
<option>25</option>
<option>26</option>
<option>27</option>
<option>28</option>
<option>29</option>
<option>30</option>
<option>31</option>
<option>32</option>
<option>33</option>
<option>34</option>
<option>35</option>
<option>36</option>
<option>37</option>
<option>38</option>
<option>39</option>
<option>40</option>
<option>41</option>
<option>42</option>
<option>43</option>
<option>44</option>
<option>45</option>
<option>46</option>
<option>47</option>
<option>48</option>
<option>49</option>
<option>50</option>
<option>51</option>
<option>52</option>
<option>53</option>
<option>54</option>
<option>55</option>
<option>56</option>
<option>57</option>
<option>58</option>
<option>59</option>
<option>60</option>
<option>61</option>
<option>62</option>
<option>63</option>
<option>64</option>
<option>65</option>
<option>66</option>
<option>67</option>
<option>68</option>
<option>69</option>
<option>70</option>
<option>71</option>
<option>72</option>
<option>73</option>
<option>74</option>
<option>75</option>
<option>76</option>
<option>77</option>
<option>78</option>
<option>79</option>
<option>80</option>
</select><br />
<input type=submit value=send>
</form>
<!-- 以下為 canvas 畫圖程式 -->
<script type="text/python">
# 從 browser 導入 document
from browser import document
from math import *
# 請注意, 這裡導入位於 Lib/site-packages 目錄下的 spur.py 檔案
import spur
# 準備在 id="plotarea" 的 canvas 中繪圖
canvas = document["plotarea"]
ctx = canvas.getContext("2d")
# 以下利用 spur.py 程式進行繪圖, 接下來的協同設計運算必須要配合使用者的需求進行設計運算與繪圖
# 其中並將工作分配給其他組員建立類似 spur.py 的相關零件繪圖模組
# midx, midy 為齒輪圓心座標, rp 為節圓半徑, n 為齒數, pa 為壓力角, color 為線的顏色
# Gear(midx, midy, rp, n=20, pa=20, color="black"):
# 模數決定齒的尺寸大小, 囓合齒輪組必須有相同的模數與壓力角
# 壓力角 pa 單位為角度
pa ='''+str(P)+'''
# m 為模數
m = '''+str(M)+'''
# 第1齒輪齒數
n_g1 = '''+str(N1)+'''
# 第2齒輪齒數
n_g2 = '''+str(N2)+'''
# 計算兩齒輪的節圓半徑
rp_g1 = m*n_g1/2
rp_g2 = m*n_g2/2
# 繪圖第1齒輪的圓心座標
x_g1 = 400
y_g1 = 400
# 第2齒輪的圓心座標, 假設排列成水平, 表示各齒輪圓心 y 座標相同
x_g2 = x_g1
y_g2 = y_g1+ rp_g1 + rp_g2
# 將第1齒輪順時鐘轉 90 度
# 使用 ctx.save() 與 ctx.restore() 以確保各齒輪以相對座標進行旋轉繪圖
ctx.save()
# translate to the origin of second gear
ctx.translate(x_g1, y_g1)
# rotate to engage
ctx.rotate(pi)
# put it back
ctx.translate(-x_g1, -y_g1)
spur.Spur(ctx).Gear(x_g1, y_g1, rp_g1, n_g1, pa, "blue")
ctx.restore()
# 將第2齒輪逆時鐘轉 90 度之後, 再多轉一齒, 以便與第1齒輪進行囓合
ctx.save()
# translate to the origin of second gear
ctx.translate(x_g2, y_g2)
# rotate to engage
ctx.rotate(-pi/n_g2)
# put it back
ctx.translate(-x_g2, -y_g2)
spur.Spur(ctx).Gear(x_g2, y_g2, rp_g2, n_g2, pa, "black")
ctx.restore()
# 按照上面三個正齒輪的囓合轉角運算, 隨後的傳動齒輪轉角便可依此類推, 完成6個齒輪的囓合繪圖
</script>
<canvas id="plotarea" width="2500" height="1500"></canvas>
</body>
</html>
'''
    return outstring
@cherrypy.expose
# N 為齒數, M 為模數, P 為壓力角
def mygeartest4(self, M=10, P=20, N1=15, N2=24, N3=15, N4=24):
    """Serve a Brython page that draws a train of four meshing spur gears.

    M is the gear module, P the pressure angle in degrees, and N1..N4 the
    tooth counts of gears 1-4.  The page posts back to this same handler;
    the gear geometry itself is rendered client side by the embedded
    text/python script using spur.py.

    Fixes over the original: the four hand-written 66-entry <option>
    lists are now generated, and the form gained the missing submit
    button and closing </form> tag (without them it could never be
    submitted).
    """

    def tooth_select(name, selected=None):
        # Build one <select> offering tooth counts 15..80.  `selected`
        # marks the pre-chosen entry; None selects nothing, matching the
        # original markup for gears 2 and 4.
        rows = []
        for count in range(15, 81):
            attr = ' selected="true"' if count == selected else ''
            rows.append('<option%s>%d</option>' % (attr, count))
        return '<select name=%s>\n%s\n</select>' % (name, '\n'.join(rows))

    outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<!-- 載入 brython.js -->
<script type="text/javascript" src="/static/Brython3.1.1-20150328-091302/brython.js"></script>
<script src="/static/Cango2D.js" type="text/javascript"></script>
<script src="/static/gearUtils-04.js" type="text/javascript"></script>
</head>
<!-- 啟動 brython() -->
<body onload="brython()">
<form method=POST action=mygeartest4>
模數:<input type=text name=M><br />
壓力角:<input type=text name=P><br />
齒輪1齒數:''' + tooth_select('N1', 15) + '''
齒輪2齒數:''' + tooth_select('N2') + '''
齒輪3齒數:''' + tooth_select('N3', 15) + '''
齒輪4齒數:''' + tooth_select('N4') + '''<br />
<input type=submit value=send>
</form>
<!-- 以下為 canvas 畫圖程式 -->
<script type="text/python">
# 從 browser 導入 document
from browser import document
from math import *
# 請注意, 這裡導入位於 Lib/site-packages 目錄下的 spur.py 檔案
import spur
# 準備在 id="plotarea" 的 canvas 中繪圖
canvas = document["plotarea"]
ctx = canvas.getContext("2d")
# 模數決定齒的尺寸大小, 囓合齒輪組必須有相同的模數與壓力角
# 壓力角 pa 單位為角度
pa = ''' + str(P) + '''
# m 為模數
m = ''' + str(M) + '''
# 各齒輪齒數
n_g1 = ''' + str(N1) + '''
n_g2 = ''' + str(N2) + '''
n_g3 = ''' + str(N3) + '''
n_g4 = ''' + str(N4) + '''
# 計算各齒輪的節圓半徑
rp_g1 = m*n_g1/2
rp_g2 = m*n_g2/2
rp_g3 = m*n_g3/2
rp_g4 = m*n_g4/2
# 繪圖第1齒輪的圓心座標
x_g1 = 300
y_g1 = 300
# 第2齒輪的圓心座標
x_g2 = x_g1
y_g2 = y_g1 + rp_g1 + rp_g2
# 第3齒輪的圓心座標
x_g3 = x_g2 + rp_g2+rp_g3
y_g3 = y_g1 + rp_g1 + rp_g2
# 第4齒輪的圓心座標
x_g4 = x_g1 + rp_g2+rp_g3
y_g4 = y_g1 + rp_g1 + rp_g2+rp_g3+rp_g4
# 使用 ctx.save() 與 ctx.restore() 以確保各齒輪以相對座標進行旋轉繪圖
ctx.save()
ctx.translate(x_g1, y_g1)
ctx.rotate(pi)
ctx.translate(-x_g1, -y_g1)
spur.Spur(ctx).Gear(x_g1, y_g1, rp_g1, n_g1, pa, "blue")
ctx.restore()
# 第2齒輪多轉一齒, 以便與第1齒輪進行囓合
ctx.save()
ctx.translate(x_g2, y_g2)
ctx.rotate(-pi/n_g2)
ctx.translate(-x_g2, -y_g2)
spur.Spur(ctx).Gear(x_g2, y_g2, rp_g2, n_g2, pa, "black")
ctx.restore()
# 第3齒輪帶入第2齒輪的定位轉角後, 再逆時鐘多轉一齒, 以便與第2齒輪進行囓合
ctx.save()
ctx.translate(x_g3, y_g3)
ctx.rotate(-pi/2-pi/n_g3+(pi/2+pi/n_g2)*n_g2/n_g3)
ctx.translate(-x_g3, -y_g3)
spur.Spur(ctx).Gear(x_g3, y_g3, rp_g3, n_g3, pa, "red")
ctx.restore()
# 齒輪4
ctx.save()
ctx.translate(x_g4, y_g4)
ctx.rotate(-pi/n_g4+(-pi/2+pi/n_g3)*n_g3/n_g4-(pi/2+pi/n_g2)*n_g2/n_g4)
ctx.translate(-x_g4, -y_g4)
spur.Spur(ctx).Gear(x_g4, y_g4, rp_g4, n_g4, pa, "pink")
ctx.restore()
# 按照上面正齒輪的囓合轉角運算, 隨後的傳動齒輪轉角便可依此類推
</script>
<canvas id="plotarea" width="1200" height="1200"></canvas>
</body>
</html>
'''
    return outstring
@cherrypy.expose
# N 為齒數, M 為模數, P 為壓力角
def my3Dgeartest(self, N=20, M=5, P=15):
    """Serve a Brython page that draws a single involute spur gear.

    N: tooth count, M: module, P: pressure angle in degrees.

    Fix over the original: the three parameters were accepted but never
    used -- the page always drew a hard-coded gear (rp=300, n=41, 20
    degree pressure angle).  They are now converted explicitly (form
    values arrive as strings) and spliced into the embedded client-side
    script.
    """
    n = int(N)
    m = float(M)
    pa = float(P)
    # pitch diameter = module * tooth count, so pitch radius = m*n/2
    rp = m * n / 2
    outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<!-- 載入 brython.js -->
<script type="text/javascript" src="/static/Brython3.1.0-20150301-090019/brython.js"></script>
<script src="/static/Cango2D.js" type="text/javascript"></script>
<script src="/static/gearUtils-04.js" type="text/javascript"></script>
</head>
<!-- 啟動 brython() -->
<body onload="brython()">
<!-- 以下為 canvas 畫圖程式 -->
<script type="text/python">
# 從 browser 導入 document
from browser import document
from math import *
# 準備在 id="plotarea" 的 canvas 中繪圖
canvas = document["plotarea"]
ctx = canvas.getContext("2d")
def create_line(x1, y1, x2, y2, width=3, fill="red"):
    ctx.beginPath()
    ctx.lineWidth = width
    ctx.moveTo(x1, y1)
    ctx.lineTo(x2, y2)
    ctx.strokeStyle = fill
    ctx.stroke()
# 導入數學函式後, 圓周率為 pi
# deg 為角度轉為徑度的轉換因子
deg = pi/180.
# 壓力角 (單位為角度), 由伺服器端參數 P 帶入
pa = ''' + str(pa) + '''
#
# 定義一個繪正齒輪的繪圖函式
# midx 為齒輪圓心 x 座標
# midy 為齒輪圓心 y 座標
# rp 為節圓半徑, n 為齒數
def gear(midx, midy, rp, n, 顏色):
    # 將角度轉換因子設為全域變數
    global deg
    # 齒輪漸開線分成 15 線段繪製
    imax = 15
    # 由圓心到節圓 y 軸頂點畫一直線
    create_line(midx, midy, midx, midy-rp)
    # a 為模數 (齒冠大小), 模數為節徑除以齒數
    a = 2*rp/n
    # d 為齒根大小, 為模數的 1.25 倍
    d = 2.5*rp/n
    # ra 為齒輪的外圍半徑
    ra = rp+a
    print("ra:", ra)
    # rb 為齒輪的基圓半徑 (漸開線長齒之基準圓)
    rb = rp*cos(pa*deg)
    print("rp:", rp)
    print("rb:", rb)
    # rd 為齒根圓半徑
    rd = rp-d
    print("rd:", rd)
    # dr 為基圓到齒頂圓半徑分成 imax 段後的每段半徑增量大小
    dr = (ra-rb)/imax
    # tan(pa*deg)-pa*deg 為漸開線函數
    sigma = pi/(2*n)+tan(pa*deg)-pa*deg
    for j in range(n):
        ang = -2.*j*pi/n+sigma
        ang2 = 2.*j*pi/n+sigma
        lxd = midx+rd*sin(ang2-2.*pi/n)
        lyd = midy-rd*cos(ang2-2.*pi/n)
        for i in range(imax+1):
            r = rb+i*dr
            theta = sqrt((r*r)/(rb*rb)-1.)
            alpha = theta-atan(theta)
            xpt = r*sin(alpha-ang)
            ypt = r*cos(alpha-ang)
            xd = rd*sin(-ang)
            yd = rd*cos(-ang)
            # i=0 時, 繪線起點由齒根圓上的點作為起點
            if(i==0):
                last_x = midx+xd
                last_y = midy-yd
            # 由左側齒根圓作為起點, 其餘的 (xpt,ypt) 為漸開線上的分段點
            create_line((midx+xpt),(midy-ypt),(last_x),(last_y),fill=顏色)
            # 最後一點, 則為齒頂圓
            if(i==imax):
                lfx = midx+xpt
                lfy = midy-ypt
            last_x = midx+xpt
            last_y = midy-ypt
        # 齒根圓上用來近似圓弧的直線
        create_line((lxd),(lyd),(midx+xd),(midy-yd),fill=顏色)
        for i in range(imax+1):
            r = rb+i*dr
            theta = sqrt((r*r)/(rb*rb)-1.)
            alpha = theta-atan(theta)
            xpt = r*sin(ang2-alpha)
            ypt = r*cos(ang2-alpha)
            xd = rd*sin(ang2)
            yd = rd*cos(ang2)
            if(i==0):
                last_x = midx+xd
                last_y = midy-yd
            # 右側齒形漸開線
            create_line((midx+xpt),(midy-ypt),(last_x),(last_y),fill=顏色)
            if(i==imax):
                rfx = midx+xpt
                rfy = midy-ypt
            last_x = midx+xpt
            last_y = midy-ypt
        # 齒頂圓上用來近似圓弧的直線
        create_line(lfx,lfy,rfx,rfy,fill=顏色)
gear(400,400,''' + str(rp) + ''',''' + str(n) + ''',"blue")
</script>
<canvas id="plotarea" width="800" height="800"></canvas>
</body>
</html>
'''
    return outstring
@cherrypy.expose
def doCheck(self, guess=None):
    """Compare the submitted guess with the answer stored in the session.

    Returns "big"/"small" hints plus a fresh guess form, an "error"
    prefix for non-numeric input, or a success message reporting the
    number of attempts.  Redirects to the root page when called without
    a guess or without a valid session.
    """
    # Redirect to the root page when doCheck is executed directly
    # without a form submission.
    if guess is None:
        raise cherrypy.HTTPRedirect("/")
    # Fetch the stored answer.  A missing session value makes int(None)
    # raise TypeError; a corrupted value raises ValueError.  Either way
    # the user did not start from the root page, so send them back.
    try:
        theanswer = int(cherrypy.session.get('answer'))
    except (TypeError, ValueError):
        raise cherrypy.HTTPRedirect("/")
    # Form input arrives as a string; reject non-numeric guesses.
    try:
        theguess = int(guess)
    except (TypeError, ValueError):
        return "error " + self.guessform()
    # Count every answered attempt.
    cherrypy.session['count'] += 1
    # Compare the answer with the guess.
    if theanswer < theguess:
        return "big " + self.guessform()
    elif theanswer > theguess:
        return "small " + self.guessform()
    else:
        # Correct guess: report how many attempts were used (the count
        # was previously fetched but never shown).
        thecount = cherrypy.session.get('count')
        return "exact: %s <a href=''>再猜</a>" % thecount
def guessform(self):
    """Return the guess-input HTML form, prefixed with the session's
    answer/count values (left visible for debugging)."""
    answer = cherrypy.session.get('answer')
    count = cherrypy.session.get('count')
    form = ('<form method=POST action=doCheck>\n'
            '請輸入您所猜的整數:<input type=text name=guess><br />\n'
            '<input type=submit value=send>\n'
            '</form>')
    return '%s/%s%s' % (answer, count, form)
################# (4) 程式啟動區 (application start-up section)
# Static directories/files are resolved relative to the program
# directory (_curdir) and the writable data directory (data_dir).
application_conf = {'/static':{
    'tools.staticdir.on': True,
    # a 'static' directory must be created manually under the
    # execution directory
    'tools.staticdir.dir': _curdir+"/static"},
    '/downloads':{
    'tools.staticdir.on': True,
    'tools.staticdir.dir': data_dir+"/downloads"},
    '/images':{
    'tools.staticdir.on': True,
    'tools.staticdir.dir': data_dir+"/images"}
}
# Mount the root handler and the two sub-applications.
root = Hello()
root.man = man.MAN()
root.man2 = man2.MAN()
if 'OPENSHIFT_REPO_DIR' in os.environ.keys():
    # Running on OpenShift: expose a WSGI Application for the platform.
    application = cherrypy.Application(root, config=application_conf)
else:
    # Running locally: start the built-in CherryPy server.
    cherrypy.quickstart(root, config=application_conf)
| gpl-3.0 |
Osndok/zim-desktop-wiki | zim/export/__init__.py | 1 | 2943 | # -*- coding: utf-8 -*-
# Copyright 2008-2014 Jaap Karssenberg <jaap.karssenberg@gmail.com>
'''This module contains the framework for exporting data from zim.
The main API for exporting from the application is the L{Exporter}
object. There are subclasses of Exporter to export to multiple files,
to a single file or to a MHTML file.
To configure the exporter object an additional L{ExportLayout} object
is used. This layout defines the exact mapping of pages to files to
be used.
To specify the pages to export, a subclass of the L{PageSelection} class
is used. There are selection classes to export the whole notebook or
to export a single page.
The L{ExportTemplateContext} object defines specific template parameters
for exporting. See also L{zim.templates} for template parsing and
processing classes.
See the module functions for convenient standard configurations.
'''
# TODO test with fake file / dir objects ! Speedy test of all combos
# TODO - when exporting with namespace / prefix we should also trim
# links within a SingleFile output relative to that prefix
# --> do not leak info on parent namespace names
from zim.fs import Dir, File
from zim.templates import get_template
from zim.formats import get_format
def build_notebook_exporter(dir, format, template, **opts):
	'''Returns an L{Exporter} that writes a whole notebook to a folder,
	producing one file per page
	'''
	from zim.export.layouts import MultiFileLayout
	from zim.export.exporters.files import MultiFileExporter

	tmpl = get_template(format, template)
	extension = get_format(format).info['extension']
	return MultiFileExporter(
		MultiFileLayout(dir, extension), tmpl, format, **opts)
def build_page_exporter(file, format, template, page, **opts):
	'''Returns an L{Exporter} that writes a page and its subpages to a
	single file plus an attachments folder (e.g. "page.html" with
	"page_files/")
	'''
	from zim.export.layouts import FileLayout
	from zim.export.exporters.files import MultiFileExporter

	tmpl = get_template(format, template)
	extension = get_format(format).info['extension']
	return MultiFileExporter(
		FileLayout(file, page, extension), tmpl, format, **opts)
def build_single_file_exporter(file, format, template, namespace=None, **opts):
	'''Returns an L{Exporter} that renders a set of pages into one
	single output file
	'''
	from zim.export.layouts import SingleFileLayout
	from zim.export.exporters.files import SingleFileExporter

	tmpl = get_template(format, template)
	return SingleFileExporter(SingleFileLayout(file), tmpl, format, **opts)
def build_mhtml_file_exporter(file, template, **opts):
	'''Returns an L{Exporter} that bundles a set of pages into one
	MHTML archive file
	'''
	from zim.export.exporters.mhtml import MHTMLExporter

	return MHTMLExporter(file, get_template('html', template), **opts)
| gpl-2.0 |
mitdrc/director | src/python/director/switchplanner.py | 5 | 12264 | import os
import sys
import vtkAll as vtk
import math
import time
import types
import functools
import numpy as np
from director import transformUtils
from director import lcmUtils
from director.timercallback import TimerCallback
from director.asynctaskqueue import AsyncTaskQueue
from director import objectmodel as om
from director import visualization as vis
from director import applogic as app
from director.debugVis import DebugData
from director import ikplanner
from director.ikparameters import IkParameters
from director import ioUtils
from director.simpletimer import SimpleTimer
from director.utime import getUtime
from director import affordanceitems
from director import robotstate
from director import robotplanlistener
from director import segmentation
from director import planplayback
from director import affordanceupdater
from director import segmentationpanel
from director import vtkNumpy as vnp
from director.tasks.taskuserpanel import TaskUserPanel
from director.tasks.taskuserpanel import ImageBasedAffordanceFit
import director.tasks.robottasks as rt
import director.tasks.taskmanagerwidget as tmw
import drc as lcmdrc
import copy
from PythonQt import QtCore, QtGui
class SwitchPlanner(object):
    """Motion-planning helper for the switch-flipping task.

    Wraps the director robotSystem: a 'Switch Box' affordance models the
    switch, and reach targets are derived from frames calibrated
    relative to that box (see assignFrames).
    """

    def __init__(self, robotSystem):
        self.robotSystem = robotSystem
        self.robotModel = robotSystem.robotStateModel
        self.ikPlanner = robotSystem.ikPlanner
        # Keep back and pelvis fixed while planning manipulation moves.
        self.lockBackForManip = True
        self.lockBaseForManip = True
        self.graspingHand = 'right'
        self.assignFrames()
        self.plans = []

    def assignFrames(self):
        """Define calibrated frames expressed relative to the switch box.

        The pose values were measured offline on the physical setup.
        """
        # stance foot frame in box coordinates
        self.footToBox = transformUtils.transformFromPose(
            np.array([-0.6436723, 0.18848073, -1.13987699]),
            np.array([0.99576385, 0., 0., -0.09194753]))
        # palm grasp frame in box coordinates
        self.palmToBox = transformUtils.transformFromPose(
            np.array([-0.13516451, -0.12463758, 0.25173153]),
            np.array([-0.69867721, 0.07265162, 0.70682793, 0.08346358]))
        # pinch frame in box coordinates: pinch->palm composed with palm->box
        pinchToBox = self.getPinchToPalmFrame()
        pinchToBox.PostMultiply()
        pinchToBox.Concatenate(self.palmToBox)
        self.pinchToBox = pinchToBox

    def spawnBoxAffordanceAtFrame(self, boxFrame):
        """Create the 'Switch Box' affordance with its front face at boxFrame."""
        print('spawning switch box affordance')
        dimensions = [0.08, 0.19, 0.25]
        depth = dimensions[0]
        # shift by half the depth so the affordance is centered behind
        # the given face frame
        boxFrame.PreMultiply()
        boxFrame.Translate(depth / 2.0, 0.0, 0.0)
        pose = transformUtils.poseFromTransform(boxFrame)
        desc = dict(classname='BoxAffordanceItem', Name='Switch Box',
                    Dimensions=dimensions, pose=pose, Color=[0, 1, 0])
        self.boxAffordance = segmentation.affordanceManager.newAffordanceFromDescription(desc)
        self.updateReachFrame()

    def updateReachFrame(self):
        """Recompute the visualized 'pinch reach frame' from the current box pose."""
        graspFrame = transformUtils.copyFrame(self.pinchToBox)
        boxFrame = om.findObjectByName('Switch Box').getChildFrame().transform
        graspFrame.PostMultiply()
        graspFrame.Concatenate(boxFrame)
        vis.updateFrame(graspFrame, 'pinch reach frame', scale=0.2)

    def planArmsPrep1(self, startPose=None):
        """Plan a posture move to the first arms-preparation configuration."""
        ikPlanner = self.robotSystem.ikPlanner
        if startPose is None:
            startPose = self.getPlanningStartPose()
        startPoseName = 'q_arms_prep1_start'
        self.robotSystem.ikPlanner.addPose(startPose, startPoseName)
        endPose = ikPlanner.getMergedPostureFromDatabase(startPose, 'surprise:switch', 'arm_balance', side='left')
        endPose = ikPlanner.getMergedPostureFromDatabase(endPose, 'surprise:switch', 'reach_up_2', side='right')
        ikParameters = IkParameters(maxDegreesPerSecond=30)
        plan = ikPlanner.computePostureGoal(startPose, endPose, feetOnGround=False, ikParameters=ikParameters)
        self.addPlan(plan)

    def planArmsPrep2(self, startPose=None):
        """Plan a posture move to the second arms-preparation configuration."""
        ikPlanner = self.robotSystem.ikPlanner
        if startPose is None:
            startPose = self.getPlanningStartPose()
        startPoseName = 'q_arms_prep2_start'
        self.robotSystem.ikPlanner.addPose(startPose, startPoseName)
        endPose = ikPlanner.getMergedPostureFromDatabase(startPose, 'surprise:switch', 'reach_up_1', side='right')
        ikParameters = IkParameters(maxDegreesPerSecond=30)
        plan = ikPlanner.computePostureGoal(startPose, endPose, feetOnGround=False, ikParameters=ikParameters)
        self.addPlan(plan)

    def planReach(self):
        """Plan a right-palm reach to the user-placed 'reach frame'."""
        ikPlanner = self.robotSystem.ikPlanner
        startPose = self.getPlanningStartPose()
        startPoseName = 'q_reach_start'
        endPoseName = 'q_reach_end'
        self.robotSystem.ikPlanner.addPose(startPose, startPoseName)
        side = 'right'
        # base/back locked; only the reaching arm moves
        movingReachConstraint = ikPlanner.createMovingReachConstraints(startPoseName, lockBase=True, lockBack=True, lockArm=True, side=side)
        palmToHand = ikPlanner.getPalmToHandLink(side=side)
        targetFrame = om.findObjectByName('reach frame').transform
        poseConstraints = ikPlanner.createPositionOrientationGraspConstraints(side, targetFrame, graspToHandLinkFrame=palmToHand, angleToleranceInDegrees=5.0)
        constraints = []
        constraints.extend(movingReachConstraint)
        constraints.extend(poseConstraints)
        constraintSet = ikplanner.ConstraintSet(ikPlanner, constraints, endPoseName, startPoseName)
        constraintSet.ikParameters = IkParameters(maxDegreesPerSecond=30)
        # seed/nominal posture from the stored 'above_switch' database entry
        seedPose = ikPlanner.getMergedPostureFromDatabase(startPose, 'surprise:switch', 'above_switch', side='right')
        seedPoseName = 'q_above_switch'
        self.robotSystem.ikPlanner.addPose(seedPose, seedPoseName)
        constraintSet.seedPoseName = seedPoseName
        constraintSet.nominalPoseName = seedPoseName
        endPose, info = constraintSet.runIk()
        plan = constraintSet.planEndPoseGoal()
        self.addPlan(plan)

    def planPinchReach(self, maxDegreesPerSecond=None):
        """Plan a pinch-grasp reach to the 'pinch reach frame'."""
        if maxDegreesPerSecond is None:
            maxDegreesPerSecond = 10
        ikPlanner = self.ikPlanner
        targetFrame = om.findObjectByName('pinch reach frame').transform
        pinchToHand = self.getPinchToHandFrame()
        startPose = self.getPlanningStartPose()
        constraintSet = self.computeGraspPose(startPose, targetFrame, graspToHand=pinchToHand)
        constraintSet.ikParameters = IkParameters(maxDegreesPerSecond=maxDegreesPerSecond)
        seedPose = ikPlanner.getMergedPostureFromDatabase(startPose, 'surprise:switch', 'above_switch', side='right')
        seedPoseName = 'q_above_switch'
        self.robotSystem.ikPlanner.addPose(seedPose, seedPoseName)
        constraintSet.seedPoseName = seedPoseName
        constraintSet.nominalPoseName = seedPoseName
        endPose, info = constraintSet.runIk()
        plan = constraintSet.planEndPoseGoal()
        self.addPlan(plan)

    def getPlanningStartPose(self):
        """Return the current measured joint configuration."""
        return self.robotSystem.robotStateJointController.q

    def addPlan(self, plan):
        """Append a plan to the history; the last entry is what gets committed."""
        self.plans.append(plan)

    def planWalking(self):
        """Plan walking for the stored footstep plan.

        NOTE(review): relies on self.footstepPlanner / self.footstepPlan,
        which are not assigned in __init__ -- presumably set externally
        before this is called; confirm before use.
        """
        startPose = self.getPlanningStartPose()
        walkingPlan = self.footstepPlanner.sendWalkingPlanRequest(self.footstepPlan, startPose, waitForResponse=True)
        self.addPlan(walkingPlan)

    def getStanceFrame(self):
        """Return the mid-feet frame of the current robot state."""
        return self.robotSystem.footstepsDriver.getFeetMidPoint(self.robotModel, useWorldZ=False)

    def spawnBoxAffordance(self):
        """Spawn the switch box at its calibrated offset from the current stance."""
        stanceFrame = self.getStanceFrame()
        boxFrame = transformUtils.copyFrame(stanceFrame)
        boxFrame.PreMultiply()
        boxFrame.Concatenate(self.footToBox.GetLinearInverse())
        self.spawnBoxAffordanceAtFrame(boxFrame)

    def spawnFootstepFrame(self):
        """Place the 'switch box stance frame' goal relative to the box."""
        boxFrame = om.findObjectByName('Switch Box').getChildFrame().transform
        goalFrame = transformUtils.copyFrame(self.footToBox)
        goalFrame.PostMultiply()
        goalFrame.Concatenate(boxFrame)
        # translate the goal frame to match the current robot height
        stanceFrame = self.getStanceFrame()
        stanceHeight = stanceFrame.GetPosition()[2]
        goalHeight = goalFrame.GetPosition()[2]
        goalFrame.PreMultiply()
        goalFrame.Translate(0.0, 0.0, stanceHeight - goalHeight)
        vis.updateFrame(goalFrame, 'switch box stance frame', scale=0.2)

    def planNominal(self):
        """Plan a move back to the 'safe nominal' standing posture."""
        ikPlanner = self.robotSystem.ikPlanner
        startPose = self.getPlanningStartPose()
        endPose = ikPlanner.getMergedPostureFromDatabase(startPose, 'General', 'safe nominal')
        endPose, info = ikPlanner.computeStandPose(endPose)
        newPlan = ikPlanner.computePostureGoal(startPose, endPose)
        self.addPlan(newPlan)

    def commitManipPlan(self):
        """Send the most recently computed plan to the manip planner."""
        self.robotSystem.manipPlanner.commitManipPlan(self.plans[-1])

    def getPinchToHandFrame(self):
        """Return the calibrated pinch frame expressed in the hand link frame."""
        pinchToHand = transformUtils.transformFromPose(
            np.array([-1.22270636e-07, -3.11575498e-01, 0.00000000e+00]),
            np.array([3.26794897e-07, -2.42861455e-17, -1.85832253e-16, 1.00000000e+00]))
        return pinchToHand

    def getPinchToPalmFrame(self):
        """Return the pinch frame expressed in the palm frame."""
        pinchToPalm = transformUtils.copyFrame(self.getPinchToHandFrame())
        palmToHand = self.ikPlanner.getPalmToHandLink(side='right')
        pinchToPalm.PostMultiply()
        pinchToPalm.Concatenate(palmToHand.GetLinearInverse())
        return pinchToPalm

    def getThumbToPalmFrame(self):
        """Thumb grasp frame; identity (no offset calibrated)."""
        return vtk.vtkTransform()

    def getGraspToHandFrame(self):
        """Return the grasp-to-hand frame for the current grasp mode.

        Mode is currently fixed to 'palm'; 'pinch' and 'thumb' remain
        available via the dispatch table.
        """
        mode = 'palm'
        graspToPalm = {'palm': vtk.vtkTransform,
                       'pinch': self.getPinchToPalmFrame,
                       'thumb': self.getThumbToPalmFrame}[mode]()
        return self.ikPlanner.newGraspToHandFrame(self.graspingHand, graspToPalmFrame=graspToPalm)

    def computeGraspPose(self, startPose, targetFrame, graspToHand=None):
        """Build (and return) the end-effector goal constraint set."""
        side = self.graspingHand
        if graspToHand is None:
            graspToHand = self.ikPlanner.getPalmToHandLink(side=side)
        constraintSet = self.ikPlanner.planEndEffectorGoal(
            startPose, side, targetFrame,
            lockBase=self.lockBaseForManip, lockBack=self.lockBackForManip,
            graspToHandLinkFrame=graspToHand)
        return constraintSet

    def computeGraspPlan(self, targetFrame, graspToHandFrame, inLine=False, ikParameters=None):
        """Plan a reach of the grasping hand to targetFrame.

        When inLine is True the grasp frame is additionally constrained
        to move along the straight line between its start and end poses.

        Bug fix: the original unpacked ``endPose, constraintSet`` from
        computeGraspPose, which returns only the constraint set -- this
        raised at runtime.  The end pose is now obtained by running IK.
        """
        startPose = self.getPlanningStartPose()
        constraintSet = self.computeGraspPose(startPose, targetFrame)
        if ikParameters:
            constraintSet.ikParameters = ikParameters
            constraintSet.ikParameters.usePointwise = False
        endPose, info = constraintSet.runIk()
        if inLine:
            handLinkName = self.ikPlanner.getHandLink(self.graspingHand)
            graspToHand = graspToHandFrame
            # grasp frame in world at start and end of the motion
            handToWorld1 = self.ikPlanner.getLinkFrameAtPose(handLinkName, startPose)
            handToWorld2 = self.ikPlanner.getLinkFrameAtPose(handLinkName, endPose)
            handToWorld1 = transformUtils.concatenateTransforms([graspToHand, handToWorld1])
            handToWorld2 = transformUtils.concatenateTransforms([graspToHand, handToWorld2])
            motionVector = np.array(handToWorld2.GetPosition()) - np.array(handToWorld1.GetPosition())
            motionTargetFrame = transformUtils.getTransformFromOriginAndNormal(
                np.array(handToWorld2.GetPosition()), motionVector)
            # constrain the grasp frame to the line spanned by the motion vector
            p = self.ikPlanner.createLinePositionConstraint(
                handLinkName, graspToHand, motionTargetFrame, lineAxis=2,
                bounds=[-np.linalg.norm(motionVector), 0.001], positionTolerance=0.001)
            p.tspan = np.linspace(0, 1, 5)
            constraintSet.constraints.append(p)
            newPlan = constraintSet.runIkTraj()
        else:
            newPlan = self.ikPlanner.computePostureGoal(startPose, endPose)
        return newPlan
| bsd-3-clause |
Eric89GXL/scikit-learn | examples/applications/wikipedia_principal_eigenvector.py | 41 | 7742 | """
===============================
Wikipedia principal eigenvector
===============================
A classical way to assert the relative importance of vertices in a
graph is to compute the principal eigenvector of the adjacency matrix
so as to assign to each vertex the values of the components of the first
eigenvector as a centrality score:
http://en.wikipedia.org/wiki/Eigenvector_centrality
On the graph of webpages and links those values are called the PageRank
scores by Google.
The goal of this example is to analyze the graph of links inside
wikipedia articles to rank articles by relative importance according to
this eigenvector centrality.
The traditional way to compute the principal eigenvector is to use the
power iteration method:
http://en.wikipedia.org/wiki/Power_iteration
Here the computation is achieved thanks to Martinsson's Randomized SVD
algorithm implemented in the scikit.
The graph data is fetched from the DBpedia dumps. DBpedia is an extraction
of the latent structured data of the Wikipedia content.
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from __future__ import print_function
from bz2 import BZ2File
import os
from datetime import datetime
from pprint import pprint
from time import time
import numpy as np
from scipy import sparse
from sklearn.decomposition import randomized_svd
from sklearn.externals.joblib import Memory
print(__doc__)
###############################################################################
# Where to download the data, if not already on disk
# DBpedia dump URLs; the local filename is the last path component.
redirects_url = "http://downloads.dbpedia.org/3.5.1/en/redirects_en.nt.bz2"
redirects_filename = redirects_url.rsplit("/", 1)[1]

page_links_url = "http://downloads.dbpedia.org/3.5.1/en/page_links_en.nt.bz2"
page_links_filename = page_links_url.rsplit("/", 1)[1]

resources = [
    (redirects_url, redirects_filename),
    (page_links_url, page_links_filename),
]

# Download each dump once; skip files already on disk.
for url, filename in resources:
    if not os.path.exists(filename):
        # NOTE(review): urllib.urlopen is Python 2 only; under Python 3
        # this would need urllib.request.urlopen -- confirm the target
        # runtime before reuse.
        import urllib
        print("Downloading data from '%s', please wait..." % url)
        opener = urllib.urlopen(url)
        open(filename, 'wb').write(opener.read())
        print()
###############################################################################
# Loading the redirect files
memory = Memory(cachedir=".")
def index(redirects, index_map, k):
    """Resolve *k* through the redirect map and return its integer id,
    assigning the next free id the first time a name is seen."""
    resolved = redirects.get(k, k)
    if resolved not in index_map:
        index_map[resolved] = len(index_map)
    return index_map[resolved]
DBPEDIA_RESOURCE_PREFIX_LEN = len("http://dbpedia.org/resource/")
SHORTNAME_SLICE = slice(DBPEDIA_RESOURCE_PREFIX_LEN + 1, -1)


def short_name(nt_uri):
    """Strip the surrounding '<'/'>' markers and the common DBpedia
    resource prefix from an N-Triples URI, leaving the article name."""
    return nt_uri[DBPEDIA_RESOURCE_PREFIX_LEN + 1:-1]
def get_redirects(redirects_filename):
    """Parse the redirections and build a transitively closed map out of it

    Returns a dict mapping each redirecting article name to its final
    (fully resolved) target name.
    """
    redirects = {}
    print("Parsing the NT redirect file")
    # NOTE(review): on Python 3 BZ2File yields bytes while short_name and
    # the string concatenation below expect str (this file is written for
    # Python 2) -- confirm the target runtime before reuse.
    for l, line in enumerate(BZ2File(redirects_filename)):
        # an N-Triples line is "<source> <predicate> <target> ." -> 4 tokens
        split = line.split()
        if len(split) != 4:
            print("ignoring malformed line: " + line)
            continue
        redirects[short_name(split[0])] = short_name(split[2])
        if l % 1000000 == 0:
            # progress heartbeat every million lines
            print("[%s] line: %08d" % (datetime.now().isoformat(), l))

    # compute the transitive closure
    print("Computing the transitive closure of the redirect relation")
    for l, source in enumerate(redirects.keys()):
        transitive_target = None
        target = redirects[source]
        # follow the redirect chain until it ends or loops back on itself
        seen = set([source])
        while True:
            transitive_target = target
            target = redirects.get(target)
            if target is None or target in seen:
                break
            seen.add(target)
        redirects[source] = transitive_target
        if l % 1000000 == 0:
            print("[%s] line: %08d" % (datetime.now().isoformat(), l))

    return redirects
# disabling joblib as the pickling of large dicts seems much too slow
#@memory.cache
def get_adjacency_matrix(redirects_filename, page_links_filename, limit=None):
    """Extract the adjacency graph as a scipy sparse matrix

    Redirects are resolved first.

    Returns X, the scipy sparse adjacency matrix, redirects as python
    dict from article names to article names and index_map a python dict
    from article names to python int (article indexes).

    `limit` caps the number of page-link lines read (useful to stay in
    RAM on the full dump).
    """
    print("Computing the redirect map")
    redirects = get_redirects(redirects_filename)

    print("Computing the integer index map")
    index_map = dict()
    links = list()
    for l, line in enumerate(BZ2File(page_links_filename)):
        # N-Triples line: "<source> <predicate> <target> ." -> 4 tokens
        split = line.split()
        if len(split) != 4:
            print("ignoring malformed line: " + line)
            continue
        # article ids after redirect resolution: i links to j
        i = index(redirects, index_map, short_name(split[0]))
        j = index(redirects, index_map, short_name(split[2]))
        links.append((i, j))
        if l % 1000000 == 0:
            print("[%s] line: %08d" % (datetime.now().isoformat(), l))

        if limit is not None and l >= limit - 1:
            break

    print("Computing the adjacency matrix")
    # LIL format is efficient for incremental element assignment
    X = sparse.lil_matrix((len(index_map), len(index_map)), dtype=np.float32)
    for i, j in links:
        X[i, j] = 1.0
    del links
    print("Converting to CSR representation")
    # CSR is efficient for the row slicing / products done downstream
    X = X.tocsr()
    print("CSR conversion done")
    return X, redirects, index_map
# stop after 5M links to make it possible to work in RAM
X, redirects, index_map = get_adjacency_matrix(
redirects_filename, page_links_filename, limit=5000000)
names = dict((i, name) for name, i in index_map.iteritems())
print("Computing the principal singular vectors using randomized_svd")
t0 = time()
U, s, V = randomized_svd(X, 5, n_iter=3)
print("done in %0.3fs" % (time() - t0))
# print the names of the wikipedia related strongest compenents of the the
# principal singular vector which should be similar to the highest eigenvector
print("Top wikipedia pages according to principal singular vectors")
pprint([names[i] for i in np.abs(U.T[0]).argsort()[-10:]])
pprint([names[i] for i in np.abs(V[0]).argsort()[-10:]])
def centrality_scores(X, alpha=0.85, max_iter=100, tol=1e-10):
    """Estimate the principal eigenvector of X by power iteration.

    This method is also known as Google PageRank and the implementation
    is based on the one from the NetworkX project (BSD licensed too)
    with copyrights by:
      Aric Hagberg <hagberg@lanl.gov>
      Dan Schult <dschult@colgate.edu>
      Pieter Swart <swart@lanl.gov>
    """
    num_pages = X.shape[0]
    X = X.copy()  # normalized in place below, so work on a private copy
    incoming_counts = np.asarray(X.sum(axis=1)).ravel()

    print("Normalizing the graph")
    # scale each non-empty row so its entries sum to 1 (CSR layout assumed)
    for node in incoming_counts.nonzero()[0]:
        X.data[X.indptr[node]:X.indptr[node + 1]] *= 1.0 / incoming_counts[node]
    # dangling nodes (empty rows) redistribute their mass uniformly
    dangle = np.asarray(np.where(X.sum(axis=1) == 0, 1.0 / num_pages, 0)).ravel()

    scores = np.ones(num_pages, dtype=np.float32) / num_pages  # initial guess
    for iteration in range(max_iter):
        print("power iteration #%d" % iteration)
        previous = scores
        scores = (alpha * (scores * X + np.dot(dangle, previous))
                  + (1 - alpha) * previous.sum() / num_pages)
        # convergence test: normalized l_inf norm of the update
        scores_max = np.abs(scores).max()
        if scores_max == 0.0:
            scores_max = 1.0
        err = np.abs(scores - previous).max() / scores_max
        print("error: %0.6f" % err)
        if err < num_pages * tol:
            return scores

    return scores
print("Computing principal eigenvector score using a power iteration method")
t0 = time()
# PageRank-style power iteration on the adjacency matrix built above
scores = centrality_scores(X, max_iter=100, tol=1e-10)
print("done in %0.3fs" % (time() - t0))
# show the ten article names with the highest centrality scores
pprint([names[i] for i in np.abs(scores).argsort()[-10:]])
| bsd-3-clause |
UXE/local-edx | common/test/acceptance/pages/lms/dashboard.py | 17 | 3339 | # -*- coding: utf-8 -*-
"""
Student dashboard page.
"""
from bok_choy.page_object import PageObject
from bok_choy.promise import EmptyPromise
from . import BASE_URL
class DashboardPage(PageObject):
    """
    Student dashboard, listing the courses the student is enrolled in.
    """

    url = BASE_URL + "/dashboard"

    def is_browser_on_page(self):
        return self.q(css='section.my-courses').present

    @property
    def current_courses_text(self):
        """
        Title label of the dashboard section that lists the student's
        enrolled courses (defined in lms/templates/dashboard.html).
        Returns "" when the section is absent.
        """
        labels = self.q(css='section#my-courses').text
        return labels[0] if len(labels) > 0 else ""

    @property
    def available_courses(self):
        """
        Return list of the names of available courses (e.g. "999 edX Demonstration Course")
        """
        def _course_name(el):
            # link text is "<course number> <course name>"; keep the name only
            number, course_name = el.text.split(' ', 1)
            return course_name

        return self.q(css='section.info > hgroup > h3 > a').map(_course_name).results

    def view_course(self, course_id):
        """
        Go to the course with `course_id` (e.g. edx/Open_DemoX/edx_demo_course)
        """
        link_css = self._link_css(course_id)
        if link_css is None:
            self.warning("No links found for course {0}".format(course_id))
        else:
            self.q(css=link_css).first.click()

    def _link_css(self, course_id):
        """
        Return a CSS selector for the link to the course with `course_id`,
        or None when no enrolled course matches.
        """
        # collect every course-entry link href, then match on the course id
        hrefs = self.q(css='a.enter-course').map(lambda el: el.get_attribute('href')).results
        for position, href in enumerate(hrefs):
            if course_id in href:
                return "a.enter-course:nth-of-type({0})".format(position + 1)
        return None

    def change_language(self, code):
        """
        Change the language on the dashboard to the language corresponding with `code`.
        """
        self.q(css=".edit-language").first.click()
        self.q(css='select[name="language"] option[value="{}"]'.format(code)).first.click()
        self.q(css="#submit-lang").first.click()
        # the submit button fires a jquery ajax POST; block until it finishes
        self.wait_for_ajax()
        self._changed_lang_promise(code).fulfill()

    def _changed_lang_promise(self, code):
        # Promise that resolves once the requested language option is
        # selected and the language modal has been dismissed.
        def _check_func():
            option_selected = self.q(css='select[name="language"] option[value="{}"]'.format(code)).selected
            modal_visible = self.q(css='section#change_language.modal').visible
            return option_selected and not modal_visible

        return EmptyPromise(_check_func, "language changed and modal hidden")
| agpl-3.0 |
ValvePython/vdf | tests/test_binary_vdf.py | 1 | 6876 | import sys
import unittest
import vdf
from io import BytesIO
from collections import OrderedDict
# Text type alias: `unicode` on Python 2, `str` on Python 3.
u = unicode if sys.version_info[0] < 3 else str
class BinaryVDF(unittest.TestCase):
    """Tests for the binary VDF and VBKV codecs in the `vdf` package.

    Covers round-tripping of every supported value type, both wire formats
    (classic and "alternative"), malformed-input error handling, and the
    VBKV header/checksum framing.
    """

    def test_BASE_INT(self):
        # repr() of the numeric wrapper base class must not raise
        repr(vdf.BASE_INT())

    def test_simple(self):
        # one pair per supported value type (str, unicode, ints, float,
        # and the typed wrappers), nested three levels deep
        pairs = [
            ('a', 'test'),
            ('a2', b'\xd0\xb0\xd0\xb1\xd0\xb2\xd0\xb3'.decode('utf-8')),
            ('bb', 1),
            ('bb2', -500),
            ('ccc', 1.0),
            ('dddd', vdf.POINTER(1234)),
            ('fffff', vdf.COLOR(1234)),
            ('gggggg', vdf.UINT_64(1234)),
            ('hhhhhhh', vdf.INT_64(-1234)),
        ]
        data = OrderedDict(pairs)
        data['level1-1'] = OrderedDict(pairs)
        data['level1-1']['level2-1'] = OrderedDict(pairs)
        data['level1-1']['level2-2'] = OrderedDict(pairs)
        data['level1-2'] = OrderedDict(pairs)
        # round-trip through the classic format, the alternative format,
        # and the checksummed VBKV container
        result = vdf.binary_loads(vdf.binary_dumps(data), mapper=OrderedDict)
        self.assertEqual(data, result)
        result = vdf.binary_loads(vdf.binary_dumps(data, alt_format=True), mapper=OrderedDict, alt_format=True)
        self.assertEqual(data, result)
        result = vdf.vbkv_loads(vdf.vbkv_dumps(data), mapper=OrderedDict)
        self.assertEqual(data, result)

    def test_vbkv_empty(self):
        with self.assertRaises(ValueError):
            vdf.vbkv_loads(b'')

    def test_loads_empty(self):
        self.assertEqual(vdf.binary_loads(b''), {})
        self.assertEqual(vdf.binary_load(BytesIO(b'')), {})

    def test_dumps_empty(self):
        self.assertEqual(vdf.binary_dumps({}), b'')
        buf = BytesIO()
        vdf.binary_dump({}, buf)
        self.assertEqual(buf.getvalue(), b'')

    def test_dumps_unicode(self):
        self.assertEqual(vdf.binary_dumps({u('a'): u('b')}), b'\x01a\x00b\x00\x08')

    def test_dumps_unicode_alternative(self):
        # alternative format uses 0x0b as the section terminator
        self.assertEqual(vdf.binary_dumps({u('a'): u('b')}, alt_format=True), b'\x01a\x00b\x00\x0b')

    def test_dump_params_invalid(self):
        with self.assertRaises(TypeError):
            vdf.binary_dump([], BytesIO())
        with self.assertRaises(TypeError):
            vdf.binary_dump({}, b'aaaa')

    def test_dumps_params_invalid(self):
        with self.assertRaises(TypeError):
            vdf.binary_dumps([])
        with self.assertRaises(TypeError):
            vdf.binary_dumps(b'aaaa')

    def test_dumps_key_invalid_type(self):
        with self.assertRaises(TypeError):
            vdf.binary_dumps({1:1})
        with self.assertRaises(TypeError):
            vdf.binary_dumps({None:1})

    def test_dumps_value_invalid_type(self):
        with self.assertRaises(TypeError):
            vdf.binary_dumps({'': None})

    def test_alternative_format(self):
        # terminator bytes from one format must be rejected by the other
        with self.assertRaises(SyntaxError):
            vdf.binary_loads(b'\x00a\x00\x00b\x00\x0b\x0b')
        with self.assertRaises(SyntaxError):
            vdf.binary_loads(b'\x00a\x00\x00b\x00\x08\x08', alt_format=True)

    def test_load_params_invalid(self):
        with self.assertRaises(TypeError):
            vdf.binary_load(b'aaaa')
        with self.assertRaises(TypeError):
            vdf.binary_load(1234)
        with self.assertRaises(TypeError):
            vdf.binary_load(BytesIO(b'aaaa'), b'bbbb')

    def test_loads_params_invalid(self):
        with self.assertRaises(TypeError):
            vdf.binary_loads([])
        with self.assertRaises(TypeError):
            vdf.binary_loads(11111)
        with self.assertRaises(TypeError):
            vdf.binary_loads(BytesIO())
        with self.assertRaises(TypeError):
            vdf.binary_load(b'', b'bbbb')

    def test_loads_unbalanced_nesting(self):
        with self.assertRaises(SyntaxError):
            vdf.binary_loads(b'\x00a\x00\x00b\x00\x08')
        with self.assertRaises(SyntaxError):
            vdf.binary_loads(b'\x00a\x00\x00b\x00\x08\x08\x08\x08')

    def test_loads_unknown_type(self):
        with self.assertRaises(SyntaxError):
            vdf.binary_loads(b'\x33a\x00\x08')

    def test_loads_unterminated_string(self):
        with self.assertRaises(SyntaxError):
            vdf.binary_loads(b'\x01abbbb')

    def test_loads_type_checks(self):
        with self.assertRaises(TypeError):
            vdf.binary_loads(None)
        with self.assertRaises(TypeError):
            vdf.binary_loads(b'', mapper=list)

    def test_merge_multiple_keys_on(self):
        # VDFDict([('a', VDFDict([('a', '1'), ('b', '2')])), ('a', VDFDict([('a', '3'), ('c', '4')]))])
        test = b'\x00a\x00\x01a\x001\x00\x01b\x002\x00\x08\x00a\x00\x01a\x003\x00\x01c\x004\x00\x08\x08'
        result = {'a': {'a': '3', 'b': '2', 'c': '4'}}
        self.assertEqual(vdf.binary_loads(test, merge_duplicate_keys=True), result)

    def test_merge_multiple_keys_off(self):
        # VDFDict([('a', VDFDict([('a', '1'), ('b', '2')])), ('a', VDFDict([('a', '3'), ('c', '4')]))])
        test = b'\x00a\x00\x01a\x001\x00\x01b\x002\x00\x08\x00a\x00\x01a\x003\x00\x01c\x004\x00\x08\x08'
        result = {'a': {'a': '3', 'c': '4'}}
        self.assertEqual(vdf.binary_loads(test, merge_duplicate_keys=False), result)

    def test_raise_on_remaining(self):
        # default binary_loads is to raise
        with self.assertRaises(SyntaxError):
            vdf.binary_loads(b'\x01key\x00value\x00\x08' + b'aaaa')
        # do not raise
        self.assertEqual(vdf.binary_loads(b'\x01key\x00value\x00\x08' + b'aaaa', raise_on_remaining=False), {'key': 'value'})

    def test_raise_on_remaining_with_file(self):
        buf = BytesIO(b'\x01key\x00value\x00\x08' + b'aaaa')
        # binary_load doesn't raise by default
        self.assertEqual(vdf.binary_load(buf), {'key': 'value'})
        self.assertEqual(buf.read(), b'aaaa')
        # raise when extra data remains
        buf.seek(0)
        with self.assertRaises(SyntaxError):
            vdf.binary_load(buf, raise_on_remaining=True)
        self.assertEqual(buf.read(), b'aaaa')

    def test_vbkv_loads_empty(self):
        with self.assertRaises(ValueError):
            vdf.vbkv_loads(b'')

    def test_vbkv_dumps_empty(self):
        self.assertEqual(vdf.vbkv_dumps({}), b'VBKV\x00\x00\x00\x00')

    def test_vbkv_loads_invalid_header(self):
        with self.assertRaises(ValueError):
            vdf.vbkv_loads(b'DD1235764tdffhghsdf')

    def test_vbkv_loads_invalid_checksum(self):
        with self.assertRaises(ValueError):
            vdf.vbkv_loads(b'VBKV\x01\x02\x03\x04\x00a\x00\x0b\x0b')

    def test_loads_utf8_invalmid(self):
        # invalid UTF-8 byte is replaced with U+FFFD rather than raising
        self.assertEqual({'aaa': b'bb\xef\xbf\xbdbb'.decode('utf-8')}, vdf.binary_loads(b'\x01aaa\x00bb\xffbb\x00\x08'))

    def test_loads_utf16(self):
        # wide-string type (0x05) decodes as UTF-16LE
        self.assertEqual({'aaa': b'b\x00b\x00\xff\xffb\x00b\x00'.decode('utf-16le')}, vdf.binary_loads(b'\x05aaa\x00b\x00b\x00\xff\xffb\x00b\x00\x00\x00\x08'))
| mit |
alash3al/rethinkdb | external/gtest_1.6.0/test/gtest_list_tests_unittest.py | 1068 | 5415 | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for Google Test's --gtest_list_tests flag.
A user can ask Google Test to list all tests by specifying the
--gtest_list_tests flag. This script tests such functionality
by invoking gtest_list_tests_unittest_ (a program written with
Google Test) with the command line flags.
"""
__author__ = 'phanna@google.com (Patrick Hanna)'
import gtest_test_utils
# Constants.
# The command line flag for enabling/disabling listing all tests.
LIST_TESTS_FLAG = 'gtest_list_tests'
# Path to the gtest_list_tests_unittest_ program.
EXE_PATH = gtest_test_utils.GetTestExecutablePath('gtest_list_tests_unittest_')
# The expected output when running gtest_list_tests_unittest_ with
# --gtest_list_tests
EXPECTED_OUTPUT_NO_FILTER = """FooDeathTest.
Test1
Foo.
Bar1
Bar2
DISABLED_Bar3
Abc.
Xyz
Def
FooBar.
Baz
FooTest.
Test1
DISABLED_Test2
Test3
"""
# The expected output when running gtest_list_tests_unittest_ with
# --gtest_list_tests and --gtest_filter=Foo*.
EXPECTED_OUTPUT_FILTER_FOO = """FooDeathTest.
Test1
Foo.
Bar1
Bar2
DISABLED_Bar3
FooBar.
Baz
FooTest.
Test1
DISABLED_Test2
Test3
"""
# Utilities.
def Run(args):
    """Invokes gtest_list_tests_unittest_ with `args` and returns its output."""
    process = gtest_test_utils.Subprocess([EXE_PATH] + args,
                                          capture_stderr=False)
    return process.output
# The unit test.
class GTestListTestsUnitTest(gtest_test_utils.TestCase):
    """Tests using the --gtest_list_tests flag to list all tests."""

    def RunAndVerify(self, flag_value, expected_output, other_flag):
        """Run gtest_list_tests_unittest_ and check the tests it prints.

        Args:
          flag_value: value of the --gtest_list_tests flag;
              None if the flag should not be present.
          expected_output: the expected output after running command;
              None means "anything except the unfiltered listing".
          other_flag: a different flag to be passed to command
              along with gtest_list_tests;
              None if the flag should not be present.
        """
        # translate the requested flag value into the actual argument
        if flag_value is None:
            flag, flag_expression = '', 'not set'
        elif flag_value == '0':
            flag, flag_expression = '--%s=0' % LIST_TESTS_FLAG, '0'
        else:
            flag, flag_expression = '--%s' % LIST_TESTS_FLAG, '1'

        args = [flag]
        if other_flag is not None:
            args.append(other_flag)

        output = Run(args)
        msg = ('when %s is %s, the output of "%s" is "%s".' %
               (LIST_TESTS_FLAG, flag_expression, ' '.join(args), output))

        if expected_output is None:
            # only require that the full unfiltered listing was NOT produced
            self.assert_(output != EXPECTED_OUTPUT_NO_FILTER, msg)
        else:
            self.assert_(output == expected_output, msg)

    def testDefaultBehavior(self):
        """Without the flag, the binary must not print the test listing."""
        self.RunAndVerify(flag_value=None,
                          expected_output=None,
                          other_flag=None)

    def testFlag(self):
        """--gtest_list_tests=0 lists nothing; =1 lists every test."""
        self.RunAndVerify(flag_value='0',
                          expected_output=None,
                          other_flag=None)
        self.RunAndVerify(flag_value='1',
                          expected_output=EXPECTED_OUTPUT_NO_FILTER,
                          other_flag=None)

    def testOverrideNonFilterFlags(self):
        """--gtest_list_tests wins over unrelated flags."""
        self.RunAndVerify(flag_value='1',
                          expected_output=EXPECTED_OUTPUT_NO_FILTER,
                          other_flag='--gtest_break_on_failure')

    def testWithFilterFlags(self):
        """--gtest_list_tests honors --gtest_filter when both are given."""
        self.RunAndVerify(flag_value='1',
                          expected_output=EXPECTED_OUTPUT_FILTER_FOO,
                          other_flag='--gtest_filter=Foo*')
if __name__ == '__main__':
    # delegate to Google Test's shared Python test driver
    gtest_test_utils.Main()
| agpl-3.0 |
40223240/cadb_g3_0420 | static/Brython3.1.1-20150328-091302/Lib/xml/dom/__init__.py | 873 | 4019 | """W3C Document Object Model implementation for Python.
The Python mapping of the Document Object Model is documented in the
Python Library Reference in the section on the xml.dom package.
This package contains the following modules:
minidom -- A simple implementation of the Level 1 DOM with namespace
support added (based on the Level 2 specification) and other
minor Level 2 functionality.
pulldom -- DOM builder supporting on-demand tree-building for selected
subtrees of the document.
"""
class Node:
    """Class giving the NodeType constants."""
    __slots__ = ()

    # DOM implementations may use this as a base class for their own
    # Node implementations.  If they don't, the constants defined here
    # should still be used as the canonical definitions as they match
    # the values given in the W3C recommendation.  Client code can
    # safely refer to these values in all tests of Node.nodeType
    # values.
    ELEMENT_NODE = 1
    ATTRIBUTE_NODE = 2
    TEXT_NODE = 3
    CDATA_SECTION_NODE = 4
    ENTITY_REFERENCE_NODE = 5
    ENTITY_NODE = 6
    PROCESSING_INSTRUCTION_NODE = 7
    COMMENT_NODE = 8
    DOCUMENT_NODE = 9
    DOCUMENT_TYPE_NODE = 10
    DOCUMENT_FRAGMENT_NODE = 11
    NOTATION_NODE = 12
# ExceptionCode constants from the DOM specification.  Each DOMException
# subclass below exposes one of these as its `code` attribute.
INDEX_SIZE_ERR = 1
DOMSTRING_SIZE_ERR = 2
HIERARCHY_REQUEST_ERR = 3
WRONG_DOCUMENT_ERR = 4
INVALID_CHARACTER_ERR = 5
NO_DATA_ALLOWED_ERR = 6
NO_MODIFICATION_ALLOWED_ERR = 7
NOT_FOUND_ERR = 8
NOT_SUPPORTED_ERR = 9
INUSE_ATTRIBUTE_ERR = 10
INVALID_STATE_ERR = 11
SYNTAX_ERR = 12
INVALID_MODIFICATION_ERR = 13
NAMESPACE_ERR = 14
INVALID_ACCESS_ERR = 15
VALIDATION_ERR = 16
class DOMException(Exception):
    """Abstract base class for DOM exceptions.

    Exceptions with specific codes are specializations of this class and
    are the only ones that may be instantiated.
    """

    def __init__(self, *args, **kw):
        # Refuse direct instantiation; only concrete subclasses (which
        # define a `code` attribute) are usable.
        if type(self) is DOMException:
            raise RuntimeError(
                "DOMException should not be instantiated directly")
        super().__init__(*args, **kw)

    def _get_code(self):
        # `code` is supplied by each concrete subclass
        return self.code
# Concrete DOMException specializations, one per ExceptionCode value;
# callers can dispatch on either the class or the `code` attribute.
class IndexSizeErr(DOMException):
    code = INDEX_SIZE_ERR

class DomstringSizeErr(DOMException):
    code = DOMSTRING_SIZE_ERR

class HierarchyRequestErr(DOMException):
    code = HIERARCHY_REQUEST_ERR

class WrongDocumentErr(DOMException):
    code = WRONG_DOCUMENT_ERR

class InvalidCharacterErr(DOMException):
    code = INVALID_CHARACTER_ERR

class NoDataAllowedErr(DOMException):
    code = NO_DATA_ALLOWED_ERR

class NoModificationAllowedErr(DOMException):
    code = NO_MODIFICATION_ALLOWED_ERR

class NotFoundErr(DOMException):
    code = NOT_FOUND_ERR

class NotSupportedErr(DOMException):
    code = NOT_SUPPORTED_ERR

class InuseAttributeErr(DOMException):
    code = INUSE_ATTRIBUTE_ERR

class InvalidStateErr(DOMException):
    code = INVALID_STATE_ERR

class SyntaxErr(DOMException):
    code = SYNTAX_ERR

class InvalidModificationErr(DOMException):
    code = INVALID_MODIFICATION_ERR

class NamespaceErr(DOMException):
    code = NAMESPACE_ERR

class InvalidAccessErr(DOMException):
    code = INVALID_ACCESS_ERR

class ValidationErr(DOMException):
    code = VALIDATION_ERR
class UserDataHandler:
    """Class giving the operation constants for UserDataHandler.handle()."""

    # Based on DOM Level 3 (WD 9 April 2002)
    NODE_CLONED = 1
    NODE_IMPORTED = 2
    NODE_DELETED = 3
    NODE_RENAMED = 4
# Well-known namespace URIs used throughout the xml.dom package.
XML_NAMESPACE = "http://www.w3.org/XML/1998/namespace"
XMLNS_NAMESPACE = "http://www.w3.org/2000/xmlns/"
XHTML_NAMESPACE = "http://www.w3.org/1999/xhtml"
# Sentinels meaning "no namespace" / "no prefix".
EMPTY_NAMESPACE = None
EMPTY_PREFIX = None
from .domreg import getDOMImplementation, registerDOMImplementation
| gpl-3.0 |
varunnaganathan/django | tests/validation/test_picklable.py | 576 | 2010 | import pickle
from unittest import TestCase
from django.core.exceptions import ValidationError
class PickableValidationErrorTestCase(TestCase):

    def _roundtrip(self, err):
        # Serialize and deserialize an exception through pickle.
        return pickle.loads(pickle.dumps(err))

    def test_validationerror_is_picklable(self):
        # single message with a code
        original = ValidationError('a', code='something')
        unpickled = self._roundtrip(original)
        self.assertIs(unpickled, unpickled.error_list[0])
        self.assertEqual(original.message, unpickled.message)
        self.assertEqual(original.code, unpickled.code)

        # wrapped in another ValidationError
        original = ValidationError('a', code='something')
        unpickled = self._roundtrip(ValidationError(original))
        self.assertIs(unpickled, unpickled.error_list[0])
        self.assertEqual(original.message, unpickled.message)
        self.assertEqual(original.code, unpickled.code)

        # list of plain messages
        original = ValidationError(['a', 'b'])
        unpickled = self._roundtrip(original)
        self.assertEqual(original.error_list[0].message, unpickled.error_list[0].message)
        self.assertEqual(original.error_list[1].message, unpickled.error_list[1].message)

        # list of plain messages, wrapped
        original = ValidationError(['a', 'b'])
        unpickled = self._roundtrip(ValidationError(original))
        self.assertEqual(original.error_list[0].message, unpickled.error_list[0].message)
        self.assertEqual(original.error_list[1].message, unpickled.error_list[1].message)

        # list of nested ValidationErrors
        original = ValidationError([ValidationError('a'), ValidationError('b')])
        unpickled = self._roundtrip(original)
        self.assertIs(unpickled.args[0][0], unpickled.error_list[0])
        self.assertEqual(original.error_list[0].message, unpickled.error_list[0].message)
        self.assertEqual(original.error_list[1].message, unpickled.error_list[1].message)

        # dict of field -> messages
        message_dict = {'field1': ['a', 'b'], 'field2': ['c', 'd']}
        original = ValidationError(message_dict)
        unpickled = self._roundtrip(original)
        self.assertEqual(unpickled.message_dict, message_dict)
| bsd-3-clause |
manuelnaranjo/LightBlue---Mac | src/mac/_IOBluetooth.py | 1 | 3556 | # Copyright (c) 2009 Bea Lam. All rights reserved.
#
# This file is part of LightBlue.
#
# LightBlue is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# LightBlue is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with LightBlue. If not, see <http://www.gnu.org/licenses/>.
"""
Provides a python interface to the Mac OSX IOBluetooth Framework classes,
through PyObjC.
For example:
>>> from lightblue import _IOBluetooth
>>> for d in _IOBluetooth.IOBluetoothDevice.recentDevices_(0):
... print d.getName()
...
Munkey
Adam
My Nokia 6600
>>>
See http://developer.apple.com/documentation/DeviceDrivers/Reference/IOBluetooth/index.html
for Apple's IOBluetooth documentation.
See http://pyobjc.sourceforge.net for details on how to access Objective-C
classes through PyObjC.
"""
import objc

try:
    # mac os 10.5 loads frameworks using bridgesupport metadata
    __bundle__ = objc.initFrameworkWrapper("IOBluetooth",
            frameworkIdentifier="com.apple.IOBluetooth",
            frameworkPath=objc.pathForFramework(
                "/System/Library/Frameworks/IOBluetooth.framework"),
            globals=globals())
except AttributeError:
    # earlier versions use loadBundle() and setSignatureForSelector()
    objc.loadBundle("IOBluetooth", globals(),
            bundle_path=objc.pathForFramework(u'/System/Library/Frameworks/IOBluetooth.framework'))

    # Sets selector signatures in order to receive arguments correctly from
    # PyObjC methods. These MUST be set, otherwise the method calls won't work
    # at all, mostly because you can't pass by pointers in Python.

    # set to return int, and take an unsigned char output arg
    # i.e. in python: return (int, unsigned char) and accept no args
    objc.setSignatureForSelector("IOBluetoothSDPServiceRecord",
            "getRFCOMMChannelID:", "i12@0:o^C")

    # set to return int, and take an unsigned int output arg
    # i.e. in python: return (int, unsigned int) and accept no args
    objc.setSignatureForSelector("IOBluetoothSDPServiceRecord",
            "getL2CAPPSM:", "i12@0:o^S")

    # set to return int, and take (output object, unsigned char, object) args
    # i.e. in python: return (int, object) and accept (unsigned char, object)
    objc.setSignatureForSelector("IOBluetoothDevice",
            "openRFCOMMChannelSync:withChannelID:delegate:", "i16@0:o^@C@")

    # set to return int, and take (output object, unsigned int, object) args
    # i.e. in python: return (int, object) and accept (unsigned int, object)
    objc.setSignatureForSelector("IOBluetoothDevice",
            "openL2CAPChannelSync:withPSM:delegate:", "i20@0:o^@I@")

    # set to return int, take a const 6-char array arg
    # i.e. in python: return object and accept 6-char list
    # this seems to work even though the selector doesn't take a char aray,
    # it takes a struct 'BluetoothDeviceAddress' which contains a char array.
    objc.setSignatureForSelector("IOBluetoothDevice",
            "withAddress:", '@12@0:r^[6C]')

# the objc module itself is only needed during setup; drop it from the
# module namespace so it is not re-exported
del objc
| gpl-3.0 |
willemneal/Docky | lib/unidecode/x075.py | 253 | 4675 | data = (
'Zhui ', # 0x00
'Ping ', # 0x01
'Bian ', # 0x02
'Zhou ', # 0x03
'Zhen ', # 0x04
'Senchigura ', # 0x05
'Ci ', # 0x06
'Ying ', # 0x07
'Qi ', # 0x08
'Xian ', # 0x09
'Lou ', # 0x0a
'Di ', # 0x0b
'Ou ', # 0x0c
'Meng ', # 0x0d
'Zhuan ', # 0x0e
'Peng ', # 0x0f
'Lin ', # 0x10
'Zeng ', # 0x11
'Wu ', # 0x12
'Pi ', # 0x13
'Dan ', # 0x14
'Weng ', # 0x15
'Ying ', # 0x16
'Yan ', # 0x17
'Gan ', # 0x18
'Dai ', # 0x19
'Shen ', # 0x1a
'Tian ', # 0x1b
'Tian ', # 0x1c
'Han ', # 0x1d
'Chang ', # 0x1e
'Sheng ', # 0x1f
'Qing ', # 0x20
'Sheng ', # 0x21
'Chan ', # 0x22
'Chan ', # 0x23
'Rui ', # 0x24
'Sheng ', # 0x25
'Su ', # 0x26
'Sen ', # 0x27
'Yong ', # 0x28
'Shuai ', # 0x29
'Lu ', # 0x2a
'Fu ', # 0x2b
'Yong ', # 0x2c
'Beng ', # 0x2d
'Feng ', # 0x2e
'Ning ', # 0x2f
'Tian ', # 0x30
'You ', # 0x31
'Jia ', # 0x32
'Shen ', # 0x33
'Zha ', # 0x34
'Dian ', # 0x35
'Fu ', # 0x36
'Nan ', # 0x37
'Dian ', # 0x38
'Ping ', # 0x39
'Ting ', # 0x3a
'Hua ', # 0x3b
'Ting ', # 0x3c
'Quan ', # 0x3d
'Zi ', # 0x3e
'Meng ', # 0x3f
'Bi ', # 0x40
'Qi ', # 0x41
'Liu ', # 0x42
'Xun ', # 0x43
'Liu ', # 0x44
'Chang ', # 0x45
'Mu ', # 0x46
'Yun ', # 0x47
'Fan ', # 0x48
'Fu ', # 0x49
'Geng ', # 0x4a
'Tian ', # 0x4b
'Jie ', # 0x4c
'Jie ', # 0x4d
'Quan ', # 0x4e
'Wei ', # 0x4f
'Fu ', # 0x50
'Tian ', # 0x51
'Mu ', # 0x52
'Tap ', # 0x53
'Pan ', # 0x54
'Jiang ', # 0x55
'Wa ', # 0x56
'Da ', # 0x57
'Nan ', # 0x58
'Liu ', # 0x59
'Ben ', # 0x5a
'Zhen ', # 0x5b
'Chu ', # 0x5c
'Mu ', # 0x5d
'Mu ', # 0x5e
'Ce ', # 0x5f
'Cen ', # 0x60
'Gai ', # 0x61
'Bi ', # 0x62
'Da ', # 0x63
'Zhi ', # 0x64
'Lue ', # 0x65
'Qi ', # 0x66
'Lue ', # 0x67
'Pan ', # 0x68
'Kesa ', # 0x69
'Fan ', # 0x6a
'Hua ', # 0x6b
'Yu ', # 0x6c
'Yu ', # 0x6d
'Mu ', # 0x6e
'Jun ', # 0x6f
'Yi ', # 0x70
'Liu ', # 0x71
'Yu ', # 0x72
'Die ', # 0x73
'Chou ', # 0x74
'Hua ', # 0x75
'Dang ', # 0x76
'Chuo ', # 0x77
'Ji ', # 0x78
'Wan ', # 0x79
'Jiang ', # 0x7a
'Sheng ', # 0x7b
'Chang ', # 0x7c
'Tuan ', # 0x7d
'Lei ', # 0x7e
'Ji ', # 0x7f
'Cha ', # 0x80
'Liu ', # 0x81
'Tatamu ', # 0x82
'Tuan ', # 0x83
'Lin ', # 0x84
'Jiang ', # 0x85
'Jiang ', # 0x86
'Chou ', # 0x87
'Bo ', # 0x88
'Die ', # 0x89
'Die ', # 0x8a
'Pi ', # 0x8b
'Nie ', # 0x8c
'Dan ', # 0x8d
'Shu ', # 0x8e
'Shu ', # 0x8f
'Zhi ', # 0x90
'Yi ', # 0x91
'Chuang ', # 0x92
'Nai ', # 0x93
'Ding ', # 0x94
'Bi ', # 0x95
'Jie ', # 0x96
'Liao ', # 0x97
'Gong ', # 0x98
'Ge ', # 0x99
'Jiu ', # 0x9a
'Zhou ', # 0x9b
'Xia ', # 0x9c
'Shan ', # 0x9d
'Xu ', # 0x9e
'Nue ', # 0x9f
'Li ', # 0xa0
'Yang ', # 0xa1
'Chen ', # 0xa2
'You ', # 0xa3
'Ba ', # 0xa4
'Jie ', # 0xa5
'Jue ', # 0xa6
'Zhi ', # 0xa7
'Xia ', # 0xa8
'Cui ', # 0xa9
'Bi ', # 0xaa
'Yi ', # 0xab
'Li ', # 0xac
'Zong ', # 0xad
'Chuang ', # 0xae
'Feng ', # 0xaf
'Zhu ', # 0xb0
'Pao ', # 0xb1
'Pi ', # 0xb2
'Gan ', # 0xb3
'Ke ', # 0xb4
'Ci ', # 0xb5
'Xie ', # 0xb6
'Qi ', # 0xb7
'Dan ', # 0xb8
'Zhen ', # 0xb9
'Fa ', # 0xba
'Zhi ', # 0xbb
'Teng ', # 0xbc
'Ju ', # 0xbd
'Ji ', # 0xbe
'Fei ', # 0xbf
'Qu ', # 0xc0
'Dian ', # 0xc1
'Jia ', # 0xc2
'Xian ', # 0xc3
'Cha ', # 0xc4
'Bing ', # 0xc5
'Ni ', # 0xc6
'Zheng ', # 0xc7
'Yong ', # 0xc8
'Jing ', # 0xc9
'Quan ', # 0xca
'Chong ', # 0xcb
'Tong ', # 0xcc
'Yi ', # 0xcd
'Kai ', # 0xce
'Wei ', # 0xcf
'Hui ', # 0xd0
'Duo ', # 0xd1
'Yang ', # 0xd2
'Chi ', # 0xd3
'Zhi ', # 0xd4
'Hen ', # 0xd5
'Ya ', # 0xd6
'Mei ', # 0xd7
'Dou ', # 0xd8
'Jing ', # 0xd9
'Xiao ', # 0xda
'Tong ', # 0xdb
'Tu ', # 0xdc
'Mang ', # 0xdd
'Pi ', # 0xde
'Xiao ', # 0xdf
'Suan ', # 0xe0
'Pu ', # 0xe1
'Li ', # 0xe2
'Zhi ', # 0xe3
'Cuo ', # 0xe4
'Duo ', # 0xe5
'Wu ', # 0xe6
'Sha ', # 0xe7
'Lao ', # 0xe8
'Shou ', # 0xe9
'Huan ', # 0xea
'Xian ', # 0xeb
'Yi ', # 0xec
'Peng ', # 0xed
'Zhang ', # 0xee
'Guan ', # 0xef
'Tan ', # 0xf0
'Fei ', # 0xf1
'Ma ', # 0xf2
'Lin ', # 0xf3
'Chi ', # 0xf4
'Ji ', # 0xf5
'Dian ', # 0xf6
'An ', # 0xf7
'Chi ', # 0xf8
'Bi ', # 0xf9
'Bei ', # 0xfa
'Min ', # 0xfb
'Gu ', # 0xfc
'Dui ', # 0xfd
'E ', # 0xfe
'Wei ', # 0xff
)
| mit |
andrejb/leap_mail | src/leap/mail/_version.py | 2 | 16747 |
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.16 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
    """Return the git-archive keyword values used for version lookup."""
    # These strings are substituted by git during `git archive`; in a plain
    # checkout they remain unexpanded.  setup.py/versioneer.py grep for the
    # variable names, so each one must stay on a line of its own.
    git_refnames = "$Format:%d$"
    git_full = "$Format:%H$"
    return {"refnames": git_refnames, "full": git_full}
class VersioneerConfig:
    """Container for Versioneer configuration parameters."""
    # Attributes (VCS, style, tag_prefix, parentdir_prefix,
    # versionfile_source, verbose) are attached by get_config().
def get_config():
    """Create, populate and return the VersioneerConfig() object."""
    # These values are baked in when 'setup.py versioneer' generates
    # _version.py for this project.
    settings = {
        "VCS": "git",
        "style": "pep440",
        "tag_prefix": "",
        "parentdir_prefix": "None",
        "versionfile_source": "src/leap/mail/_version.py",
        "verbose": False,
    }
    cfg = VersioneerConfig()
    for attribute, value in settings.items():
        setattr(cfg, attribute, value)
    return cfg
class NotThisMethod(Exception):
    """Exception raised if a method is not valid for the current scenario."""


# Registries populated by @register_vcs_handler: keyed first by VCS name,
# then by method name.
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method):  # decorator
    """Return a decorator that records a function in HANDLERS[vcs][method]."""
    def decorate(f):
        """Store f in HANDLERS[vcs][method]."""
        HANDLERS.setdefault(vcs, {})[method] = f
        return f
    return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
    """Try each executable name in `commands` until one launches.

    Returns the stripped stdout of the first command that runs to
    completion with exit status 0, or None when no candidate could be
    started or the process failed.
    """
    assert isinstance(commands, list)
    for candidate in commands:
        dispcmd = str([candidate] + args)
        try:
            # remember shell=False, so use git.cmd on windows, not just git
            process = subprocess.Popen(
                [candidate] + args, cwd=cwd, stdout=subprocess.PIPE,
                stderr=(subprocess.PIPE if hide_stderr else None))
            break
        except EnvironmentError:
            err = sys.exc_info()[1]
            if err.errno == errno.ENOENT:
                # this candidate doesn't exist; try the next one
                continue
            if verbose:
                print("unable to run %s" % dispcmd)
                print(err)
            return None
    else:
        if verbose:
            print("unable to find command, tried %s" % (commands,))
        return None

    stdout = process.communicate()[0].strip()
    if sys.version_info[0] >= 3:
        stdout = stdout.decode()
    if process.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % dispcmd)
        return None
    return stdout
def versions_from_parentdir(parentdir_prefix, root, verbose):
    """Try to determine the version from the parent directory name.

    Source tarballs conventionally unpack into a directory that includes
    both the project name and a version string.
    """
    dirname = os.path.basename(root)
    if dirname.startswith(parentdir_prefix):
        # everything after the project prefix is the version string
        return {"version": dirname[len(parentdir_prefix):],
                "full-revisionid": None,
                "dirty": False, "error": None}
    if verbose:
        print("guessing rootdir is '%s', but '%s' doesn't start with "
              "prefix '%s'" % (root, dirname, parentdir_prefix))
    raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
    """Extract version information from the given file.

    Parses the `git_refnames` / `git_full` assignments out of a generated
    _version.py with a regexp, so setup.py never has to import the module.
    Returns a (possibly partial) dict; missing file yields an empty dict.
    """
    # The code embedded in _version.py can just fetch the value of these
    # keywords.  When used from setup.py we don't want to import _version.py,
    # so we do it with a regexp instead.  The original implementation only
    # closed the file on the success path; a context manager guarantees the
    # handle is released even if reading raises.
    keywords = {}
    try:
        with open(versionfile_abs, "r") as f:
            for line in f:
                if line.strip().startswith("git_refnames ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["refnames"] = mo.group(1)
                elif line.strip().startswith("git_full ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["full"] = mo.group(1)
    except EnvironmentError:
        # unreadable/missing file: fall through and return what we have
        pass
    return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
    """Get version information from git keywords.

    Raises NotThisMethod when the keywords are absent or still unexpanded
    (i.e. this is a checkout, not a git-archive tarball).
    """
    if not keywords:
        raise NotThisMethod("no keywords at all, weird")
    refnames = keywords["refnames"].strip()
    if refnames.startswith("$Format"):
        if verbose:
            print("keywords are unexpanded, not using")
        raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
    # refnames looks like " (HEAD, tag: v1.0, master)"; split into candidates
    refs = set([r.strip() for r in refnames.strip("()").split(",")])
    # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
    # just "foo-1.0". If we see a "tag: " prefix, prefer those.
    TAG = "tag: "
    tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
    if not tags:
        # Either we're using git < 1.8.3, or there really are no tags. We use
        # a heuristic: assume all version tags have a digit. The old git %d
        # expansion behaves like git log --decorate=short and strips out the
        # refs/heads/ and refs/tags/ prefixes that would let us distinguish
        # between branches and tags. By ignoring refnames without digits, we
        # filter out many common branch names like "release" and
        # "stabilization", as well as "HEAD" and "master".
        tags = set([r for r in refs if re.search(r'\d', r)])
        if verbose:
            print("discarding '%s', no digits" % ",".join(refs-tags))
    if verbose:
        print("likely tags: %s" % ",".join(sorted(tags)))
    for ref in sorted(tags):
        # sorting will prefer e.g. "2.0" over "2.0rc1"
        if ref.startswith(tag_prefix):
            r = ref[len(tag_prefix):]
            if verbose:
                print("picking %s" % r)
            return {"version": r,
                    "full-revisionid": keywords["full"].strip(),
                    "dirty": False, "error": None
                    }
    # no suitable tags, so version is "0+unknown", but full hex is still there
    if verbose:
        print("no suitable tags, using unknown + full revision id")
    return {"version": "0+unknown",
            "full-revisionid": keywords["full"].strip(),
            "dirty": False, "error": "no suitable tags"}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
    """Get version from 'git describe' in the root of the source tree.

    This only gets called if the git-archive 'subst' keywords were *not*
    expanded, and _version.py hasn't already been rewritten with a short
    version string, meaning we're inside a checked out source tree.

    Returns a "pieces" dict with keys: long, short, closest-tag, distance,
    dirty and error.  Raises NotThisMethod when there is no .git directory
    or git itself cannot be run.
    """
    if not os.path.exists(os.path.join(root, ".git")):
        if verbose:
            print("no .git in %s" % root)
        raise NotThisMethod("no .git directory")
    GITS = ["git"]
    if sys.platform == "win32":
        GITS = ["git.cmd", "git.exe"]
    # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
    # if there isn't one, this yields HEX[-dirty] (no NUM)
    describe_out = run_command(GITS, ["describe", "--tags", "--dirty",
                                      "--always", "--long",
                                      "--match", "%s*" % tag_prefix],
                               cwd=root)
    # --long was added in git-1.5.5
    if describe_out is None:
        # run_command returns None when git is absent or exits non-zero.
        raise NotThisMethod("'git describe' failed")
    describe_out = describe_out.strip()
    full_out = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
    if full_out is None:
        raise NotThisMethod("'git rev-parse' failed")
    full_out = full_out.strip()
    pieces = {}
    pieces["long"] = full_out
    pieces["short"] = full_out[:7]  # maybe improved later
    pieces["error"] = None
    # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
    # TAG might have hyphens.
    git_describe = describe_out
    # look for -dirty suffix
    dirty = git_describe.endswith("-dirty")
    pieces["dirty"] = dirty
    if dirty:
        # strip the trailing "-dirty" so the TAG-NUM-gHEX parse below works
        git_describe = git_describe[:git_describe.rindex("-dirty")]
    # now we have TAG-NUM-gHEX or HEX
    if "-" in git_describe:
        # TAG-NUM-gHEX
        mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
        if not mo:
            # unparseable. Maybe git-describe is misbehaving?
            pieces["error"] = ("unable to parse git-describe output: '%s'"
                               % describe_out)
            return pieces
        # tag
        full_tag = mo.group(1)
        if not full_tag.startswith(tag_prefix):
            if verbose:
                fmt = "tag '%s' doesn't start with prefix '%s'"
                print(fmt % (full_tag, tag_prefix))
            pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
                               % (full_tag, tag_prefix))
            return pieces
        pieces["closest-tag"] = full_tag[len(tag_prefix):]
        # distance: number of commits since tag
        pieces["distance"] = int(mo.group(2))
        # commit: short hex revision ID
        pieces["short"] = mo.group(3)
    else:
        # HEX: no tags
        pieces["closest-tag"] = None
        count_out = run_command(GITS, ["rev-list", "HEAD", "--count"],
                                cwd=root)
        pieces["distance"] = int(count_out)  # total number of commits
    return pieces
def plus_or_dot(pieces):
    """Return "+" unless the closest tag already contains one, then ".".

    PEP 440 local version labels start with a single "+"; subsequent
    segments are joined with ".".
    """
    # "closest-tag" may be present but None (set when no tags are found);
    # the original `pieces.get("closest-tag", "")` then crashed on
    # `"+" in None`.  `or ""` keeps the membership test safe.
    if "+" in (pieces.get("closest-tag") or ""):
        return "."
    return "+"
def render_pep440(pieces):
    """Build up version string, with post-release "local version identifier".

    Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
    get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty

    Exceptions:
    1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
    """
    tag = pieces["closest-tag"]
    distance = pieces["distance"]
    short = pieces["short"]
    dirty = pieces["dirty"]
    if not tag:
        # exception #1: no tags at all, synthesize a 0-based version
        version = "0+untagged.%d.g%s" % (distance, short)
        if dirty:
            version += ".dirty"
        return version
    version = tag
    if distance or dirty:
        # attach the local-version segment only when something changed
        version += plus_or_dot(pieces)
        version += "%d.g%s" % (distance, short)
        if dirty:
            version += ".dirty"
    return version
def render_pep440_pre(pieces):
    """TAG[.post.devDISTANCE] -- No -dirty.

    Exceptions:
    1: no tags. 0.post.devDISTANCE
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1
        return "0.post.dev%d" % pieces["distance"]
    if pieces["distance"]:
        return "%s.post.dev%d" % (tag, pieces["distance"])
    return tag
def render_pep440_post(pieces):
    """TAG[.postDISTANCE[.dev0]+gHEX] .

    The ".dev0" means dirty. Note that .dev0 sorts backwards
    (a dirty tree will appear "older" than the corresponding clean one),
    but you shouldn't be releasing software with -dirty anyways.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    distance = pieces["distance"]
    dirty = pieces["dirty"]
    if tag:
        rendered = tag
        if distance or dirty:
            rendered += ".post%d" % distance
            if dirty:
                rendered += ".dev0"
            rendered += plus_or_dot(pieces)
            rendered += "g%s" % pieces["short"]
        return rendered
    # exception #1: no tag, so anchor everything at 0.postN
    rendered = "0.post%d" % distance
    if dirty:
        rendered += ".dev0"
    return rendered + "+g%s" % pieces["short"]
def render_pep440_old(pieces):
    """TAG[.postDISTANCE[.dev0]] .

    The ".dev0" means dirty.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    # (docstring typo "Eexceptions" fixed; logic unchanged)
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        if pieces["distance"] or pieces["dirty"]:
            rendered += ".post%d" % pieces["distance"]
            if pieces["dirty"]:
                rendered += ".dev0"
    else:
        # exception #1
        rendered = "0.post%d" % pieces["distance"]
        if pieces["dirty"]:
            rendered += ".dev0"
    return rendered
def render_git_describe(pieces):
    """TAG[-DISTANCE-gHEX][-dirty].

    Like 'git describe --tags --dirty --always'.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        rendered = tag
        if pieces["distance"]:
            rendered = "%s-%d-g%s" % (tag, pieces["distance"], pieces["short"])
    else:
        # exception #1: bare short hash
        rendered = pieces["short"]
    suffix = "-dirty" if pieces["dirty"] else ""
    return rendered + suffix
def render_git_describe_long(pieces):
    """TAG-DISTANCE-gHEX[-dirty].

    Like 'git describe --tags --dirty --always --long'.
    The distance/hash is unconditional.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    # (docstring typo "-long" corrected to the actual git flag "--long")
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
    else:
        # exception #1
        rendered = pieces["short"]
    if pieces["dirty"]:
        rendered += "-dirty"
    return rendered
def render(pieces, style):
    """Render the given version pieces into the requested style."""
    if pieces["error"]:
        # Parsing failed earlier; propagate the error verbatim.
        return {"version": "unknown",
                "full-revisionid": pieces.get("long"),
                "dirty": None,
                "error": pieces["error"]}
    if not style or style == "default":
        style = "pep440"  # the default
    # Dispatch table replaces the original if/elif chain.
    renderers = {
        "pep440": render_pep440,
        "pep440-pre": render_pep440_pre,
        "pep440-post": render_pep440_post,
        "pep440-old": render_pep440_old,
        "git-describe": render_git_describe,
        "git-describe-long": render_git_describe_long,
    }
    if style not in renderers:
        raise ValueError("unknown style '%s'" % style)
    rendered = renderers[style](pieces)
    return {"version": rendered, "full-revisionid": pieces["long"],
            "dirty": pieces["dirty"], "error": None}
def get_versions():
    """Get version information or return default if unable to do so.

    Tries, in order: expanded git-archive keywords, 'git describe' from the
    source-tree root, and finally the parent-directory-name heuristic.
    """
    # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
    # __file__, we can work backwards from there to the root. Some
    # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
    # case we can only use expanded keywords.
    cfg = get_config()
    verbose = cfg.verbose
    # Strategy 1: keywords baked into this file by git-archive.
    try:
        return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
                                          verbose)
    except NotThisMethod:
        pass
    # Strategy 2: ask git directly, starting from the source-tree root.
    try:
        root = os.path.realpath(__file__)
        # versionfile_source is the relative path from the top of the source
        # tree (where the .git directory might live) to this file. Invert
        # this to find the root from __file__.
        for i in cfg.versionfile_source.split('/'):
            root = os.path.dirname(root)
    except NameError:
        # No __file__ (frozen interpreter) -> cannot locate the tree root.
        return {"version": "0+unknown", "full-revisionid": None,
                "dirty": None,
                "error": "unable to find root of source tree"}
    try:
        pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
        return render(pieces, cfg.style)
    except NotThisMethod:
        pass
    # Strategy 3: parse the version out of the parent directory name.
    try:
        if cfg.parentdir_prefix:
            return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
    except NotThisMethod:
        pass
    return {"version": "0+unknown", "full-revisionid": None,
            "dirty": None,
            "error": "unable to compute version"}
| gpl-3.0 |
demarle/VTK | ThirdParty/Twisted/twisted/internet/_pollingfile.py | 32 | 8894 | # -*- test-case-name: twisted.internet.test.test_pollingfile -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Implements a simple polling interface for file descriptors that don't work with
select() - this is pretty much only useful on Windows.
"""
from zope.interface import implements
from twisted.internet.interfaces import IConsumer, IPushProducer
MIN_TIMEOUT = 0.000000001
MAX_TIMEOUT = 0.1
class _PollableResource:
active = True
def activate(self):
self.active = True
def deactivate(self):
self.active = False
class _PollingTimer:
    # Everything is private here because it is really an implementation detail.
    # Repeatedly sweeps checkWork() over the registered pollable resources,
    # adapting the poll interval between MIN_TIMEOUT and MAX_TIMEOUT based
    # on how much work the previous sweep found.
    def __init__(self, reactor):
        # reactor supplies callLater() for scheduling the next sweep.
        self.reactor = reactor
        self._resources = []
        self._pollTimer = None
        self._currentTimeout = MAX_TIMEOUT
        self._paused = False
    def _addPollableResource(self, res):
        self._resources.append(res)
        self._checkPollingState()
    def _checkPollingState(self):
        # Keep polling while at least one resource is active; stop otherwise.
        for resource in self._resources:
            if resource.active:
                self._startPolling()
                break
        else:
            self._stopPolling()
    def _startPolling(self):
        if self._pollTimer is None:
            self._pollTimer = self._pollTimer = self._reschedule() if False else self._reschedule()
    def _stopPolling(self):
        if self._pollTimer is not None:
            self._pollTimer.cancel()
            self._pollTimer = None
    def _pause(self):
        self._paused = True
    def _unpause(self):
        self._paused = False
        self._checkPollingState()
    def _reschedule(self):
        # Returns None while paused, so _startPolling leaves the timer unset.
        if not self._paused:
            return self.reactor.callLater(self._currentTimeout, self._pollEvent)
    def _pollEvent(self):
        workUnits = 0.
        anyActive = []
        for resource in self._resources:
            if resource.active:
                workUnits += resource.checkWork()
                # Check AFTER work has been done
                if resource.active:
                    anyActive.append(resource)
        # Adaptive interval: shrink when work was found, grow when idle.
        newTimeout = self._currentTimeout
        if workUnits:
            newTimeout = self._currentTimeout / (workUnits + 1.)
            if newTimeout < MIN_TIMEOUT:
                newTimeout = MIN_TIMEOUT
        else:
            newTimeout = self._currentTimeout * 2.
            if newTimeout > MAX_TIMEOUT:
                newTimeout = MAX_TIMEOUT
        self._currentTimeout = newTimeout
        if anyActive:
            self._pollTimer = self._reschedule()
# If we ever (let's hope not) need the above functionality on UNIX, this could
# be factored into a different module.
import win32pipe
import win32file
import win32api
import pywintypes
class _PollableReadPipe(_PollableResource):
    # Wraps the read end of a Windows pipe: checkWork() drains whatever is
    # currently buffered and hands it to receivedCallback; pause/resume is
    # exposed through the IPushProducer interface.
    implements(IPushProducer)
    def __init__(self, pipe, receivedCallback, lostCallback):
        # security attributes for pipes
        self.pipe = pipe
        self.receivedCallback = receivedCallback
        self.lostCallback = lostCallback
    def checkWork(self):
        # Returns the number of bytes read this poll; the polling timer
        # uses that figure to adapt its interval.
        finished = 0
        fullDataRead = []
        while 1:
            try:
                buffer, bytesToRead, result = win32pipe.PeekNamedPipe(self.pipe, 1)
                # finished = (result == -1)
                if not bytesToRead:
                    break
                hr, data = win32file.ReadFile(self.pipe, bytesToRead, None)
                fullDataRead.append(data)
            except win32api.error:
                # Peek/Read failing means the write end has gone away.
                finished = 1
                break
        dataBuf = ''.join(fullDataRead)
        if dataBuf:
            self.receivedCallback(dataBuf)
        if finished:
            self.cleanup()
        return len(dataBuf)
    def cleanup(self):
        # Deactivate first so the poll loop stops calling checkWork().
        self.deactivate()
        self.lostCallback()
    def close(self):
        try:
            win32api.CloseHandle(self.pipe)
        except pywintypes.error:
            # You can't close std handles...?
            pass
    def stopProducing(self):
        self.close()
    def pauseProducing(self):
        self.deactivate()
    def resumeProducing(self):
        self.activate()
FULL_BUFFER_SIZE = 64 * 1024
class _PollableWritePipe(_PollableResource):
    """Polled, non-blocking writer for the write end of a Windows pipe.

    Implements IConsumer: write() buffers data in outQueue, and each
    checkWork() poll flushes as much as the pipe will accept, applying
    producer back-pressure once the buffer exceeds FULL_BUFFER_SIZE.
    """

    implements(IConsumer)

    def __init__(self, writePipe, lostCallback):
        self.disconnecting = False
        self.producer = None
        self.producerPaused = False
        self.streamingProducer = 0
        self.outQueue = []
        self.writePipe = writePipe
        self.lostCallback = lostCallback
        try:
            # Switch the pipe into non-blocking mode so WriteFile never stalls.
            win32pipe.SetNamedPipeHandleState(writePipe,
                                              win32pipe.PIPE_NOWAIT,
                                              None,
                                              None)
        except pywintypes.error:
            # Maybe it's an invalid handle. Who knows.
            pass

    def close(self):
        """Request a disconnect; the pipe closes once outQueue drains."""
        self.disconnecting = True

    def bufferFull(self):
        """Ask a registered producer to pause until the buffer drains."""
        if self.producer is not None:
            self.producerPaused = True
            self.producer.pauseProducing()

    def bufferEmpty(self):
        """Resume a paused (or pull) producer; return True if one was resumed.

        Bug fix: the paused flag lives on this consumer. The original code
        assigned ``self.producer.producerPaused = False``, which left our own
        flag set (so a streaming producer was "resumed" on every poll) and
        poked a spurious attribute onto the producer object.
        """
        if self.producer is not None and ((not self.streamingProducer) or
                                          self.producerPaused):
            self.producerPaused = False
            self.producer.resumeProducing()
            return True
        return False

    # almost-but-not-quite-exact copy-paste from abstract.FileDescriptor... ugh

    def registerProducer(self, producer, streaming):
        """Register to receive data from a producer.

        This sets this selectable to be a consumer for a producer.  When this
        selectable runs out of data on a write() call, it will ask the producer
        to resumeProducing(). A producer should implement the IProducer
        interface.

        FileDescriptor provides some infrastructure for producer methods.
        """
        if self.producer is not None:
            raise RuntimeError(
                "Cannot register producer %s, because producer %s was never "
                "unregistered." % (producer, self.producer))
        if not self.active:
            # Already shut down: refuse the producer immediately.
            producer.stopProducing()
        else:
            self.producer = producer
            self.streamingProducer = streaming
            if not streaming:
                # Pull producers need an initial kick to start producing.
                producer.resumeProducing()

    def unregisterProducer(self):
        """Stop consuming data from a producer, without disconnecting.
        """
        self.producer = None

    def writeConnectionLost(self):
        """Tear down the write side and notify via lostCallback."""
        self.deactivate()
        try:
            win32api.CloseHandle(self.writePipe)
        except pywintypes.error:
            # OMG what
            pass
        self.lostCallback()

    def writeSequence(self, seq):
        """
        Append a C{list} or C{tuple} of bytes to the output buffer.

        @param seq: C{list} or C{tuple} of C{str} instances to be appended to
            the output buffer.

        @raise TypeError: If C{seq} contains C{unicode}.
        """
        if unicode in map(type, seq):
            raise TypeError("Unicode not allowed in output buffer.")
        self.outQueue.extend(seq)

    def write(self, data):
        """
        Append some bytes to the output buffer.

        @param data: C{str} to be appended to the output buffer.
        @type data: C{str}.

        @raise TypeError: If C{data} is C{unicode} instead of C{str}.
        """
        if isinstance(data, unicode):
            raise TypeError("Unicode not allowed in output buffer.")
        if self.disconnecting:
            # Drops silently once a disconnect has been requested.
            return
        self.outQueue.append(data)
        if sum(map(len, self.outQueue)) > FULL_BUFFER_SIZE:
            self.bufferFull()

    def checkWork(self):
        """Flush as much buffered data as the pipe will take this poll.

        Returns the number of bytes actually written.
        """
        numBytesWritten = 0
        if not self.outQueue:
            if self.disconnecting:
                self.writeConnectionLost()
                return 0
            try:
                # Zero-byte write probes whether the pipe is still open.
                win32file.WriteFile(self.writePipe, '', None)
            except pywintypes.error:
                self.writeConnectionLost()
                return numBytesWritten
        while self.outQueue:
            data = self.outQueue.pop(0)
            errCode = 0
            try:
                errCode, nBytesWritten = win32file.WriteFile(self.writePipe,
                                                             data, None)
            except win32api.error:
                self.writeConnectionLost()
                break
            else:
                # assert not errCode, "wtf an error code???"
                numBytesWritten += nBytesWritten
                if len(data) > nBytesWritten:
                    # Partial write: requeue the tail and retry next poll.
                    self.outQueue.insert(0, data[nBytesWritten:])
                    break
        else:
            resumed = self.bufferEmpty()
            if not resumed and self.disconnecting:
                self.writeConnectionLost()
        return numBytesWritten
| bsd-3-clause |
chouseknecht/ansible | lib/ansible/modules/cloud/google/gcp_sql_user_info.py | 5 | 5031 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_sql_user_info
description:
- Gather info for GCP User
- This module was called C(gcp_sql_user_facts) before Ansible 2.9. The usage has not
changed.
short_description: Gather info for GCP User
version_added: 2.8
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
instance:
description:
- The name of the Cloud SQL instance. This does not include the project ID.
- 'This field represents a link to a Instance resource in GCP. It can be specified
in two ways. First, you can place a dictionary with key ''name'' and value of
your resource''s name Alternatively, you can add `register: name-of-resource`
to a gcp_sql_instance task and then set this instance field to "{{ name-of-resource
}}"'
required: true
type: dict
extends_documentation_fragment: gcp
'''
EXAMPLES = '''
- name: get info on a user
gcp_sql_user_info:
instance: "{{ instance }}"
project: test_project
auth_kind: serviceaccount
service_account_file: "/tmp/auth.pem"
'''
RETURN = '''
resources:
description: List of resources
returned: always
type: complex
contains:
host:
description:
- The host name from which the user can connect. For insert operations, host
defaults to an empty string. For update operations, host is specified as part
of the request URL. The host name cannot be updated after insertion.
returned: success
type: str
name:
description:
- The name of the user in the Cloud SQL instance.
returned: success
type: str
instance:
description:
- The name of the Cloud SQL instance. This does not include the project ID.
returned: success
type: dict
password:
description:
- The password for the user.
returned: success
type: str
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, replace_resource_dict
import json
################################################################################
# Main
################################################################################
def main():
    """Entry point: validate arguments, then exit with the list of SQL users."""
    module = GcpModule(argument_spec=dict(instance=dict(required=True, type='dict')))
    if module._name == 'gcp_sql_user_facts':
        # Renamed in Ansible 2.9; keep the old alias working with a warning.
        module.deprecate("The 'gcp_sql_user_facts' module has been renamed to 'gcp_sql_user_info'", version='2.13')
    if not module.params['scopes']:
        # Default OAuth scope for the Cloud SQL Admin API.
        module.params['scopes'] = ['https://www.googleapis.com/auth/sqlservice.admin']
    return_value = {'resources': fetch_list(module, collection(module))}
    module.exit_json(**return_value)
def collection(module):
    """Build the Cloud SQL users collection URL for the module's target instance."""
    project = module.params['project']
    instance = replace_resource_dict(module.params['instance'], 'name')
    return ("https://www.googleapis.com/sql/v1beta4/projects/%s/instances/%s/users"
            % (project, instance))
def fetch_list(module, link):
    """Return all user resources from `link` (pages through the 'items' array)."""
    auth = GcpSession(module, 'sql')
    return auth.list(link, return_if_object, array_name='items')
def return_if_object(module, response):
    """Decode an API response to a dict; None for 404/204; fail_json on errors."""
    # If not found, return nothing.
    if response.status_code == 404:
        return None
    # If no content, return nothing.
    if response.status_code == 204:
        return None
    try:
        module.raise_for_status(response)
        result = response.json()
    except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
        # py2's json raises ValueError; py3 raises JSONDecodeError.
        module.fail_json(msg="Invalid JSON response with error: %s" % inst)
    if navigate_hash(result, ['error', 'errors']):
        # API-level error payload embedded in a 2xx response body.
        module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
    return result
if __name__ == "__main__":
main()
| gpl-3.0 |
40223136/2015cdag1man | static/Brython3.1.3-20150514-095342/Lib/site-packages/pygame/surface.py | 603 | 3844 | from browser import document, html, window
from javascript import console, JSConstructor
from .rect import Rect
#import pygame.rect
canvas_ID=1
_canvas_id=None
class Surface:
    """Minimal pygame.Surface work-alike backed by an HTML5 <canvas>.

    Runs under Brython in a browser: drawing goes through the canvas 2D
    context, pixel access through getImageData/putImageData.
    """

    def __init__(self, dim=[], depth=16, surf=None):
        """Create a surface from a size, another Surface, or a raw canvas.

        dim   -- (width, height), used when no source surface is given.
                 (List default kept for interface compatibility; never
                 mutated here.)
        depth -- nominal colour depth; only stored, never interpreted.
        surf  -- optional Surface or html.CANVAS to copy/wrap.
        """
        # Bug fix: canvas_ID is a module-level counter that is both read
        # (for the layer id below) and incremented.  Without this global
        # declaration the augmented assignment made canvas_ID a local,
        # so the read raised UnboundLocalError on every construction.
        global canvas_ID
        if surf is None:
            self._depth = depth
            self._canvas = html.CANVAS(width=dim[0], height=dim[1])
        elif isinstance(surf, Surface):
            # Copy the backing canvas so the two surfaces stay independent.
            self._canvas = surf.copy()
        elif isinstance(surf, html.CANVAS):
            self._canvas = surf
        self._context = self._canvas.getContext('2d')
        self._canvas.id = 'layer_%s' % canvas_ID
        canvas_ID += 1

    def blit(self, source, dest, area=None, special_flags=0):
        """Copy `source` (a Surface) onto this surface at `dest`.

        Returns the affected Rect, or None when `source` is not a Surface.
        """
        global _canvas_id
        if _canvas_id is None:
            try:
                _canvas_id = document.get(selector='canvas')[0].getAttribute('id')
            except:
                pass
        if self._canvas.id == _canvas_id:
            # Re-assigning width clears the canvas before redrawing.
            self._canvas.width = self._canvas.width
        if area is None:
            # lets set area to the size of the source
            if isinstance(source, Surface):
                area = [(0, 0), (source.canvas.width, source.canvas.height)]
        if isinstance(source, Surface):
            _ctx = source.canvas.getContext('2d')
            _subset = _ctx.getImageData(area[0][0], area[0][1], area[1][0], area[1][1])
            # we want just a subset of the source image copied
            self._context.putImageData(_subset, dest[0], dest[1])
            return Rect(dest[0], dest[1], dest[0] + _subset.width, dest[1] + _subset.height)

    def convert(self, surface=None):
        """No-op format conversion; kept for pygame API compatibility."""
        return self

    def copy(self):
        """Return a new canvas carrying a snapshot of this surface."""
        _imgdata = self._context.toDataURL('image/png')
        _canvas = html.CANVAS(width=self._canvas.width, height=self._canvas.height)
        _ctx = _canvas.getContext('2d')
        # NOTE(review): drawImage expects an image/canvas element; passing a
        # data-URL string here may not draw anything -- confirm under Brython.
        _ctx.drawImage(_imgdata, 0, 0)
        return _canvas

    def fill(self, color):
        """Fill the whole canvas with an (r, g, b) color tuple."""
        self._context.fillStyle = "rgb(%s,%s,%s)" % color
        self._context.fillRect(0, 0, self._canvas.width, self._canvas.height)

    @property
    def height(self):
        """Canvas height in pixels."""
        return int(self._canvas.height)

    @property
    def width(self):
        """Canvas width in pixels."""
        return int(self._canvas.width)

    @property
    def canvas(self):
        """The backing html.CANVAS element."""
        return self._canvas

    def scroll(self, dx=0, dy=0):
        """Shift the surface contents by (dx, dy)."""
        _imgdata = self._context.toDataURL('image/png')
        # NOTE(review): same drawImage-with-data-URL concern as copy().
        self._context.drawImage(_imgdata, dx, dy)

    def get_at(self, pos):
        """Return the pixel data at pos (an RGBA byte sequence)."""
        return self._context.getImageData(pos[0], pos[1], 1, 1).data

    def set_at(self, pos, color):
        """Set the pixel at pos to an (r, g, b) color."""
        self._context.fillStyle = 'rgb(%s,%s,%s)' % color
        self._context.fillRect(pos[0], pos[1], 1, 1)

    def get_size(self):
        """Return (width, height)."""
        return self._canvas.width, self._canvas.height

    def get_width(self):
        return self._canvas.width

    def get_height(self):
        return self._canvas.height

    def get_rect(self, centerx=None, centery=None):
        """Return a Rect covering the entire surface."""
        return Rect(0, 0, self._canvas.width, self._canvas.height)

    def set_colorkey(self, key, val):
        """Colour-key transparency is not supported on canvas; no-op."""
        pass
| gpl-3.0 |
sunqm/pyscf | pyscf/pbc/tdscf/test/test_rhf_slow.py | 2 | 3697 | from pyscf.pbc.gto import Cell
from pyscf.pbc.scf import RHF
from pyscf.pbc.tdscf import TDHF
from pyscf.pbc.tdscf.rhf_slow import PhysERI, PhysERI4, PhysERI8, TDRHF
from pyscf.tdscf.common_slow import eig
from test_common import retrieve_m, retrieve_m_hf, assert_vectors_close
import unittest
from numpy import testing
class DiamondTestGamma(unittest.TestCase):
    """Compare this (rhf_slow) vs reference (pyscf)."""
    @classmethod
    def setUpClass(cls):
        # Build the diamond cell and run SCF + reference TDHF once for
        # the whole test class (these are the expensive steps).
        cls.cell = cell = Cell()
        # Lift some degeneracies
        cell.atom = '''
        C 0.000000000000   0.000000000000   0.000000000000
        C 1.67   1.68   1.69
        '''
        cell.basis = {'C': [[0, (0.8, 1.0)],
                            [1, (1.0, 1.0)]]}
        # cell.basis = 'gth-dzvp'
        cell.pseudo = 'gth-pade'
        cell.a = '''
        0.000000000, 3.370137329, 3.370137329
        3.370137329, 0.000000000, 3.370137329
        3.370137329, 3.370137329, 0.000000000'''
        cell.unit = 'B'
        cell.verbose = 5
        cell.build()
        cls.model_rhf = model_rhf = RHF(cell)
        model_rhf.kernel()
        cls.td_model_rhf = td_model_rhf = TDHF(model_rhf)
        td_model_rhf.nroots = 5
        td_model_rhf.kernel()
        # Reference TDHF matrix extracted from the pyscf object.
        cls.ref_m_rhf = retrieve_m(td_model_rhf)
    @classmethod
    def tearDownClass(cls):
        # These are here to remove temporary files
        del cls.td_model_rhf
        del cls.model_rhf
        del cls.cell
    def test_eri(self):
        """Tests all ERI implementations: with and without symmetries."""
        for eri in (PhysERI, PhysERI4, PhysERI8):
            try:
                e = eri(self.model_rhf)
                m = e.tdhf_full_form()
                # Test matrix vs ref
                testing.assert_allclose(m, retrieve_m_hf(e), atol=1e-14)
                # Test matrix vs pyscf
                testing.assert_allclose(self.ref_m_rhf, m, atol=1e-14)
                vals, vecs = eig(m, nroots=self.td_model_rhf.nroots)
                testing.assert_allclose(vals, self.td_model_rhf.e, atol=1e-5)
            except Exception:
                # Identify which ERI flavour failed before re-raising.
                print("When testing {} the following exception occurred:".format(eri))
                raise
    def test_class(self):
        """Tests container behavior."""
        model = TDRHF(self.model_rhf)
        model.nroots = self.td_model_rhf.nroots
        assert model.fast
        e, xy = model.kernel()
        model.fast = False
        model.kernel()
        # Slow vs fast
        testing.assert_allclose(model.e, e)
        assert_vectors_close(model.xy, xy)
        # ... vs ref
        testing.assert_allclose(model.e, self.td_model_rhf.e, atol=1e-12)
        assert_vectors_close(model.xy, self.td_model_rhf.xy, atol=1e-12)
        # Test real
        testing.assert_allclose(model.e.imag, 0, atol=1e-8)
    def test_cplx(self):
        """Tests whether complex conjugation is handled correctly."""
        # Perform mf calculation
        model_rhf = RHF(self.cell)
        model_rhf.kernel()
        # Add random phases
        import numpy
        numpy.random.seed(0)
        p = numpy.exp(2.j * numpy.pi * numpy.random.rand(model_rhf.mo_coeff.shape[1]))
        model_rhf.mo_coeff = model_rhf.mo_coeff * p[numpy.newaxis, :]
        m_ref = PhysERI(model_rhf).tdhf_full_form()
        td_model_rhf = TDRHF(model_rhf)
        assert not td_model_rhf.fast
        td_model_rhf.kernel()
        # The 'fast' path requires real orbitals, so it must refuse these.
        with self.assertRaises(ValueError):
            td_model_rhf.fast = True
            td_model_rhf.kernel()
        self.assertIsInstance(td_model_rhf.eri, PhysERI4)
        m = td_model_rhf.eri.tdhf_full_form()
        testing.assert_allclose(m, m_ref, atol=1e-14)
| apache-2.0 |
alexandrujuncu/sos | sos/plugins/kimchi.py | 14 | 1619 | # Copyright IBM, Corp. 2014, Christy Perez <christy@linux.vnet.ibm.com>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
from sos.plugins import Plugin, RedHatPlugin, UbuntuPlugin, DebianPlugin
class Kimchi(Plugin, RedHatPlugin, UbuntuPlugin, DebianPlugin):
    """kimchi-related information

    Collects kimchi configuration and logs; activates only when the
    'kimchi' package is installed.
    """
    plugin_name = 'kimchi'
    packages = ('kimchi',)
    def setup(self):
        # Honour the global per-file size limit unless 'all_logs' is set.
        log_limit = self.get_option('log_size')
        self.add_copy_spec('/etc/kimchi/')
        if not self.get_option('all_logs'):
            self.add_copy_spec_limit('/var/log/kimchi/*.log',
                                     sizelimit=log_limit)
            self.add_copy_spec_limit('/etc/kimchi/kimchi*',
                                     sizelimit=log_limit)
            self.add_copy_spec_limit('/etc/kimchi/distros.d/*.json',
                                     sizelimit=log_limit)
        else:
            # Unlimited collection of the whole log directory.
            self.add_copy_spec('/var/log/kimchi/')
# vim: expandtab tabstop=4 shiftwidth=4
| gpl-2.0 |
sztanko/hadoop-common | src/contrib/hod/testing/testModule.py | 182 | 2187 | #Licensed to the Apache Software Foundation (ASF) under one
#or more contributor license agreements. See the NOTICE file
#distributed with this work for additional information
#regarding copyright ownership. The ASF licenses this file
#to you under the Apache License, Version 2.0 (the
#"License"); you may not use this file except in compliance
#with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
import unittest, os, sys, re, threading, time
myDirectory = os.path.realpath(sys.argv[0])
rootDirectory = re.sub("/testing/.*", "", myDirectory)
sys.path.append(rootDirectory)
from testing.lib import BaseTestSuite
excludes = ['test_MINITEST3']
# All test-case classes should have the naming convention test_.*
class test_MINITEST1(unittest.TestCase):
  # Placeholder case used to exercise the BaseTestSuite harness itself;
  # every method intentionally does nothing.
  def setUp(self):
    pass
  # All testMethods have to have their names start with 'test'
  def testSuccess(self):
    pass
  def testFailure(self):
    pass
  def tearDown(self):
    pass
class test_MINITEST2(unittest.TestCase):
  # Second placeholder case; identical to test_MINITEST1 so the harness
  # has multiple suites to discover and run.
  def setUp(self):
    pass
  # All testMethods have to have their names start with 'test'
  def testSuccess(self):
    pass
  def testFailure(self):
    pass
  def tearDown(self):
    pass
class test_MINITEST3(unittest.TestCase):
  # Placeholder case listed in `excludes` above, so the harness should
  # discover but skip it.
  def setUp(self):
    pass
  # All testMethods have to have their names start with 'test'
  def testSuccess(self):
    pass
  def testFailure(self):
    pass
  def tearDown(self):
    pass
class ModuleTestSuite(BaseTestSuite):
  # Aggregates all test_* classes of this module, honouring `excludes`.
  def __init__(self):
    # suite setup
    BaseTestSuite.__init__(self, __name__, excludes)
    pass
  def cleanUp(self):
    # suite tearDown
    pass
def RunModuleTests():
  """Run every test class in this module and return the unittest result."""
  # modulename_suite
  suite = ModuleTestSuite()
  testResult = suite.runTests()
  suite.cleanUp()
  return testResult
if __name__ == "__main__":
RunModuleTests()
| apache-2.0 |
adelton/origin | cmd/service-catalog/go/src/github.com/kubernetes-incubator/service-catalog/vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-worker/lib/charms/kubernetes/flagmanager.py | 182 | 4599 | #!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from charmhelpers.core import unitdata
class FlagManager:
    '''
    FlagManager - A Python class for managing the flags to pass to an
    application without remembering what's been set previously.

    This is a blind class assuming the operator knows what they are doing.
    Each instance of this class should be initialized with the intended
    application to manage flags. Flags are then appended to a data-structure
    and cached in unitdata for later recall.

    The underlying data-provider is backed by a SQLITE database on each unit,
    tracking the dictionary, provided from the 'charmhelpers' python package.

    Summary:
    opts = FlagManager('docker')
    opts.add('bip', '192.168.22.2')
    opts.to_s()
    '''

    def __init__(self, daemon, opts_path=None):
        '''
        :param daemon: name of the application whose flags are managed;
            also used as the unitdata key for persistence.
        :param opts_path: unused; retained for backward compatibility.
        '''
        self.db = unitdata.kv()
        self.daemon = daemon
        # Load any previously persisted flags; start empty otherwise.
        self.data = self.db.get(daemon) or {}

    def __save(self):
        # Persist the current flag dict under the daemon's key.
        self.db.set(self.daemon, self.data)

    def add(self, key, value, strict=False):
        '''
        Adds data to the map of values for the DockerOpts file.
        Supports single values, or "multiopt variables". If you
        have a flag only option, like --tlsverify, set the value
        to None. To preserve the exact value, pass strict
        eg:
        opts.add('label', 'foo')
        opts.add('label', 'foo, bar, baz')
        opts.add('flagonly', None)
        opts.add('cluster-store', 'consul://a:4001,b:4001,c:4001/swarm',
                 strict=True)
        '''
        if strict:
            # Strict values are stored verbatim under a mangled key so
            # to_s() can recover them without comma-splitting.
            self.data['{}-strict'.format(key)] = value
            self.__save()
            return

        if value:
            # Comma-separated values become a list; merging an existing
            # key appends only values not already present.
            values = [x.strip() for x in value.split(',')]
            if key in self.data and self.data[key] is not None:
                item_data = self.data[key]
                for c in values:
                    if c not in item_data:
                        item_data.append(c)
                self.data[key] = item_data
            else:
                # handle new key
                self.data[key] = values
        else:
            # handle flagonly (e.g. --tlsverify)
            self.data[key] = None
        self.__save()

    def remove(self, key, value):
        '''
        Remove a flag value from the DockerOpts manager
        Assuming the data is currently {'foo': ['bar', 'baz']}

        d.remove('foo', 'bar')
        > {'foo': ['baz']}

        Raises KeyError for an unknown key and ValueError when the value
        is not present, mirroring list.remove semantics.

        :params key:
        :params value:
        '''
        self.data[key].remove(value)
        self.__save()

    def destroy(self, key, strict=False):
        '''
        Destructively remove all values and key from the FlagManager
        Assuming the data is currently {'foo': ['bar', 'baz']}

        d.destroy('foo')
        > {}

        Unknown keys are ignored silently.

        :params key:
        :params strict: remove the strict variant ('<key>-strict') instead.
        '''
        try:
            if strict:
                self.data.pop('{}-strict'.format(key))
            else:
                # BUG FIX: previously popped the literal string 'key',
                # which always raised KeyError and silently left the
                # requested entry in place.
                self.data.pop(key)
            # Persist the removal, consistent with add()/remove(); the
            # original never saved here, so destroys were lost on reload.
            self.__save()
        except KeyError:
            pass

    def to_s(self):
        '''
        Render the flags to a single string, prepared for the Docker
        Defaults file. Typically in /etc/default/docker

        d.to_s()
        > "--foo=bar --foo=baz"
        '''
        flags = []
        for key in self.data:
            if self.data[key] is None:
                # handle flagonly
                flags.append("{}".format(key))
            elif '-strict' in key:
                # handle strict values, and do it in 2 steps.
                # If we rstrip -strict it strips a tailing s
                proper_key = key.rstrip('strict').rstrip('-')
                flags.append("{}={}".format(proper_key, self.data[key]))
            else:
                # handle multiopt and typical flags
                for item in self.data[key]:
                    flags.append("{}={}".format(key, item))
        return ' '.join(flags)
| apache-2.0 |
aleju/imgaug | checks/check_visually.py | 2 | 8145 | """
Tests to visually inspect the results of the library's functionality.
Run checks via
python check_visually.py
"""
from __future__ import print_function, division
import argparse
import numpy as np
from skimage import data
import imgaug as ia
from imgaug import augmenters as iaa
def main():
    """Render augmented samples (with keypoints and bounding boxes drawn
    in) for every augmenter, for manual visual inspection."""
    parser = argparse.ArgumentParser(description="Check augmenters visually.")
    parser.add_argument(
        "--only", default=None,
        help="If this is set, then only the results of an augmenter with this name will be shown. "
             "Optionally, comma-separated list.",
        required=False)
    args = parser.parse_args()

    # Two example images; everything below is applied to both.
    images = [
        ia.quokka_square(size=(128, 128)),
        ia.imresize_single_image(data.astronaut(), (128, 128))
    ]

    # Hand-placed keypoints per image, to verify coordinate augmentation.
    keypoints = [
        ia.KeypointsOnImage([
            ia.Keypoint(x=50, y=40),
            ia.Keypoint(x=70, y=38),
            ia.Keypoint(x=62, y=52)
        ],
            shape=images[0].shape
        ),
        ia.KeypointsOnImage([
            ia.Keypoint(x=55, y=32),
            ia.Keypoint(x=42, y=95),
            ia.Keypoint(x=75, y=89)
        ],
            shape=images[1].shape
        )
    ]

    # Same idea for bounding boxes (identical boxes on both images).
    bounding_boxes = [
        ia.BoundingBoxesOnImage([
            ia.BoundingBox(x1=10, y1=10, x2=20, y2=20),
            ia.BoundingBox(x1=40, y1=50, x2=70, y2=60)
        ],
            shape=images[0].shape
        ),
        ia.BoundingBoxesOnImage([
            ia.BoundingBox(x1=10, y1=10, x2=20, y2=20),
            ia.BoundingBox(x1=40, y1=50, x2=70, y2=60)
        ],
            shape=images[1].shape
        )
    ]

    # One instance of (nearly) every augmenter in the library. Each gets
    # an explicit `name` so --only can select augmenters by name.
    augmenters = [
        iaa.Sequential([
            iaa.CoarseDropout(p=0.5, size_percent=0.05),
            iaa.AdditiveGaussianNoise(scale=0.1*255),
            iaa.Crop(percent=0.1)
        ], name="Sequential"),
        iaa.SomeOf(2, children=[
            iaa.CoarseDropout(p=0.5, size_percent=0.05),
            iaa.AdditiveGaussianNoise(scale=0.1*255),
            iaa.Crop(percent=0.1)
        ], name="SomeOf"),
        iaa.OneOf(children=[
            iaa.CoarseDropout(p=0.5, size_percent=0.05),
            iaa.AdditiveGaussianNoise(scale=0.1*255),
            iaa.Crop(percent=0.1)
        ], name="OneOf"),
        iaa.Sometimes(0.5, iaa.AdditiveGaussianNoise(scale=0.1*255), name="Sometimes"),
        iaa.WithColorspace("HSV", children=[iaa.Add(20)], name="WithColorspace"),
        iaa.WithChannels([0], children=[iaa.Add(20)], name="WithChannels"),
        iaa.AddToHueAndSaturation((-20, 20), per_channel=True, name="AddToHueAndSaturation"),
        iaa.Identity(name="Identity"),
        iaa.Resize({"width": 64, "height": 64}, name="Resize"),
        iaa.CropAndPad(px=(-8, 8), name="CropAndPad-px"),
        iaa.Pad(px=(0, 8), name="Pad-px"),
        iaa.Crop(px=(0, 8), name="Crop-px"),
        iaa.Crop(percent=(0, 0.1), name="Crop-percent"),
        iaa.Fliplr(0.5, name="Fliplr"),
        iaa.Flipud(0.5, name="Flipud"),
        iaa.Superpixels(p_replace=0.75, n_segments=50, name="Superpixels"),
        iaa.Grayscale(0.5, name="Grayscale0.5"),
        iaa.Grayscale(1.0, name="Grayscale1.0"),
        iaa.GaussianBlur((0, 3.0), name="GaussianBlur"),
        iaa.AverageBlur(k=(3, 11), name="AverageBlur"),
        iaa.MedianBlur(k=(3, 11), name="MedianBlur"),
        iaa.BilateralBlur(d=10, name="BilateralBlur"),
        iaa.Sharpen(alpha=(0.1, 1.0), lightness=(0, 2.0), name="Sharpen"),
        iaa.Emboss(alpha=(0.1, 1.0), strength=(0, 2.0), name="Emboss"),
        iaa.EdgeDetect(alpha=(0.1, 1.0), name="EdgeDetect"),
        iaa.DirectedEdgeDetect(alpha=(0.1, 1.0), direction=(0, 1.0), name="DirectedEdgeDetect"),
        iaa.Add((-50, 50), name="Add"),
        iaa.Add((-50, 50), per_channel=True, name="AddPerChannel"),
        iaa.AddElementwise((-50, 50), name="AddElementwise"),
        iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.1*255), name="AdditiveGaussianNoise"),
        iaa.Multiply((0.5, 1.5), name="Multiply"),
        iaa.Multiply((0.5, 1.5), per_channel=True, name="MultiplyPerChannel"),
        iaa.MultiplyElementwise((0.5, 1.5), name="MultiplyElementwise"),
        iaa.Dropout((0.0, 0.1), name="Dropout"),
        iaa.CoarseDropout(p=0.05, size_percent=(0.05, 0.5), name="CoarseDropout"),
        iaa.Invert(p=0.5, name="Invert"),
        iaa.Invert(p=0.5, per_channel=True, name="InvertPerChannel"),
        iaa.ContrastNormalization(alpha=(0.5, 2.0), name="ContrastNormalization"),
        iaa.SaltAndPepper(p=0.05, name="SaltAndPepper"),
        iaa.Salt(p=0.05, name="Salt"),
        iaa.Pepper(p=0.05, name="Pepper"),
        iaa.CoarseSaltAndPepper(p=0.05, size_percent=(0.01, 0.1), name="CoarseSaltAndPepper"),
        iaa.CoarseSalt(p=0.05, size_percent=(0.01, 0.1), name="CoarseSalt"),
        iaa.CoarsePepper(p=0.05, size_percent=(0.01, 0.1), name="CoarsePepper"),
        iaa.Affine(
            scale={"x": (0.8, 1.2), "y": (0.8, 1.2)},
            translate_px={"x": (-16, 16), "y": (-16, 16)},
            rotate=(-45, 45),
            shear=(-16, 16),
            order=ia.ALL,
            cval=(0, 255),
            mode=ia.ALL,
            name="Affine"
        ),
        iaa.PiecewiseAffine(scale=0.03, nb_rows=(2, 6), nb_cols=(2, 6), name="PiecewiseAffine"),
        iaa.PerspectiveTransform(scale=0.1, name="PerspectiveTransform"),
        iaa.ElasticTransformation(alpha=(0.5, 8.0), sigma=1.0, name="ElasticTransformation"),
        iaa.Alpha(
            factor=(0.0, 1.0),
            first=iaa.Add(100),
            second=iaa.Dropout(0.5),
            per_channel=False,
            name="Alpha"
        ),
        iaa.Alpha(
            factor=(0.0, 1.0),
            first=iaa.Add(100),
            second=iaa.Dropout(0.5),
            per_channel=True,
            name="AlphaPerChannel"
        ),
        iaa.Alpha(
            factor=(0.0, 1.0),
            first=iaa.Affine(rotate=(-45, 45)),
            per_channel=True,
            name="AlphaAffine"
        ),
        iaa.AlphaElementwise(
            factor=(0.0, 1.0),
            first=iaa.Add(50),
            second=iaa.ContrastNormalization(2.0),
            per_channel=False,
            name="AlphaElementwise"
        ),
        iaa.AlphaElementwise(
            factor=(0.0, 1.0),
            first=iaa.Add(50),
            second=iaa.ContrastNormalization(2.0),
            per_channel=True,
            name="AlphaElementwisePerChannel"
        ),
        iaa.AlphaElementwise(
            factor=(0.0, 1.0),
            first=iaa.Affine(rotate=(-45, 45)),
            per_channel=True,
            name="AlphaElementwiseAffine"
        ),
        iaa.SimplexNoiseAlpha(
            first=iaa.EdgeDetect(1.0),
            per_channel=False,
            name="SimplexNoiseAlpha"
        ),
        iaa.FrequencyNoiseAlpha(
            first=iaa.EdgeDetect(1.0),
            per_channel=False,
            name="FrequencyNoiseAlpha"
        )
    ]

    # Two meta-augmenters built from copies of everything above. Note that
    # the second append also copies the first one (it is already in the
    # list when the second comprehension runs), and both reuse names
    # ("Sequential", "Sometimes") already present in the list.
    augmenters.append(iaa.Sequential([iaa.Sometimes(0.2, aug.copy()) for aug in augmenters], name="Sequential"))
    augmenters.append(iaa.Sometimes(0.5, [aug.copy() for aug in augmenters], name="Sometimes"))

    for augmenter in augmenters:
        if args.only is None or augmenter.name in [v.strip() for v in args.only.split(",")]:
            print("Augmenter: %s" % (augmenter.name,))
            grid = []
            for image, kps, bbs in zip(images, keypoints, bounding_boxes):
                # to_deterministic() so that images, keypoints and boxes
                # all receive the same sampled transformation.
                aug_det = augmenter.to_deterministic()
                # 16 augmented variants per image, shown side by side.
                imgs_aug = aug_det.augment_images(np.tile(image[np.newaxis, ...], (16, 1, 1, 1)))
                kps_aug = aug_det.augment_keypoints([kps] * 16)
                bbs_aug = aug_det.augment_bounding_boxes([bbs] * 16)
                imgs_aug_drawn = [kps_aug_one.draw_on_image(img_aug) for img_aug, kps_aug_one in zip(imgs_aug, kps_aug)]
                imgs_aug_drawn = [bbs_aug_one.draw_on_image(img_aug) for img_aug, bbs_aug_one in zip(imgs_aug_drawn, bbs_aug)]
                grid.append(np.hstack(imgs_aug_drawn))
            ia.imshow(np.vstack(grid))
if __name__ == "__main__":
    # Only render the inspection windows when run as a script.
    main()
| mit |
andrewmoses/ssquiz | flask/lib/python2.7/site-packages/coverage/html.py | 159 | 13097 | """HTML reporting for Coverage."""
import os, re, shutil, sys
import coverage
from coverage.backward import pickle
from coverage.misc import CoverageException, Hasher
from coverage.phystokens import source_token_lines, source_encoding
from coverage.report import Reporter
from coverage.results import Numbers
from coverage.templite import Templite
# Static files are looked for in a list of places. Earlier entries win:
# data_filename() returns the first existing match.
STATIC_PATH = [
    # The place Debian puts system Javascript libraries.
    "/usr/share/javascript",

    # Our htmlfiles directory.
    os.path.join(os.path.dirname(__file__), "htmlfiles"),
]
def data_filename(fname, pkgdir=""):
    """Return the path to one of our data files.

    Each directory on `STATIC_PATH` is checked as-is and, when `pkgdir`
    is given, with `pkgdir` appended; the first existing candidate wins.
    """
    candidates = []
    for static_dir in STATIC_PATH:
        candidates.append(os.path.join(static_dir, fname))
        if pkgdir:
            candidates.append(os.path.join(static_dir, pkgdir, fname))

    for static_filename in candidates:
        if os.path.exists(static_filename):
            return static_filename

    raise CoverageException("Couldn't find static file %r" % fname)
def data(fname):
    """Return the contents of one of our data files."""
    with open(data_filename(fname)) as data_file:
        return data_file.read()
class HtmlReporter(Reporter):
    """HTML reporting: one annotated page per source file plus an index."""

    # These files will be copied from the htmlfiles dir to the output dir.
    STATIC_FILES = [
        ("style.css", ""),
        ("jquery.min.js", "jquery"),
        ("jquery.hotkeys.js", "jquery-hotkeys"),
        ("jquery.isonscreen.js", "jquery-isonscreen"),
        ("jquery.tablesorter.min.js", "jquery-tablesorter"),
        ("coverage_html.js", ""),
        ("keybd_closed.png", ""),
        ("keybd_open.png", ""),
    ]

    def __init__(self, cov, config):
        super(HtmlReporter, self).__init__(cov, config)
        # Output directory; presumably populated by the base Reporter
        # during report_files() — TODO confirm against Reporter.
        self.directory = None
        # Values made available to every rendered template.
        self.template_globals = {
            'escape': escape,
            'title': self.config.html_title,
            '__url__': coverage.__url__,
            '__version__': coverage.__version__,
        }
        self.source_tmpl = Templite(
            data("pyfile.html"), self.template_globals
        )

        self.coverage = cov

        self.files = []          # index-info dicts, one per reported file
        self.arcs = self.coverage.data.has_arcs()  # branch data available?
        self.status = HtmlStatus()   # incremental-report bookkeeping
        self.extra_css = None        # basename of user CSS, if configured
        self.totals = Numbers()

    def report(self, morfs):
        """Generate an HTML report for `morfs`.

        `morfs` is a list of modules or filenames.

        Returns the total percent covered.
        """
        assert self.config.html_dir, "must give a directory for html reporting"

        # Read the status data.
        self.status.read(self.config.html_dir)

        # Check that this run used the same settings as the last run.
        # If not, the cached per-file hashes are useless: reset them.
        m = Hasher()
        m.update(self.config)
        these_settings = m.digest()
        if self.status.settings_hash() != these_settings:
            self.status.reset()
            self.status.set_settings_hash(these_settings)

        # The user may have extra CSS they want copied.
        if self.config.extra_css:
            self.extra_css = os.path.basename(self.config.extra_css)

        # Process all the files, writing one HTML page each.
        self.report_files(self.html_file, morfs, self.config.html_dir)

        if not self.files:
            raise CoverageException("No data to report.")

        # Write the index file.
        self.index_file()

        self.make_local_static_report_files()

        return self.totals.pc_covered

    def make_local_static_report_files(self):
        """Make local instances of static files for HTML report."""
        # The files we provide must always be copied.
        for static, pkgdir in self.STATIC_FILES:
            shutil.copyfile(
                data_filename(static, pkgdir),
                os.path.join(self.directory, static)
            )

        # The user may have extra CSS they want copied.
        if self.extra_css:
            shutil.copyfile(
                self.config.extra_css,
                os.path.join(self.directory, self.extra_css)
            )

    def write_html(self, fname, html):
        """Write `html` to `fname`, properly encoded."""
        fout = open(fname, "wb")
        try:
            # ASCII with entity references keeps the output encoding-proof.
            fout.write(html.encode('ascii', 'xmlcharrefreplace'))
        finally:
            fout.close()

    def file_hash(self, source, cu):
        """Compute a hash that changes if the file needs to be re-reported."""
        # Hash covers both the source text and its coverage data.
        m = Hasher()
        m.update(source)
        self.coverage.data.add_to_hash(cu.filename, m)
        return m.digest()

    def html_file(self, cu, analysis):
        """Generate an HTML file for one source file."""
        source_file = cu.source_file()
        try:
            source = source_file.read()
        finally:
            source_file.close()

        # Find out if the file on disk is already correct.
        flat_rootname = cu.flat_rootname()
        this_hash = self.file_hash(source, cu)
        that_hash = self.status.file_hash(flat_rootname)
        if this_hash == that_hash:
            # Nothing has changed to require the file to be reported again.
            self.files.append(self.status.index_info(flat_rootname))
            return

        self.status.set_file_hash(flat_rootname, this_hash)

        # If need be, determine the encoding of the source file. We use it
        # later to properly write the HTML.
        if sys.version_info < (3, 0):
            encoding = source_encoding(source)
            # Some UTF8 files have the dreaded UTF8 BOM. If so, junk it.
            if encoding.startswith("utf-8") and source[:3] == "\xef\xbb\xbf":
                source = source[3:]
                encoding = "utf-8"

        # Get the numbers for this file.
        nums = analysis.numbers

        if self.arcs:
            missing_branch_arcs = analysis.missing_branch_arcs()

        # These classes determine which lines are highlighted by default.
        c_run = "run hide_run"
        c_exc = "exc"
        c_mis = "mis"
        c_par = "par " + c_run

        lines = []

        for lineno, line in enumerate(source_token_lines(source)):
            lineno += 1     # 1-based line numbers.
            # Figure out how to mark this line.
            line_class = []
            annotate_html = ""
            annotate_title = ""
            if lineno in analysis.statements:
                line_class.append("stm")
            if lineno in analysis.excluded:
                line_class.append(c_exc)
            elif lineno in analysis.missing:
                line_class.append(c_mis)
            elif self.arcs and lineno in missing_branch_arcs:
                # Partial branch: annotate with the missed destinations.
                line_class.append(c_par)
                annlines = []
                for b in missing_branch_arcs[lineno]:
                    if b < 0:
                        annlines.append("exit")
                    else:
                        annlines.append(str(b))
                annotate_html = " ".join(annlines)
                if len(annlines) > 1:
                    annotate_title = "no jumps to these line numbers"
                elif len(annlines) == 1:
                    annotate_title = "no jump to this line number"
            elif lineno in analysis.statements:
                line_class.append(c_run)

            # Build the HTML for the line.
            html = []
            for tok_type, tok_text in line:
                if tok_type == "ws":
                    html.append(escape(tok_text))
                else:
                    # NOTE(review): upstream coverage.py uses '&nbsp;'
                    # as the fallback here; this copy has a plain space,
                    # possibly from entity-mangling — confirm.
                    tok_html = escape(tok_text) or ' '
                    html.append(
                        "<span class='%s'>%s</span>" % (tok_type, tok_html)
                    )

            lines.append({
                'html': ''.join(html),
                'number': lineno,
                'class': ' '.join(line_class) or "pln",
                'annotate': annotate_html,
                'annotate_title': annotate_title,
            })

        # Write the HTML page for this file.
        html = spaceless(self.source_tmpl.render({
            'c_exc': c_exc, 'c_mis': c_mis, 'c_par': c_par, 'c_run': c_run,
            'arcs': self.arcs, 'extra_css': self.extra_css,
            'cu': cu, 'nums': nums, 'lines': lines,
        }))
        if sys.version_info < (3, 0):
            html = html.decode(encoding)

        html_filename = flat_rootname + ".html"
        html_path = os.path.join(self.directory, html_filename)
        self.write_html(html_path, html)

        # Save this file's information for the index file.
        index_info = {
            'nums': nums,
            'html_filename': html_filename,
            'name': cu.name,
        }
        self.files.append(index_info)
        self.status.set_index_info(flat_rootname, index_info)

    def index_file(self):
        """Write the index.html file for this report."""
        index_tmpl = Templite(
            data("index.html"), self.template_globals
        )

        # Numbers supports summation across files.
        self.totals = sum([f['nums'] for f in self.files])

        html = index_tmpl.render({
            'arcs': self.arcs,
            'extra_css': self.extra_css,
            'files': self.files,
            'totals': self.totals,
        })

        if sys.version_info < (3, 0):
            html = html.decode("utf-8")
        self.write_html(
            os.path.join(self.directory, "index.html"),
            html
        )

        # Write the latest hashes for next time.
        self.status.write(self.directory)
class HtmlStatus(object):
    """The status information we keep to support incremental reporting."""

    STATUS_FILE = "status.dat"
    STATUS_FORMAT = 1

    def __init__(self):
        self.reset()

    def reset(self):
        """Forget everything: no settings hash, no per-file records."""
        self.settings = ''
        self.files = {}

    def read(self, directory):
        """Load the status previously saved in `directory`, if usable."""
        status = None
        try:
            status_file = os.path.join(directory, self.STATUS_FILE)
            fstatus = open(status_file, "rb")
            try:
                status = pickle.load(fstatus)
            finally:
                fstatus.close()
        except (IOError, ValueError):
            status = None

        # A missing file, a stale format, or a different coverage version
        # all make the saved status unusable.
        usable = (
            status is not None and
            status['format'] == self.STATUS_FORMAT and
            status['version'] == coverage.__version__
        )

        if usable:
            self.files = status['files']
            self.settings = status['settings']
        else:
            self.reset()

    def write(self, directory):
        """Pickle the current status into `directory`."""
        status_file = os.path.join(directory, self.STATUS_FILE)
        status = {
            'format': self.STATUS_FORMAT,
            'version': coverage.__version__,
            'settings': self.settings,
            'files': self.files,
        }
        fout = open(status_file, "wb")
        try:
            pickle.dump(status, fout)
        finally:
            fout.close()

    def settings_hash(self):
        """Get the hash of the coverage.py settings."""
        return self.settings

    def set_settings_hash(self, settings):
        """Set the hash of the coverage.py settings."""
        self.settings = settings

    def file_hash(self, fname):
        """Get the hash of `fname`'s contents ('' when unknown)."""
        return self.files.get(fname, {}).get('hash', '')

    def set_file_hash(self, fname, val):
        """Set the hash of `fname`'s contents."""
        self.files.setdefault(fname, {})['hash'] = val

    def index_info(self, fname):
        """Get the information for index.html for `fname`."""
        return self.files.get(fname, {}).get('index', {})

    def set_index_info(self, fname, info):
        """Set the information for index.html for `fname`."""
        self.files.setdefault(fname, {})['index'] = info
# Helpers for templates and generating HTML
def escape(t):
    """HTML-escape the text in `t`."""
    # BUG FIX: the replacement strings had been entity-decoded into
    # no-ops (e.g. .replace("&", "&")), so nothing was escaped and raw
    # source text leaked into the HTML. Restore the real entities.
    # Order matters: '&' must go first so the entities we emit are not
    # themselves re-escaped.
    replacements = (
        # Convert HTML special chars into HTML entities.
        ("&", "&amp;"), ("<", "&lt;"), (">", "&gt;"),
        ("'", "&#39;"), ('"', "&quot;"),
        # Convert runs of spaces: "......" -> "&nbsp;.&nbsp;.&nbsp;."
        ("  ", " &nbsp;"),
        # To deal with odd-length runs, convert the final pair of spaces
        # so that "....." -> "&nbsp;.&nbsp;&nbsp;."
        ("  ", " &nbsp;"),
    )
    for old, new in replacements:
        t = t.replace(old, new)
    return t
def spaceless(html):
    """Squeeze out some annoying extra space from an HTML string.

    Nicely-formatted templates mean lots of extra space in the result.
    Get rid of some.
    """
    # Collapse whitespace between a closing tag and an opening <p ...>.
    collapse = re.compile(r">\s+<p ")
    return collapse.sub(">\n<p ", html)
| bsd-3-clause |
Philippe12/external_chromium_org | tools/telemetry/telemetry/core/platform/__init__.py | 23 | 6350 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
from telemetry.core.platform import linux_platform_backend
from telemetry.core.platform import mac_platform_backend
from telemetry.core.platform import win_platform_backend
class Platform(object):
    """The platform that the target browser is running on.

    Provides a limited interface to interact with the platform itself, where
    possible. It's important to note that platforms may not provide a specific
    API, so check with IsFooBar() for availability.

    All public methods delegate to the injected platform backend.
    """

    def __init__(self, platform_backend):
        self._platform_backend = platform_backend

    def IsRawDisplayFrameRateSupported(self):
        """Platforms may be able to collect GL surface stats."""
        return self._platform_backend.IsRawDisplayFrameRateSupported()

    def StartRawDisplayFrameRateMeasurement(self):
        """Start measuring GL surface stats."""
        return self._platform_backend.StartRawDisplayFrameRateMeasurement()

    def StopRawDisplayFrameRateMeasurement(self):
        """Stop measuring GL surface stats."""
        return self._platform_backend.StopRawDisplayFrameRateMeasurement()

    class RawDisplayFrameRateMeasurement(object):
        """Read-only name/value/unit triple for one GL surface stat."""

        def __init__(self, name, value, unit):
            self._name = name
            self._value = value
            self._unit = unit

        @property
        def name(self):
            return self._name

        @property
        def value(self):
            return self._value

        @property
        def unit(self):
            return self._unit

    def GetRawDisplayFrameRateMeasurements(self):
        """Returns a list of RawDisplayFrameRateMeasurement."""
        return self._platform_backend.GetRawDisplayFrameRateMeasurements()

    def SetFullPerformanceModeEnabled(self, enabled):
        """Platforms may tweak their CPU governor, system status, etc.

        Most platforms can operate in a battery saving mode. While good for
        battery life, this can cause confusing performance results and add
        noise. Turning full performance mode on disables these features,
        which is useful for performance testing.
        """
        return self._platform_backend.SetFullPerformanceModeEnabled(enabled)

    def CanMonitorThermalThrottling(self):
        """Platforms may be able to detect thermal throttling.

        Some fan-less computers go into a reduced performance mode when their
        heat exceeds a certain threshold. Performance tests in particular
        should use this API to detect if this has happened and interpret
        results accordingly.
        """
        return self._platform_backend.CanMonitorThermalThrottling()

    def IsThermallyThrottled(self):
        """Returns True if the device is currently thermally throttled."""
        return self._platform_backend.IsThermallyThrottled()

    def HasBeenThermallyThrottled(self):
        """Returns True if the device has been thermally throttled."""
        return self._platform_backend.HasBeenThermallyThrottled()

    def GetOSName(self):
        """Returns a string description of the Platform OS.

        Examples: WIN, MAC, LINUX, CHROMEOS"""
        return self._platform_backend.GetOSName()

    def GetOSVersionName(self):
        """Returns a string description of the Platform OS version.

        Examples: VISTA, WIN7, LION, MOUNTAINLION"""
        return self._platform_backend.GetOSVersionName()

    def CanFlushIndividualFilesFromSystemCache(self):
        """Returns true if the disk cache can be flushed for specific files."""
        return self._platform_backend.CanFlushIndividualFilesFromSystemCache()

    def FlushEntireSystemCache(self):
        """Flushes the OS's file cache completely.

        This function may require root or administrator access."""
        return self._platform_backend.FlushEntireSystemCache()

    def FlushSystemCacheForDirectory(self, directory, ignoring=None):
        """Flushes the OS's file cache for the specified directory.

        Any files or directories inside |directory| matching a name in the
        |ignoring| list will be skipped.

        This function does not require root or administrator access."""
        return self._platform_backend.FlushSystemCacheForDirectory(
            directory, ignoring=ignoring)

    def LaunchApplication(self, application, parameters=None):
        """Launches a given application on the OS."""
        return self._platform_backend.LaunchApplication(application,
                                                        parameters)

    def IsApplicationRunning(self, application):
        """Returns whether an application is currently running."""
        # BUG FIX: previously delegated to the misspelled, nonexistent
        # backend method `IsApplicationLaunchning`, which raised
        # AttributeError on every call.
        return self._platform_backend.IsApplicationRunning(application)

    def CanLaunchApplication(self, application):
        """Returns whether the platform can launch the given application."""
        return self._platform_backend.CanLaunchApplication(application)

    def InstallApplication(self, application):
        """Installs the given application."""
        return self._platform_backend.InstallApplication(application)

    def CanCaptureVideo(self):
        """Returns a bool indicating whether the platform supports video capture."""
        return self._platform_backend.CanCaptureVideo()

    def StartVideoCapture(self, min_bitrate_mbps):
        """Starts capturing video.

        Outer framing may be included (from the OS, browser window, and
        webcam).

        Args:
          min_bitrate_mbps: The minimum capture bitrate in MegaBits Per
              Second. The platform is free to deliver a higher bitrate if
              it can do so without increasing overhead.

        Raises:
          ValueError if the required |min_bitrate_mbps| can't be achieved.
        """
        return self._platform_backend.StartVideoCapture(min_bitrate_mbps)

    def StopVideoCapture(self):
        """Stops capturing video.

        Yields:
          (time_ms, bitmap) tuples representing each video keyframe. Only
          the first frame in a run of sequential duplicate bitmaps is
          included.
            time_ms is milliseconds relative to the first frame.
            bitmap is a telemetry.core.Bitmap.
        """
        for t in self._platform_backend.StopVideoCapture():
            yield t
def CreatePlatformBackendForCurrentOS():
    """Instantiate the platform backend matching sys.platform."""
    if sys.platform.startswith('linux'):
        return linux_platform_backend.LinuxPlatformBackend()
    if sys.platform == 'darwin':
        return mac_platform_backend.MacPlatformBackend()
    if sys.platform == 'win32':
        return win_platform_backend.WinPlatformBackend()
    raise NotImplementedError()
| bsd-3-clause |
MarineLasbleis/GrowYourIC | notebooks/WD11.py | 1 | 2954 | # import statements
import numpy as np
import matplotlib.pyplot as plt #for figures
from mpl_toolkits.basemap import Basemap #to render maps
import math
import json #to write dict with parameters
from GrowYourIC import positions, geodyn, geodyn_trg, geodyn_static, plot_data, data
# --- Figure defaults and colormaps -------------------------------------
plt.rcParams['figure.figsize'] = (8.0, 3.0)  # size of figures
cm = plt.cm.get_cmap('viridis')
cm2 = plt.cm.get_cmap('winter')

# --- Load the real seismic data set ------------------------------------
# NOTE(review): path is user-specific and contains an unexpanded "~";
# confirm it resolves on the machine running this script.
data_set = data.SeismicFromFile("~/ownCloud/Research/Projets/CIDER_IC/GrowYourIC/GrowYourIC/data/WD11.dat")
residual = data_set.real_residual()

velocity_center = [0., -80]  # center of the eastern hemisphere
r, t, p = data_set.extract_rtp("bottom_turning_point")
dist = positions.angular_distance_to_point(t, p, *velocity_center)

# Histograms: bottom-turning-point depth (km, radius scaled by 1221) and zeta.
fig, ax = plt.subplots(2)
ax[0].hist(1221*(1-r))
zeta = data_set.extract_zeta()
ax[1].hist(zeta)

# Residual vs longitude, colored by zeta.
fig, ax = plt.subplots(sharey=True, figsize=(8, 2))
cm2 = plt.cm.get_cmap('winter')
sc1 = ax.scatter(p, residual, c=zeta, s=10, cmap=cm2, linewidth=0)
ax.set_xlabel("longitude")
ax.set_ylabel("residuals")
ax.set_xlim([-180, 180])
#sc2 = ax[1].scatter(dist, residual, c="k", s=10,cmap=cm2, linewidth=0)
#ax[1].set_xlabel("angular distance to ({}, {})".format(*velocity_center))
#ax[1].set_xlim([0, 180])
#fig.suptitle("Dataset: {},\n geodynamic model: {}".format(data_set_random.name, geodynModel.name))
cbar2 = fig.colorbar(sc1)
cbar2.set_label("zeta")

# Depth-vs-longitude section colored by residual, y-axis inverted so
# depth increases downward.
fig, ax = plt.subplots(figsize=(8, 2))
rICB_dim = 1221.  # in km
sc = ax.scatter(p, rICB_dim*(1.-r), c=residual, s=10, cmap=cm, linewidth=0)
ax.set_ylim(-0, 120)
fig.gca().invert_yaxis()
ax.set_xlim(-180, 180)
cbar = fig.colorbar(sc)
cbar.set_label("Residual")
ax.set_xlabel("longitude")
ax.set_ylabel("depth (km)")
# Boundary markers drawn by hand for three depth ranges.
ax.plot([11, 11], [10, 30], 'k')
ax.plot([21, 21], [30, 58], 'k')
ax.plot([38, 38], [58, 110], 'k')
ax.plot([-80, 100], [30, 30], 'k:')
ax.plot([-80, 100], [58, 58], 'k:')

# Overlay straight in/out ray trajectories for a few selected points.
points = [13, 234, 456, 1234, 2343, 27, 56, 567, 789]
for point_value in points:
    point = data_set[point_value]
    print(point)
    point.straight_in_out(30)  # discretize the ray with 30 samples
    traj_r = np.zeros(30)
    traj_p = np.zeros(30)
    for i, po in enumerate(point.points):
        # NOTE(review): this rebinds the module-level r, t, p (arrays
        # from extract_rtp) to per-sample values — likely unintended.
        r, t, p = po.r, po.theta, po.phi-180.
        traj_r[i] = rICB_dim*(1.-r)
        traj_p[i] = p
    ax.plot(traj_p, traj_r, 'k')

plt.savefig("test.pdf")

# NOTE(review): after the loop above, r is the last ray sample (probably
# a scalar without .shape) rather than the original array — confirm.
print(r.shape, residual.shape)

# Residual-vs-zeta scatter, full data plus three depth-masked panels.
fig, ax = plt.subplots(1, 4, sharey=True, sharex=True)
sc = ax[0].scatter(residual, zeta, c=dist, cmap="seismic", linewidth=0, s=10)
cbar = fig.colorbar(sc)
# NOTE(review): the third mask parses as ((depth > 30) * depth) < 58 due
# to operator precedence; "(>30) & (<58)" was probably intended.
masks = [np.squeeze(rICB_dim*(1.-r))<30, np.squeeze(rICB_dim*(1.-r))>58, (np.squeeze(rICB_dim*(1.-r))>30)*np.squeeze(rICB_dim*(1.-r))<58]
#mask = np.squeeze(rICB_dim*(1.-r))<30
#print(mask.shape, zeta.shape)
zeta = np.squeeze(zeta)
dist = np.squeeze(dist)
for i, mask in enumerate(masks):
    ax[i+1].scatter(np.ma.masked_where(mask, (residual)), np.ma.masked_where(mask, zeta), c=np.ma.masked_where(mask, dist), s=10, cmap="seismic", linewidth=0)
plt.show()
plt.show() | mit |
gerald-yang/ubuntu-iotivity-demo | snappy/grovepi/python-env/local/lib/python2.7/encodings/iso8859_10.py | 593 | 13845 | """ Python Character Mapping Codec iso8859_10 generated from 'MAPPINGS/ISO8859/8859-10.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless encoder/decoder pair backed by the module's charmap tables."""

    def encode(self, input, errors='strict'):
        """Encode `input` using the module-level encoding_table."""
        return codecs.charmap_encode(input, errors, encoding_table)

    def decode(self, input, errors='strict'):
        """Decode `input` using the module-level decoding_table."""
        return codecs.charmap_decode(input, errors, decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental encoder driven by the module's encoding_table."""

    def encode(self, input, final=False):
        # charmap_encode returns (bytes, length_consumed); keep the bytes.
        encoded, _consumed = codecs.charmap_encode(input, self.errors, encoding_table)
        return encoded
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental decoder driven by the module's decoding_table."""

    def decode(self, input, final=False):
        # charmap_decode returns (text, length_consumed); keep the text.
        decoded, _consumed = codecs.charmap_decode(input, self.errors, decoding_table)
        return decoded
class StreamWriter(Codec, codecs.StreamWriter):
    """Stream writer for the iso8859-10 charmap codec."""
class StreamReader(Codec, codecs.StreamReader):
    """Stream reader for the iso8859-10 charmap codec."""
### encodings module API

def getregentry():
    """Return the CodecInfo record the ``encodings`` package registers
    under the name 'iso8859-10'."""
    return codecs.CodecInfo(
        name='iso8859-10',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\x80' # 0x80 -> <control>
u'\x81' # 0x81 -> <control>
u'\x82' # 0x82 -> <control>
u'\x83' # 0x83 -> <control>
u'\x84' # 0x84 -> <control>
u'\x85' # 0x85 -> <control>
u'\x86' # 0x86 -> <control>
u'\x87' # 0x87 -> <control>
u'\x88' # 0x88 -> <control>
u'\x89' # 0x89 -> <control>
u'\x8a' # 0x8A -> <control>
u'\x8b' # 0x8B -> <control>
u'\x8c' # 0x8C -> <control>
u'\x8d' # 0x8D -> <control>
u'\x8e' # 0x8E -> <control>
u'\x8f' # 0x8F -> <control>
u'\x90' # 0x90 -> <control>
u'\x91' # 0x91 -> <control>
u'\x92' # 0x92 -> <control>
u'\x93' # 0x93 -> <control>
u'\x94' # 0x94 -> <control>
u'\x95' # 0x95 -> <control>
u'\x96' # 0x96 -> <control>
u'\x97' # 0x97 -> <control>
u'\x98' # 0x98 -> <control>
u'\x99' # 0x99 -> <control>
u'\x9a' # 0x9A -> <control>
u'\x9b' # 0x9B -> <control>
u'\x9c' # 0x9C -> <control>
u'\x9d' # 0x9D -> <control>
u'\x9e' # 0x9E -> <control>
u'\x9f' # 0x9F -> <control>
u'\xa0' # 0xA0 -> NO-BREAK SPACE
u'\u0104' # 0xA1 -> LATIN CAPITAL LETTER A WITH OGONEK
u'\u0112' # 0xA2 -> LATIN CAPITAL LETTER E WITH MACRON
u'\u0122' # 0xA3 -> LATIN CAPITAL LETTER G WITH CEDILLA
u'\u012a' # 0xA4 -> LATIN CAPITAL LETTER I WITH MACRON
u'\u0128' # 0xA5 -> LATIN CAPITAL LETTER I WITH TILDE
u'\u0136' # 0xA6 -> LATIN CAPITAL LETTER K WITH CEDILLA
u'\xa7' # 0xA7 -> SECTION SIGN
u'\u013b' # 0xA8 -> LATIN CAPITAL LETTER L WITH CEDILLA
u'\u0110' # 0xA9 -> LATIN CAPITAL LETTER D WITH STROKE
u'\u0160' # 0xAA -> LATIN CAPITAL LETTER S WITH CARON
u'\u0166' # 0xAB -> LATIN CAPITAL LETTER T WITH STROKE
u'\u017d' # 0xAC -> LATIN CAPITAL LETTER Z WITH CARON
u'\xad' # 0xAD -> SOFT HYPHEN
u'\u016a' # 0xAE -> LATIN CAPITAL LETTER U WITH MACRON
u'\u014a' # 0xAF -> LATIN CAPITAL LETTER ENG
u'\xb0' # 0xB0 -> DEGREE SIGN
u'\u0105' # 0xB1 -> LATIN SMALL LETTER A WITH OGONEK
u'\u0113' # 0xB2 -> LATIN SMALL LETTER E WITH MACRON
u'\u0123' # 0xB3 -> LATIN SMALL LETTER G WITH CEDILLA
u'\u012b' # 0xB4 -> LATIN SMALL LETTER I WITH MACRON
u'\u0129' # 0xB5 -> LATIN SMALL LETTER I WITH TILDE
u'\u0137' # 0xB6 -> LATIN SMALL LETTER K WITH CEDILLA
u'\xb7' # 0xB7 -> MIDDLE DOT
u'\u013c' # 0xB8 -> LATIN SMALL LETTER L WITH CEDILLA
u'\u0111' # 0xB9 -> LATIN SMALL LETTER D WITH STROKE
u'\u0161' # 0xBA -> LATIN SMALL LETTER S WITH CARON
u'\u0167' # 0xBB -> LATIN SMALL LETTER T WITH STROKE
u'\u017e' # 0xBC -> LATIN SMALL LETTER Z WITH CARON
u'\u2015' # 0xBD -> HORIZONTAL BAR
u'\u016b' # 0xBE -> LATIN SMALL LETTER U WITH MACRON
u'\u014b' # 0xBF -> LATIN SMALL LETTER ENG
u'\u0100' # 0xC0 -> LATIN CAPITAL LETTER A WITH MACRON
u'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\xc3' # 0xC3 -> LATIN CAPITAL LETTER A WITH TILDE
u'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE
u'\u012e' # 0xC7 -> LATIN CAPITAL LETTER I WITH OGONEK
u'\u010c' # 0xC8 -> LATIN CAPITAL LETTER C WITH CARON
u'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\u0118' # 0xCA -> LATIN CAPITAL LETTER E WITH OGONEK
u'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\u0116' # 0xCC -> LATIN CAPITAL LETTER E WITH DOT ABOVE
u'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\xd0' # 0xD0 -> LATIN CAPITAL LETTER ETH (Icelandic)
u'\u0145' # 0xD1 -> LATIN CAPITAL LETTER N WITH CEDILLA
u'\u014c' # 0xD2 -> LATIN CAPITAL LETTER O WITH MACRON
u'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE
u'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\u0168' # 0xD7 -> LATIN CAPITAL LETTER U WITH TILDE
u'\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE
u'\u0172' # 0xD9 -> LATIN CAPITAL LETTER U WITH OGONEK
u'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
u'\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xdd' # 0xDD -> LATIN CAPITAL LETTER Y WITH ACUTE
u'\xde' # 0xDE -> LATIN CAPITAL LETTER THORN (Icelandic)
u'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S (German)
u'\u0101' # 0xE0 -> LATIN SMALL LETTER A WITH MACRON
u'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe3' # 0xE3 -> LATIN SMALL LETTER A WITH TILDE
u'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE
u'\xe6' # 0xE6 -> LATIN SMALL LETTER AE
u'\u012f' # 0xE7 -> LATIN SMALL LETTER I WITH OGONEK
u'\u010d' # 0xE8 -> LATIN SMALL LETTER C WITH CARON
u'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
u'\u0119' # 0xEA -> LATIN SMALL LETTER E WITH OGONEK
u'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
u'\u0117' # 0xEC -> LATIN SMALL LETTER E WITH DOT ABOVE
u'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
u'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS
u'\xf0' # 0xF0 -> LATIN SMALL LETTER ETH (Icelandic)
u'\u0146' # 0xF1 -> LATIN SMALL LETTER N WITH CEDILLA
u'\u014d' # 0xF2 -> LATIN SMALL LETTER O WITH MACRON
u'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
u'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE
u'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
u'\u0169' # 0xF7 -> LATIN SMALL LETTER U WITH TILDE
u'\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE
u'\u0173' # 0xF9 -> LATIN SMALL LETTER U WITH OGONEK
u'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
u'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
u'\xfd' # 0xFD -> LATIN SMALL LETTER Y WITH ACUTE
u'\xfe' # 0xFE -> LATIN SMALL LETTER THORN (Icelandic)
u'\u0138' # 0xFF -> LATIN SMALL LETTER KRA
)
### Encoding table
# Inverse (unicode -> byte) mapping derived from decoding_table at import time.
encoding_table = codecs.charmap_build(decoding_table)
| apache-2.0 |
mika76/Wox | PythonHome/Lib/site-packages/pip/_vendor/requests/packages/chardet/langcyrillicmodel.py | 2762 | 17725 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# KOI8-R language model
# Character Mapping Table:
KOI8R_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206, # 80
207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222, # 90
223,224,225, 68,226,227,228,229,230,231,232,233,234,235,236,237, # a0
238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253, # b0
27, 3, 21, 28, 13, 2, 39, 19, 26, 4, 23, 11, 8, 12, 5, 1, # c0
15, 16, 9, 7, 6, 14, 24, 10, 17, 18, 20, 25, 30, 29, 22, 54, # d0
59, 37, 44, 58, 41, 48, 53, 46, 55, 42, 60, 36, 49, 38, 31, 34, # e0
35, 43, 45, 32, 40, 52, 56, 33, 61, 62, 51, 57, 47, 63, 50, 70, # f0
)
win1251_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,
207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,
223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,
239,240,241,242,243,244,245,246, 68,247,248,249,250,251,252,253,
37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35,
45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43,
3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15,
9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27, 16,
)
latin5_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,
207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,
223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,
37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35,
45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43,
3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15,
9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27, 16,
239, 68,240,241,242,243,244,245,246,247,248,249,250,251,252,255,
)
macCyrillic_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35,
45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43,
191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,
207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,
223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,
239,240,241,242,243,244,245,246,247,248,249,250,251,252, 68, 16,
3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15,
9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27,255,
)
IBM855_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
191,192,193,194, 68,195,196,197,198,199,200,201,202,203,204,205,
206,207,208,209,210,211,212,213,214,215,216,217, 27, 59, 54, 70,
3, 37, 21, 44, 28, 58, 13, 41, 2, 48, 39, 53, 19, 46,218,219,
220,221,222,223,224, 26, 55, 4, 42,225,226,227,228, 23, 60,229,
230,231,232,233,234,235, 11, 36,236,237,238,239,240,241,242,243,
8, 49, 12, 38, 5, 31, 1, 34, 15,244,245,246,247, 35, 16,248,
43, 9, 45, 7, 32, 6, 40, 14, 52, 24, 56, 10, 33, 17, 61,249,
250, 18, 62, 20, 51, 25, 57, 30, 47, 29, 63, 22, 50,251,252,255,
)
IBM866_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35,
45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43,
3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15,
191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,
207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,
223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,
9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27, 16,
239, 68,240,241,242,243,244,245,246,247,248,249,250,251,252,255,
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 97.6601%
# first 1024 sequences: 2.3389%
# rest sequences: 0.1237%
# negative sequences: 0.0009%
RussianLangModel = (
0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,1,3,3,3,3,1,3,3,3,2,3,2,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,0,3,2,2,2,2,2,0,0,2,
3,3,3,2,3,3,3,3,3,3,3,3,3,3,2,3,3,0,0,3,3,3,3,3,3,3,3,3,2,3,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,2,2,3,3,3,3,3,3,3,3,3,2,3,3,0,0,3,3,3,3,3,3,3,3,2,3,3,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,3,2,3,3,3,3,3,3,3,3,3,3,3,3,3,0,0,3,3,3,3,3,3,3,3,3,3,3,2,1,
0,0,0,0,0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,0,0,3,3,3,3,3,3,3,3,3,3,3,2,1,
0,0,0,0,0,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,2,2,2,3,1,3,3,1,3,3,3,3,2,2,3,0,2,2,2,3,3,2,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,3,3,3,3,3,2,2,3,2,3,3,3,2,1,2,2,0,1,2,2,2,2,2,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,3,0,2,2,3,3,2,1,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,3,3,1,2,3,2,2,3,2,3,3,3,3,2,2,3,0,3,2,2,3,1,1,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,2,2,3,3,3,3,3,2,3,3,3,3,2,2,2,0,3,3,3,2,2,2,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,2,3,2,3,3,3,3,3,3,2,3,2,2,0,1,3,2,1,2,2,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,2,1,1,3,0,1,1,1,1,2,1,1,0,2,2,2,1,2,0,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,3,3,2,2,2,2,1,3,2,3,2,3,2,1,2,2,0,1,1,2,1,2,1,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,2,2,3,2,3,3,3,2,2,2,2,0,2,2,2,2,3,1,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
3,2,3,2,2,3,3,3,3,3,3,3,3,3,1,3,2,0,0,3,3,3,3,2,3,3,3,3,2,3,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,3,2,2,3,3,0,2,1,0,3,2,3,2,3,0,0,1,2,0,0,1,0,1,2,1,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,3,0,2,3,3,3,3,2,3,3,3,3,1,2,2,0,0,2,3,2,2,2,3,2,3,2,2,3,0,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,3,0,2,3,2,3,0,1,2,3,3,2,0,2,3,0,0,2,3,2,2,0,1,3,1,3,2,2,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,3,0,2,3,3,3,3,3,3,3,3,2,1,3,2,0,0,2,2,3,3,3,2,3,3,0,2,2,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,2,3,3,2,2,2,3,3,0,0,1,1,1,1,1,2,0,0,1,1,1,1,0,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,2,3,3,3,3,3,3,3,0,3,2,3,3,2,3,2,0,2,1,0,1,1,0,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,3,3,3,2,2,2,2,3,1,3,2,3,1,1,2,1,0,2,2,2,2,1,3,1,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
2,2,3,3,3,3,3,1,2,2,1,3,1,0,3,0,0,3,0,0,0,1,1,0,1,2,1,0,0,0,0,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,2,1,1,3,3,3,2,2,1,2,2,3,1,1,2,0,0,2,2,1,3,0,0,2,1,1,2,1,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,3,3,3,3,1,2,2,2,1,2,1,3,3,1,1,2,1,2,1,2,2,0,2,0,0,1,1,0,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,3,2,1,3,2,2,3,2,0,3,2,0,3,0,1,0,1,1,0,0,1,1,1,1,0,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,2,3,3,3,2,2,2,3,3,1,2,1,2,1,0,1,0,1,1,0,1,0,0,2,1,1,1,0,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
3,1,1,2,1,2,3,3,2,2,1,2,2,3,0,2,1,0,0,2,2,3,2,1,2,2,2,2,2,3,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,1,1,0,1,1,2,2,1,1,3,0,0,1,3,1,1,1,0,0,0,1,0,1,1,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,1,3,3,3,2,0,0,0,2,1,0,1,0,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,1,0,0,2,3,2,2,2,1,2,2,2,1,2,1,0,0,1,1,1,0,2,0,1,1,1,0,0,1,1,
1,0,0,0,0,0,1,2,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,0,0,0,0,1,0,0,0,0,3,0,1,2,1,0,0,0,0,0,0,0,1,1,0,0,1,1,
1,0,1,0,1,2,0,0,1,1,2,1,0,1,1,1,1,0,1,1,1,1,0,1,0,0,1,0,0,1,1,0,
2,2,3,2,2,2,3,1,2,2,2,2,2,2,2,2,1,1,1,1,1,1,1,0,1,0,1,1,1,0,2,1,
1,1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,1,1,1,0,1,0,1,1,0,1,1,1,0,1,1,0,
3,3,3,2,2,2,2,3,2,2,1,1,2,2,2,2,1,1,3,1,2,1,2,0,0,1,1,0,1,0,2,1,
1,1,1,1,1,2,1,0,1,1,1,1,0,1,0,0,1,1,0,0,1,0,1,0,0,1,0,0,0,1,1,0,
2,0,0,1,0,3,2,2,2,2,1,2,1,2,1,2,0,0,0,2,1,2,2,1,1,2,2,0,1,1,0,2,
1,1,1,1,1,0,1,1,1,2,1,1,1,2,1,0,1,2,1,1,1,1,0,1,1,1,0,0,1,0,0,1,
1,3,2,2,2,1,1,1,2,3,0,0,0,0,2,0,2,2,1,0,0,0,0,0,0,1,0,0,0,0,1,1,
1,0,1,1,0,1,0,1,1,0,1,1,0,2,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,1,1,0,
2,3,2,3,2,1,2,2,2,2,1,0,0,0,2,0,0,1,1,0,0,0,0,0,0,0,1,1,0,0,2,1,
1,1,2,1,0,2,0,0,1,0,1,0,0,1,0,0,1,1,0,1,1,0,0,0,0,0,1,0,0,0,0,0,
3,0,0,1,0,2,2,2,3,2,2,2,2,2,2,2,0,0,0,2,1,2,1,1,1,2,2,0,0,0,1,2,
1,1,1,1,1,0,1,2,1,1,1,1,1,1,1,0,1,1,1,1,1,1,0,1,1,1,1,1,1,0,0,1,
2,3,2,3,3,2,0,1,1,1,0,0,1,0,2,0,1,1,3,1,0,0,0,0,0,0,0,1,0,0,2,1,
1,1,1,1,1,1,1,0,1,0,1,1,1,1,0,1,1,1,0,0,1,1,0,1,0,0,0,0,0,0,1,0,
2,3,3,3,3,1,2,2,2,2,0,1,1,0,2,1,1,1,2,1,0,1,1,0,0,1,0,1,0,0,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,2,0,0,1,1,2,2,1,0,0,2,0,1,1,3,0,0,1,0,0,0,0,0,1,0,1,2,1,
1,1,2,0,1,1,1,0,1,0,1,1,0,1,0,1,1,1,1,0,1,0,0,0,0,0,0,1,0,1,1,0,
1,3,2,3,2,1,0,0,2,2,2,0,1,0,2,0,1,1,1,0,1,0,0,0,3,0,1,1,0,0,2,1,
1,1,1,0,1,1,0,0,0,0,1,1,0,1,0,0,2,1,1,0,1,0,0,0,1,0,1,0,0,1,1,0,
3,1,2,1,1,2,2,2,2,2,2,1,2,2,1,1,0,0,0,2,2,2,0,0,0,1,2,1,0,1,0,1,
2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,2,1,1,1,0,1,0,1,1,0,1,1,1,0,0,1,
3,0,0,0,0,2,0,1,1,1,1,1,1,1,0,1,0,0,0,1,1,1,0,1,0,1,1,0,0,1,0,1,
1,1,0,0,1,0,0,0,1,0,1,1,0,0,1,0,1,0,1,0,0,0,0,1,0,0,0,1,0,0,0,1,
1,3,3,2,2,0,0,0,2,2,0,0,0,1,2,0,1,1,2,0,0,0,0,0,0,0,0,1,0,0,2,1,
0,1,1,0,0,1,1,0,0,0,1,1,0,1,1,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,1,0,
2,3,2,3,2,0,0,0,0,1,1,0,0,0,2,0,2,0,2,0,0,0,0,0,1,0,0,1,0,0,1,1,
1,1,2,0,1,2,1,0,1,1,2,1,1,1,1,1,2,1,1,0,1,0,0,1,1,1,1,1,0,1,1,0,
1,3,2,2,2,1,0,0,2,2,1,0,1,2,2,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,1,1,
0,0,1,1,0,1,1,0,0,1,1,0,1,1,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,0,2,3,1,2,2,2,2,2,2,1,1,0,0,0,1,0,1,0,2,1,1,1,0,0,0,0,1,
1,1,0,1,1,0,1,1,1,1,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,
2,0,2,0,0,1,0,3,2,1,2,1,2,2,0,1,0,0,0,2,1,0,0,2,1,1,1,1,0,2,0,2,
2,1,1,1,1,1,1,1,1,1,1,1,1,2,1,0,1,1,1,1,0,0,0,1,1,1,1,0,1,0,0,1,
1,2,2,2,2,1,0,0,1,0,0,0,0,0,2,0,1,1,1,1,0,0,0,0,1,0,1,2,0,0,2,0,
1,0,1,1,1,2,1,0,1,0,1,1,0,0,1,0,1,1,1,0,1,0,0,0,1,0,0,1,0,1,1,0,
2,1,2,2,2,0,3,0,1,1,0,0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
0,0,0,1,1,1,0,0,1,0,1,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,
1,2,2,3,2,2,0,0,1,1,2,0,1,2,1,0,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,1,
0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,1,1,0,0,1,0,0,0,0,0,0,0,0,1,1,0,
2,2,1,1,2,1,2,2,2,2,2,1,2,2,0,1,0,0,0,1,2,2,2,1,2,1,1,1,1,1,2,1,
1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,0,1,1,1,0,0,0,0,1,1,1,0,1,1,0,0,1,
1,2,2,2,2,0,1,0,2,2,0,0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,
0,0,1,0,0,1,0,0,0,0,1,0,1,1,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,2,0,0,0,2,2,2,0,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,1,
0,1,1,0,0,1,1,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,2,0,0,0,0,1,0,0,1,1,2,0,0,0,0,1,0,1,0,0,1,0,0,2,0,0,0,1,
0,0,1,0,0,1,0,0,0,1,1,0,0,0,0,0,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,1,1,2,0,2,1,1,1,1,0,2,2,0,0,0,0,0,0,0,0,0,1,1,0,0,0,1,1,
0,0,1,0,1,1,0,0,0,0,1,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
1,0,2,1,2,0,0,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,
0,0,1,0,1,1,0,0,0,0,1,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,
1,0,0,0,0,2,0,1,2,1,0,1,1,1,0,1,0,0,0,1,0,1,0,0,1,0,1,0,0,0,0,1,
0,0,0,0,0,1,0,0,1,1,0,0,1,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,
2,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
1,0,0,0,1,0,0,0,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,1,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
1,1,1,0,1,0,1,0,0,1,1,1,1,0,0,0,1,0,0,0,0,1,0,0,0,1,0,1,0,0,0,0,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
1,1,0,1,1,0,1,0,1,0,0,0,0,1,1,0,1,1,0,0,0,0,0,1,0,1,1,0,1,0,0,0,
0,1,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,1,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,
)
# Per-charset language-model descriptors for Russian text detection.  Each one
# pairs a charset-specific byte->order map with the shared RussianLangModel
# sequence matrix; only the map and charset name differ between entries.
# Fix: MacCyrillicModel previously ended with a stray, un-Pythonic semicolon.
Koi8rModel = {
    'charToOrderMap': KOI8R_CharToOrderMap,
    'precedenceMatrix': RussianLangModel,
    'mTypicalPositiveRatio': 0.976601,
    'keepEnglishLetter': False,
    'charsetName': "KOI8-R"
}

Win1251CyrillicModel = {
    'charToOrderMap': win1251_CharToOrderMap,
    'precedenceMatrix': RussianLangModel,
    'mTypicalPositiveRatio': 0.976601,
    'keepEnglishLetter': False,
    'charsetName': "windows-1251"
}

Latin5CyrillicModel = {
    'charToOrderMap': latin5_CharToOrderMap,
    'precedenceMatrix': RussianLangModel,
    'mTypicalPositiveRatio': 0.976601,
    'keepEnglishLetter': False,
    'charsetName': "ISO-8859-5"
}

MacCyrillicModel = {
    'charToOrderMap': macCyrillic_CharToOrderMap,
    'precedenceMatrix': RussianLangModel,
    'mTypicalPositiveRatio': 0.976601,
    'keepEnglishLetter': False,
    'charsetName': "MacCyrillic"
}

Ibm866Model = {
    'charToOrderMap': IBM866_CharToOrderMap,
    'precedenceMatrix': RussianLangModel,
    'mTypicalPositiveRatio': 0.976601,
    'keepEnglishLetter': False,
    'charsetName': "IBM866"
}

Ibm855Model = {
    'charToOrderMap': IBM855_CharToOrderMap,
    'precedenceMatrix': RussianLangModel,
    'mTypicalPositiveRatio': 0.976601,
    'keepEnglishLetter': False,
    'charsetName': "IBM855"
}

# flake8: noqa
| mit |
luotao1/Paddle | python/paddle/dataset/movielens.py | 1 | 9031 | # Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Movielens 1-M dataset.
Movielens 1-M dataset contains 1 million ratings from 6000 users on 4000
movies, which was collected by GroupLens Research. This module will download
Movielens 1-M dataset from
http://files.grouplens.org/datasets/movielens/ml-1m.zip and parse training
set and test set into paddle reader creators.
"""
from __future__ import print_function
import numpy as np
import zipfile
import paddle.dataset.common
import paddle.utils.deprecated as deprecated
import re
import random
import functools
import six
import paddle.compat as cpt
# Public API of this dataset module.
__all__ = [
    'train', 'test', 'get_movie_title_dict', 'max_movie_id', 'max_user_id',
    'age_table', 'movie_categories', 'max_job_id', 'user_info', 'movie_info'
]

# Age buckets from the MovieLens 1-M metadata; UserInfo stores a user's age as
# the index of its bucket in this list.
age_table = [1, 18, 25, 35, 45, 50, 56]

#URL = 'http://files.grouplens.org/datasets/movielens/ml-1m.zip'
URL = 'https://dataset.bj.bcebos.com/movielens%2Fml-1m.zip'
MD5 = 'c4d9eecfca2ab87c1945afe126590906'
class MovieInfo(object):
    """Container for one movie record: id, category list and title."""

    def __init__(self, index, categories, title):
        # The id arrives as a string field from movies.dat; normalize to int.
        self.index = int(index)
        self.categories = categories
        self.title = title

    def value(self):
        """Encode the movie as [id, category-id list, title-word-id list]."""
        category_ids = [CATEGORIES_DICT[name] for name in self.categories]
        word_ids = [MOVIE_TITLE_DICT[word.lower()] for word in self.title.split()]
        return [self.index, category_ids, word_ids]

    def __str__(self):
        return "<MovieInfo id(%d), title(%s), categories(%s)>" % (
            self.index, self.title, self.categories)

    def __repr__(self):
        return str(self)
class UserInfo(object):
    """Container for one user record: id, gender, age bucket and job id."""

    def __init__(self, index, gender, age, job_id):
        self.index = int(index)
        self.is_male = gender == 'M'
        # Store the position of the age inside the module-level age_table,
        # not the raw age value.
        self.age = age_table.index(int(age))
        self.job_id = int(job_id)

    def value(self):
        """Encode the user as [id, gender flag (0=male, 1=female), age bucket, job id]."""
        gender_flag = 0 if self.is_male else 1
        return [self.index, gender_flag, self.age, self.job_id]

    def __str__(self):
        gender = "M" if self.is_male else "F"
        return "<UserInfo id(%d), gender(%s), age(%d), job(%d)>" % (
            self.index, gender, age_table[self.age], self.job_id)

    def __repr__(self):
        return str(self)
# Lazily populated module-level caches; filled on first use by
# __initialize_meta_info__().
MOVIE_INFO = None
MOVIE_TITLE_DICT = None
CATEGORIES_DICT = None
USER_INFO = None
def __initialize_meta_info__():
    """
    Download the ml-1m archive (cached by paddle.dataset.common.download) and,
    on first call, parse movies.dat and users.dat into the module-level caches
    MOVIE_INFO, MOVIE_TITLE_DICT, CATEGORIES_DICT and USER_INFO.

    Returns the local path of the downloaded zip file.
    """
    fn = paddle.dataset.common.download(URL, "movielens", MD5)
    global MOVIE_INFO
    if MOVIE_INFO is None:
        # Titles look like "Some Title (1995)"; group(1) is everything before
        # the parenthesized year.
        pattern = re.compile(r'^(.*)\((\d+)\)$')
        with zipfile.ZipFile(file=fn) as package:
            # Sanity check that the archive listing is well formed.
            for info in package.infolist():
                assert isinstance(info, zipfile.ZipInfo)
            MOVIE_INFO = dict()
            title_word_set = set()
            categories_set = set()
            with package.open('ml-1m/movies.dat') as movie_file:
                for i, line in enumerate(movie_file):
                    line = cpt.to_text(line, encoding='latin')
                    movie_id, title, categories = line.strip().split('::')
                    categories = categories.split('|')
                    for c in categories:
                        categories_set.add(c)
                    title = pattern.match(title).group(1)
                    MOVIE_INFO[int(movie_id)] = MovieInfo(
                        index=movie_id, categories=categories, title=title)
                    for w in title.split():
                        title_word_set.add(w.lower())
            # Assign integer ids to every distinct title word and category
            # (order comes from set iteration, so ids are per-process).
            global MOVIE_TITLE_DICT
            MOVIE_TITLE_DICT = dict()
            for i, w in enumerate(title_word_set):
                MOVIE_TITLE_DICT[w] = i
            global CATEGORIES_DICT
            CATEGORIES_DICT = dict()
            for i, c in enumerate(categories_set):
                CATEGORIES_DICT[c] = i
            global USER_INFO
            USER_INFO = dict()
            with package.open('ml-1m/users.dat') as user_file:
                for line in user_file:
                    line = cpt.to_text(line, encoding='latin')
                    uid, gender, age, job, _ = line.strip().split("::")
                    USER_INFO[int(uid)] = UserInfo(
                        index=uid, gender=gender, age=age, job_id=job)
    return fn
def __reader__(rand_seed=0, test_ratio=0.1, is_test=False):
    """
    Yield one rating record per line of ml-1m/ratings.dat.

    Each yielded item is ``usr.value() + mov.value() + [[rating]]``, where
    the raw 1..5 star rating is rescaled via ``rating * 2 - 5``.  The
    seeded np.random stream decides the train/test split: a line belongs
    to the test split when ``random() < test_ratio``, and the same seed
    reproduces the same partition for both splits.
    """
    fn = __initialize_meta_info__()
    np.random.seed(rand_seed)
    with zipfile.ZipFile(file=fn) as package:
        with package.open('ml-1m/ratings.dat') as rating:
            for line in rating:
                line = cpt.to_text(line, encoding='latin')
                # Keep the line only when its split matches is_test.
                if (np.random.random() < test_ratio) == is_test:
                    uid, mov_id, rating, _ = line.strip().split("::")
                    uid = int(uid)
                    mov_id = int(mov_id)
                    rating = float(rating) * 2 - 5.0
                    mov = MOVIE_INFO[mov_id]
                    usr = USER_INFO[uid]
                    yield usr.value() + mov.value() + [[rating]]
@deprecated(
    since="2.0.0",
    update_to="paddle.text.datasets.Movielens",
    reason="Please use new dataset API which supports paddle.io.DataLoader")
def __reader_creator__(**kwargs):
    """Return a zero-argument callable that builds a rating reader."""

    def reader():
        return __reader__(**kwargs)

    return reader
# Pre-configured reader creators for the train / test split of the ratings.
train = functools.partial(__reader_creator__, is_test=False)
test = functools.partial(__reader_creator__, is_test=True)
@deprecated(
    since="2.0.0",
    update_to="paddle.text.datasets.Movielens",
    reason="Please use new dataset API which supports paddle.io.DataLoader")
def get_movie_title_dict():
    """
    Get movie title dictionary (lowercased title word -> integer index).
    """
    # Populate the module caches on first use.
    __initialize_meta_info__()
    return MOVIE_TITLE_DICT
def __max_index_info__(a, b):
    """Return whichever of *a*/*b* has the larger ``index`` attribute."""
    return a if a.index > b.index else b
@deprecated(
    since="2.0.0",
    update_to="paddle.text.datasets.Movielens",
    reason="Please use new dataset API which supports paddle.io.DataLoader")
def max_movie_id():
    """
    Get the maximum value of movie id (reduced over the MOVIE_INFO cache).
    """
    __initialize_meta_info__()
    return six.moves.reduce(__max_index_info__, list(MOVIE_INFO.values())).index
@deprecated(
    since="2.0.0",
    update_to="paddle.text.datasets.Movielens",
    reason="Please use new dataset API which supports paddle.io.DataLoader")
def max_user_id():
    """
    Get the maximum value of user id (reduced over the USER_INFO cache).
    """
    __initialize_meta_info__()
    return six.moves.reduce(__max_index_info__, list(USER_INFO.values())).index
def __max_job_id_impl__(a, b):
    """Return whichever of *a*/*b* has the larger ``job_id`` attribute."""
    return a if a.job_id > b.job_id else b
@deprecated(
    since="2.0.0",
    update_to="paddle.text.datasets.Movielens",
    reason="Please use new dataset API which supports paddle.io.DataLoader")
def max_job_id():
    """
    Get the maximum value of job id among all cached users.
    """
    __initialize_meta_info__()
    return six.moves.reduce(__max_job_id_impl__,
                            list(USER_INFO.values())).job_id
@deprecated(
    since="2.0.0",
    update_to="paddle.text.datasets.Movielens",
    reason="Please use new dataset API which supports paddle.io.DataLoader")
def movie_categories():
    """
    Get movie categories dictionary (category name -> integer index).
    """
    __initialize_meta_info__()
    return CATEGORIES_DICT
@deprecated(
    since="2.0.0",
    update_to="paddle.text.datasets.Movielens",
    reason="Please use new dataset API which supports paddle.io.DataLoader")
def user_info():
    """
    Get user info dictionary (user id -> UserInfo).
    """
    __initialize_meta_info__()
    return USER_INFO
@deprecated(
    since="2.0.0",
    update_to="paddle.text.datasets.Movielens",
    reason="Please use new dataset API which supports paddle.io.DataLoader")
def movie_info():
    """
    Get movie info dictionary (movie id -> MovieInfo).
    """
    __initialize_meta_info__()
    return MOVIE_INFO
def unittest():
    """
    Smoke test: iterate the full train and test splits and print the
    enumerate index of the last record seen in each.

    NOTE(review): the printed values are ``record_count - 1``, and the
    names would be unbound if a split were empty -- presumably acceptable
    for this ad-hoc check; also shadows the stdlib ``unittest`` name.
    """
    for train_count, _ in enumerate(train()()):
        pass
    for test_count, _ in enumerate(test()()):
        pass
    print(train_count, test_count)
@deprecated(
    since="2.0.0",
    update_to="paddle.text.datasets.Movielens",
    reason="Please use new dataset API which supports paddle.io.DataLoader")
def fetch():
    """
    Download the MovieLens archive into paddle's dataset cache.
    """
    paddle.dataset.common.download(URL, "movielens", MD5)


if __name__ == '__main__':
    unittest()
| apache-2.0 |
vecnet/vnetsource | djorm-ext-pgbytea/djorm_pgbytea/lobject.py | 4 | 2813 | # -*- coding: utf-8 -*-
import types
import sys
import six
from django.db import models, connections
from psycopg2.extensions import lobject as lobject_class
class LargeObjectFile(object):
    """
    Proxy class over a psycopg2 large object file instance.

    Attribute access is delegated to the underlying psycopg2 ``lobject``
    once :meth:`open` has been called; before that, any delegated access
    raises ``Exception("File is not opened")``.
    """

    def __init__(self, oid=0, field=None, instance=None, **params):
        self.oid = oid            # PostgreSQL large-object oid (0 = not created yet)
        self.field = field        # owning LargeObjectField, if any
        self.instance = instance  # model instance the proxy belongs to, if any
        self.params = params
        self._obj = None          # underlying psycopg2 lobject, set by open()

    def __getattr__(self, name):
        # Only invoked for attributes not found on the proxy itself, so
        # everything unknown is delegated to the wrapped lobject.
        if self._obj is None:
            raise Exception("File is not opened")
        # BUGFIX(cleanup): the former ``super(...).__getattr__(name)`` call
        # could never succeed (object defines no __getattr__) and always fell
        # through to this delegation, so delegate directly.
        return getattr(self._obj, name)

    def open(self, mode="rwb", new_file=None, using="default"):
        """Open (or create) the large object on the given connection alias."""
        connection = connections[using]
        self._obj = connection.connection.lobject(self.oid, mode, 0, new_file)
        # Creating a new object assigns a fresh oid; keep ours in sync.
        self.oid = self._obj.oid
        return self
class LargeObjectDescriptor(models.fields.subclassing.Creator):
    """
    LargeObjectField descriptor.

    Normalizes values assigned to the field attribute: raw values are run
    through the field's to_python() and, when not already a proxy, wrapped
    in a LargeObjectFile before being stored on the instance.
    """

    def __set__(self, instance, value):
        value = self.field.to_python(value)
        if value is not None:
            if not isinstance(value, LargeObjectFile):
                # A bare oid was assigned; wrap it in the proxy.
                value = LargeObjectFile(value, self.field, instance)
        instance.__dict__[self.field.name] = value
class LargeObjectField(models.IntegerField):
    """
    LargeObject field.

    Internally is an ``oid`` column but returns a :class:`LargeObjectFile`
    proxy to the referenced file.
    """

    def db_type(self, connection):
        # PostgreSQL stores large-object references as plain oids.
        return 'oid'

    def contribute_to_class(self, cls, name):
        super(LargeObjectField, self).contribute_to_class(cls, name)
        # Install the descriptor that wraps assigned values in a proxy.
        setattr(cls, self.name, LargeObjectDescriptor(self))

    def _value_to_python(self, value):
        return value

    def get_db_prep_value(self, value, connection, prepared=False):
        """Convert a LargeObjectFile (or None) to its database oid."""
        if not prepared:
            value = self.get_prep_value(value)
        if isinstance(value, LargeObjectFile):
            if value.oid > 0:
                return value.oid
            # BUGFIX: corrected garbled error message ("a great that 0").
            raise ValueError("Oid must be greater than 0")
        elif value is None:
            return None
        raise ValueError("Invalid value")

    def get_prep_value(self, value):
        return value

    def to_python(self, value):
        """Coerce an oid, an existing proxy, or None to the proxy type."""
        if isinstance(value, LargeObjectFile):
            return value
        elif isinstance(value, six.integer_types):
            return LargeObjectFile(value, self, self.model)
        elif value is None:
            return None
        raise ValueError("Invalid value")
# South support: register this custom field with the South migration tool's
# model inspector when South is installed; silently skip otherwise.
try:
    from south.modelsinspector import add_introspection_rules
    add_introspection_rules([], ['^djorm_pgbytea\.lobject\.LargeObjectField'])
except ImportError:
    pass
| mpl-2.0 |
abarthakur/voice_assistant | src/movieModule.py | 1 | 1920 | from os import system
import pyttsx
#Setting up the speaker
# pyttsx event callbacks; each deliberately prints only an empty line --
# the app does not need per-utterance progress output.
def onStart(name):
    print ""

def onWord(name, location, length):
    print ""

def onEnd(name, completed):
    print ""

engine = pyttsx.init()
rate = engine.getProperty('rate')
# Slow the default speaking rate down slightly.
engine.setProperty('rate', rate-50)
engine.connect('started-utterance', onStart)
engine.connect('started-word', onWord)
engine.connect('finished-utterance', onEnd)
#variables needed
music_dir="~/media/hritik/New\ Volume1/dc++/movies"
# Read the raw voice-assistant command from stdin (Python 2 raw_input).
input_cmd=raw_input("Enter the command : ")
# Map of logical command names to the totem shell invocations that run them.
cmd={'pause':"totem --pause"}
cmd['next']="totem --play \n totem --next"
cmd['previous']="totem --play \n totem --previous"
cmd['play']="totem --play"
cmd['vol-up']="totem --volume-up"
cmd['vol-down']="totem --volume-down"
# Fallback entry executed when no keyword matches the input.
cmd['err_message']="print 'Sorry unknown command'"
cmd['quit']="totem --quit"
cmd['forward']="totem --seek-fwd"
cmd['backward']="totem --seek-bwd"
cmd['fullscreen']="totem --fullscreen"
cmd['mute']="totem --mute"
#Controller begins
# Keyword synonyms: each logical command maps to the spoken words that
# should trigger it.
priority_key={}
priority_key['next']=['next']
priority_key['previous']=['previous']
priority_key['pause']=['pause','stop']
priority_key['quit']=['quit','close']
priority_key['fullscreen']=['full','fullscreen','maximize','maximise']
priority_key['mute']=['mute','nil','zero','lowest']
priority_key['forward']=['ahead','forward']
priority_key['backward']=['back','backward']
priority_key['vol-up']=['increase','up','raise','high']
priority_key['vol-down']=['decrease','down','lower','low','reduce']
priority_key['play']=['play','start','video','movie','watch','player','current']
def search(word, command, priority_key):
    """Return 1 when *word* is one of the trigger keywords for *command*, else 0."""
    return 1 if word in priority_key[command] else 0
# Tokenize the spoken command and pick the matching totem action.
str_split=input_cmd.split()
selected="err_message"
for command in priority_key:
    for word in str_split:
        if(search(word,command,priority_key)):
            selected=command
            break;
# NOTE(review): ``break`` only exits the inner word loop, so a keyword of a
# later command (in dict iteration order) can overwrite an earlier match --
# presumably intended as "last match wins"; verify before changing.
print selected
system(cmd[selected])
aliyun/oss-ftp | python27/win32/Lib/test/test_htmllib.py | 133 | 1978 | import formatter
import unittest
from test import test_support
htmllib = test_support.import_module('htmllib', deprecated=True)
class AnchorCollector(htmllib.HTMLParser):
    """HTML parser that records the argument tuples of every anchor start tag."""

    def __init__(self, *args, **kw):
        self.__seen = []
        htmllib.HTMLParser.__init__(self, *args, **kw)

    def get_anchor_info(self):
        """Return the collected anchor argument tuples in document order."""
        return self.__seen

    def anchor_bgn(self, *args):
        self.__seen.append(args)
class DeclCollector(htmllib.HTMLParser):
    """HTML parser that records the payload of every unknown SGML declaration."""

    def __init__(self, *args, **kw):
        self.__collected = []
        htmllib.HTMLParser.__init__(self, *args, **kw)

    def get_decl_info(self):
        """Return the collected declaration strings in document order."""
        return self.__collected

    def unknown_decl(self, data):
        self.__collected.append(data)
class HTMLParserTestCase(unittest.TestCase):
    # Regression tests for htmllib.HTMLParser anchor and declaration hooks.

    def test_anchor_collection(self):
        # See SF bug #467059.
        parser = AnchorCollector(formatter.NullFormatter(), verbose=1)
        parser.feed(
            """<a href='http://foo.org/' name='splat'> </a>
            <a href='http://www.python.org/'> </a>
            <a name='frob'> </a>
            """)
        parser.close()
        # Each anchor contributes an (href, name, type) tuple; missing
        # attributes come through as empty strings.
        self.assertEqual(parser.get_anchor_info(),
                         [('http://foo.org/', 'splat', ''),
                          ('http://www.python.org/', '', ''),
                          ('', 'frob', ''),
                          ])

    def test_decl_collection(self):
        # See SF patch #545300
        parser = DeclCollector(formatter.NullFormatter(), verbose=1)
        parser.feed(
            """<html>
            <body>
            hallo
            <![if !supportEmptyParas]> <![endif]>
            </body>
            </html>
            """)
        parser.close()
        # Only the declaration payloads (without the <![ ]> brackets).
        self.assertEqual(parser.get_decl_info(),
                         ["if !supportEmptyParas",
                          "endif"
                          ])
def test_main():
    # Standard CPython test entry point: run the suite via test_support.
    test_support.run_unittest(HTMLParserTestCase)


if __name__ == "__main__":
    test_main()
| mit |
Creworker/FreeCAD | src/Tools/MakeNewBuildNbr.py | 16 | 2546 | # FreeCAD MakeNewBuildNbr script
# (c) 2002 Jürgen Riegel
#
# Increase the Build Number in Version.h
#***************************************************************************
#* (c) Jürgen Riegel (juergen.riegel@web.de) 2002 *
#* *
#* This file is part of the FreeCAD CAx development system. *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Library General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* FreeCAD is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with FreeCAD; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#* Juergen Riegel 2002 *
#***************************************************************************/
import time

# reading the last Version information
# Version.h is expected to hold exactly five lines: major, minor, build,
# disdate and a trailing line.
[FCVersionMajor,FCVersionMinor,FCVersionBuild,FCVersionDisDa,dummy] = open("../Version.h",'r').readlines()
# increasing build number
# Columns 0-22 of the build line are the "#define ..." prefix; the numeric
# value starts at column 23 (the trailing newline is stripped with [:-1]).
BuildNumber = int(FCVersionBuild[23:-1]) +1
# writing new Version.h File
# NOTE(review): Python 2 backticks (`x` == repr(x)) and unclosed file
# handles throughout -- kept as-is; this script is Python-2-only.
open("../Version.h",'w').writelines([FCVersionMajor,FCVersionMinor,FCVersionBuild[:23]+`BuildNumber`+'\n',FCVersionDisDa[:23]+ '"'+time.asctime()+'"'])
# writing the ChangeLog.txt
open("../ChangeLog.txt",'a').write("\nVersion: V"+FCVersionMajor[23:-1]+"."+FCVersionMinor[23:-1]+"B"+`BuildNumber`+" Date: "+time.asctime()+' +++++++++++++++++++++++++++++++\n')
| lgpl-2.1 |
gc3-uzh-ch/ganglia | gmetad-python/Gmetad/gmetad_element.py | 15 | 5497 | #/*******************************************************************************
#* Portions Copyright (C) 2008 Novell, Inc. All rights reserved.
#*
#* Redistribution and use in source and binary forms, with or without
#* modification, are permitted provided that the following conditions are met:
#*
#* - Redistributions of source code must retain the above copyright notice,
#* this list of conditions and the following disclaimer.
#*
#* - Redistributions in binary form must reproduce the above copyright notice,
#* this list of conditions and the following disclaimer in the documentation
#* and/or other materials provided with the distribution.
#*
#* - Neither the name of Novell, Inc. nor the names of its
#* contributors may be used to endorse or promote products derived from this
#* software without specific prior written permission.
#*
#* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
#* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
#* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
#* ARE DISCLAIMED. IN NO EVENT SHALL Novell, Inc. OR THE CONTRIBUTORS
#* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
#* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
#* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
#* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
#* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
#* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
#* POSSIBILITY OF SUCH DAMAGE.
#*
#* Authors: Matt Ryan (mrayn novell.com)
#* Brad Nicholes (bnicholes novell.com)
#******************************************************************************/
import copy
class Element:
    ''' This class implements the node element that is used to create the
        data store tree structure.  Attribute keys are stored lower-cased so
        all attribute lookups are case-insensitive. '''

    def generateKey(vals):
        ''' Generate a node key from the node id and name. '''
        if isinstance(vals, list):
            return ':'.join(vals)
        return vals
    generateKey = staticmethod(generateKey)

    def __init__(self, id, attrs, tag=None):
        ''' Initialize the node from an id, an attribute dict and an
            optional tag (the tag defaults to the id). '''
        self.id = id
        if tag is None:
            self.tag = id
        else:
            self.tag = tag
        # Lower-case all attribute keys for case-insensitive access.
        self.attrs = {}
        self.lastReportedTime = 0
        for k, v in attrs.items():
            self.attrs[k.lower()] = v
        self.children = {}
        self.gridDepth = -1

    def __setitem__(self, k, v):
        ''' Add a child node, or refresh the existing child with that key. '''
        try:
            self.children[k].update(v)
        except KeyError:
            self.children[k] = v

    def __getitem__(self, k):
        ''' Retrieve a specific child node. '''
        return self.children[k]

    def update(self, elem):
        ''' Refresh this node's attributes from a newer node *elem*. '''
        for k in self.attrs.keys():
            try:
                self.attrs[k] = elem.attrs[k]
            except KeyError:
                # BUGFIX: a missing attribute raises KeyError (the original
                # caught ValueError, which dict lookups never raise).
                # Attributes absent from *elem* are left unchanged.
                pass

    def __str__(self):
        ''' Generate the string key representation of this node. '''
        if 'name' in self.attrs:
            return Element.generateKey([self.id, self.attrs['name']])
        return Element.generateKey(self.id)

    def __iter__(self):
        ''' Iterate over this node's child values. '''
        return iter(self.children.values())

    def __copy__(self):
        ''' Shallow copy: duplicate the attribute dict, drop the children. '''
        cp = Element(self.id, {})
        for k in self.attrs.keys():
            # Keys are already lower-cased; shallow-copy each value.
            cp.attrs[k.lower()] = copy.copy(self.attrs[k])
        return cp

    def summaryCopy(self, id=None, tag=None):
        ''' Create a copy of this node usable as a summary node. '''
        attrs = {}
        # Copy only the attributes that are meaningful for a summary node.
        for k in self.attrs.keys():
            if k.lower() in ['name', 'sum', 'num', 'type', 'units', 'slope', 'source']:
                attrs[k.lower()] = self.attrs[k]
        # Summary counters always restart from zero (previously only zeroed
        # when the node had at least one attribute).
        attrs['sum'] = 0
        attrs['num'] = 0
        cp = Element(self.id, attrs, tag)
        # The summary node references the original children, not copies.
        cp.children = self.children
        return cp

    def getAttr(self, attr):
        ''' Return the value of *attr* (case-insensitive), or None. '''
        return self.attrs.get(attr.lower())

    def getAttrs(self):
        ''' Return the underlying (lower-cased key) attribute dict. '''
        return self.attrs

    def setAttr(self, attr, val):
        ''' Set the value of *attr* (stored lower-cased). '''
        self.attrs[attr.lower()] = val

    def incAttr(self, attr, val):
        ''' Increment the numeric attribute *attr* by *val*;
            log (but swallow) any failure. '''
        try:
            self.attrs[attr.lower()] += val
        except Exception as e:
            print('Can not increment attribute ' + str(e))

    def getSummaryData(self):
        ''' Return the attached summary data, or None if none was set. '''
        try:
            return self.summaryData
        except AttributeError:
            # BUGFIX: narrowed from a bare except; only a missing attribute
            # means "no summary data".
            return None
| bsd-3-clause |
Tony-Wang/YaYaNLP | test/test_organization_recognition.py | 1 | 2674 | # coding=utf-8
__author__ = 'tony'
from unittest import TestCase
from yaya.config import Config
from yaya.recognition import person_recognition
from yaya.recognition import place_recognition
from yaya.recognition import organization_recognition
from yaya.seg.viterbi import viterbi
from yaya.seg.wordnet import WordNet, Vertex, gen_word_net, dump_vertexs
from yaya.seg.segment import traditional_to_simplified
class TestOrgRecognition(TestCase):
    # Unit tests for yaya's organization-name recognition pipeline.

    def gen_word(self, text):
        """Build the coarse word net for *text* and Viterbi-decode it,
        storing the intermediate results on the test instance."""
        self.text = text
        self.word_net = WordNet(self.text)
        # Build the coarse segmentation word net.
        gen_word_net(self.text, self.word_net)
        # Viterbi decode over the coarse net.
        self.vertexs = viterbi(self.word_net.vertexs)
        self.word_net_optimum = WordNet(self.text, vertexs=self.vertexs)

    def test_recognition_1_level(self):
        # Organization recognition alone should fuse the company name.
        text = u"济南杨铭宇餐饮管理有限公司是由杨先生创办的餐饮企业"
        self.gen_word(text)
        # vertexs = persion_recognition.recognition(vertexs, word_net_optimum, word_net)
        # word_net_optimum = WordNet(text, vertexs=vertexs)
        organization_recognition.recognition(self.vertexs, self.word_net_optimum, self.word_net)
        vertexs = viterbi(self.word_net_optimum.vertexs)
        self.assertIn(Vertex(u"济南杨铭宇餐饮管理有限公司", attribute=u"nt 1"), vertexs)

    def test_recognition_2_level(self):
        # Person and place recognition run first, then organization
        # recognition on a fresh optimum net built from their output.
        text = u"济南杨铭宇餐饮管理有限公司是由杨先生创办的餐饮企业"
        self.gen_word(text)
        person_recognition.recognition(self.vertexs, self.word_net_optimum, self.word_net)
        place_recognition.recognition(self.vertexs, self.word_net_optimum, self.word_net)
        word_net_optimum = WordNet(self.text, vertexs=self.vertexs)
        vertexs = organization_recognition.recognition(self.vertexs, word_net_optimum, self.word_net)
        # viterbi(word_net_optimum.vertexs)
        dump_vertexs(vertexs)
        self.assertIn(Vertex(u"济南杨铭宇餐饮管理有限公司", attribute=u"nt 1"), vertexs)

    def test_organization_recognition(self):
        # Traditional-Chinese input is first converted to simplified.
        text = traditional_to_simplified(u"馬總統上午前往陸軍航空601旅,")
        Config.debug = True
        self.gen_word(text)
        person_recognition.recognition(self.vertexs, self.word_net_optimum, self.word_net)
        place_recognition.recognition(self.vertexs, self.word_net_optimum, self.word_net)
        word_net_optimum = WordNet(self.text, vertexs=self.vertexs)
        vertexs = organization_recognition.recognition(self.vertexs, word_net_optimum, self.word_net)
        dump_vertexs(vertexs)
        self.assertIn(Vertex(u"陆军航空601旅", attribute=u"nt 1"), vertexs)
| apache-2.0 |
codervince/flashingredlight | env/lib/python2.7/site-packages/gunicorn/six.py | 320 | 27344 | """Utilities for writing code that runs on Python 2 and 3"""
# Copyright (c) 2010-2014 Benjamin Peterson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
import functools
import operator
import sys
import types
__author__ = "Benjamin Peterson <benjamin@python.org>"
__version__ = "1.8.0"


# Useful for very coarse version differentiation.
# These two flags gate every 2-vs-3 branch in the rest of the module.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
# Version-neutral aliases: tuples of types for isinstance() checks plus the
# platform's maximum container size.
if PY3:
    string_types = str,
    integer_types = int,
    class_types = type,
    text_type = str
    binary_type = bytes

    MAXSIZE = sys.maxsize
else:
    string_types = basestring,
    integer_types = (int, long)
    class_types = (type, types.ClassType)
    text_type = unicode
    binary_type = str

    if sys.platform.startswith("java"):
        # Jython always uses 32 bits.
        MAXSIZE = int((1 << 31) - 1)
    else:
        # It's possible to have sizeof(long) != sizeof(Py_ssize_t).
        # Probe: len() must fit in Py_ssize_t, so a __len__ of 2**31
        # overflows on 32-bit builds and succeeds on 64-bit ones.
        class X(object):

            def __len__(self):
                return 1 << 31
        try:
            len(X())
        except OverflowError:
            # 32-bit
            MAXSIZE = int((1 << 31) - 1)
        else:
            # 64-bit
            MAXSIZE = int((1 << 63) - 1)
        del X
def _add_doc(func, doc):
    """Attach *doc* as the docstring of *func*."""
    setattr(func, "__doc__", doc)
def _import_module(name):
    """Import module, returning the module after the last dot."""
    # __import__ returns the top-level package for dotted names, so fetch
    # the fully-dotted module object from sys.modules instead.
    __import__(name)
    return sys.modules[name]
class _LazyDescr(object):
    """Descriptor that resolves its value on first access, caches it on the
    instance, then removes itself from the class so it never runs again.
    Subclasses supply _resolve()."""

    def __init__(self, name):
        self.name = name

    def __get__(self, obj, tp):
        result = self._resolve()
        setattr(obj, self.name, result)  # Invokes __set__.
        # This is a bit ugly, but it avoids running this again.
        delattr(obj.__class__, self.name)
        return result
class MovedModule(_LazyDescr):
    """Lazy reference to a module that lives under different names on
    Python 2 (*old*) and Python 3 (*new*, defaulting to *name*)."""

    def __init__(self, name, old, new=None):
        super(MovedModule, self).__init__(name)
        if PY3:
            if new is None:
                new = name
            self.mod = new
        else:
            self.mod = old

    def _resolve(self):
        return _import_module(self.mod)

    def __getattr__(self, attr):
        # Resolve the real module lazily and cache the looked-up attribute
        # on this wrapper so later accesses skip __getattr__.
        _module = self._resolve()
        value = getattr(_module, attr)
        setattr(self, attr, value)
        return value
class _LazyModule(types.ModuleType):
    """Module subclass whose attributes are _LazyDescr instances listed in
    _moved_attributes; dir() reflects those names."""

    def __init__(self, name):
        super(_LazyModule, self).__init__(name)
        self.__doc__ = self.__class__.__doc__

    def __dir__(self):
        attrs = ["__doc__", "__name__"]
        attrs += [attr.name for attr in self._moved_attributes]
        return attrs

    # Subclasses should override this
    _moved_attributes = []
class MovedAttribute(_LazyDescr):
    """Lazy reference to an attribute that lives in different modules (and
    possibly under different names) on Python 2 and Python 3."""

    def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
        super(MovedAttribute, self).__init__(name)
        if PY3:
            if new_mod is None:
                new_mod = name
            self.mod = new_mod
            # The attribute name defaults to old_attr, then to name itself.
            if new_attr is None:
                if old_attr is None:
                    new_attr = name
                else:
                    new_attr = old_attr
            self.attr = new_attr
        else:
            self.mod = old_mod
            if old_attr is None:
                old_attr = name
            self.attr = old_attr

    def _resolve(self):
        module = _import_module(self.mod)
        return getattr(module, self.attr)
class _SixMetaPathImporter(object):
    """
    A meta path importer to import six.moves and its submodules.

    This class implements a PEP302 finder and loader. It should be compatible
    with Python 2.5 and all existing versions of Python3
    """

    def __init__(self, six_module_name):
        self.name = six_module_name
        # Maps fully-qualified names ("<six>.moves...") to module objects
        # or MovedModule placeholders registered via _add_module().
        self.known_modules = {}

    def _add_module(self, mod, *fullnames):
        # Register *mod* under one or more names relative to the six package.
        for fullname in fullnames:
            self.known_modules[self.name + "." + fullname] = mod

    def _get_module(self, fullname):
        return self.known_modules[self.name + "." + fullname]

    def find_module(self, fullname, path=None):
        # PEP302 finder: claim only the names we registered.
        if fullname in self.known_modules:
            return self
        return None

    def __get_module(self, fullname):
        try:
            return self.known_modules[fullname]
        except KeyError:
            raise ImportError("This loader does not know module " + fullname)

    def load_module(self, fullname):
        try:
            # in case of a reload
            return sys.modules[fullname]
        except KeyError:
            pass
        mod = self.__get_module(fullname)
        if isinstance(mod, MovedModule):
            # Placeholder: resolve to the real renamed module.
            mod = mod._resolve()
        else:
            mod.__loader__ = self
        sys.modules[fullname] = mod
        return mod

    def is_package(self, fullname):
        """
        Return true, if the named module is a package.

        We need this method to get correct spec objects with
        Python 3.4 (see PEP451)
        """
        return hasattr(self.__get_module(fullname), "__path__")

    def get_code(self, fullname):
        """Return None

        Required, if is_package is implemented"""
        self.__get_module(fullname)  # eventually raises ImportError
        return None
    get_source = get_code  # same as get_code
_importer = _SixMetaPathImporter(__name__)
class _MovedItems(_LazyModule):
    """Lazy loading of moved objects"""
    __path__ = []  # mark as package


# Table of everything available as six.moves.*: MovedAttribute entries for
# renamed functions/classes, MovedModule entries for renamed modules.
_moved_attributes = [
    MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
    MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
    MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
    MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
    MovedAttribute("intern", "__builtin__", "sys"),
    MovedAttribute("map", "itertools", "builtins", "imap", "map"),
    MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
    MovedAttribute("reload_module", "__builtin__", "imp", "reload"),
    MovedAttribute("reduce", "__builtin__", "functools"),
    MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
    MovedAttribute("StringIO", "StringIO", "io"),
    MovedAttribute("UserDict", "UserDict", "collections"),
    MovedAttribute("UserList", "UserList", "collections"),
    MovedAttribute("UserString", "UserString", "collections"),
    MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
    MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
    MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
    MovedModule("builtins", "__builtin__"),
    MovedModule("configparser", "ConfigParser"),
    MovedModule("copyreg", "copy_reg"),
    MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
    MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"),
    MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
    MovedModule("http_cookies", "Cookie", "http.cookies"),
    MovedModule("html_entities", "htmlentitydefs", "html.entities"),
    MovedModule("html_parser", "HTMLParser", "html.parser"),
    MovedModule("http_client", "httplib", "http.client"),
    MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
    MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
    MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
    MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
    MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
    MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
    MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
    MovedModule("cPickle", "cPickle", "pickle"),
    MovedModule("queue", "Queue"),
    MovedModule("reprlib", "repr"),
    MovedModule("socketserver", "SocketServer"),
    MovedModule("_thread", "thread", "_thread"),
    MovedModule("tkinter", "Tkinter"),
    MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
    MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
    MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
    MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
    MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
    MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
    MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
    MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
    MovedModule("tkinter_colorchooser", "tkColorChooser",
                "tkinter.colorchooser"),
    MovedModule("tkinter_commondialog", "tkCommonDialog",
                "tkinter.commondialog"),
    MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
    MovedModule("tkinter_font", "tkFont", "tkinter.font"),
    MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
    MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
                "tkinter.simpledialog"),
    MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
    MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
    MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
    MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
    MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
    MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
    MovedModule("winreg", "_winreg"),
]
# Install each entry as a lazy attribute of _MovedItems; MovedModule entries
# are also registered with the meta-path importer as "moves.<name>".
for attr in _moved_attributes:
    setattr(_MovedItems, attr.name, attr)
    if isinstance(attr, MovedModule):
        _importer._add_module(attr, "moves." + attr.name)
del attr

_MovedItems._moved_attributes = _moved_attributes

moves = _MovedItems(__name__ + ".moves")
_importer._add_module(moves, "moves")
class Module_six_moves_urllib_parse(_LazyModule):
    """Lazy loading of moved objects in six.moves.urllib_parse"""


# Names scattered across urlparse/urllib on Python 2, unified in
# urllib.parse on Python 3.
_urllib_parse_moved_attributes = [
    MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
    MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
    MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
    MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
    MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
    MovedAttribute("urljoin", "urlparse", "urllib.parse"),
    MovedAttribute("urlparse", "urlparse", "urllib.parse"),
    MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
    MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
    MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
    MovedAttribute("quote", "urllib", "urllib.parse"),
    MovedAttribute("quote_plus", "urllib", "urllib.parse"),
    MovedAttribute("unquote", "urllib", "urllib.parse"),
    MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
    MovedAttribute("urlencode", "urllib", "urllib.parse"),
    MovedAttribute("splitquery", "urllib", "urllib.parse"),
    MovedAttribute("splittag", "urllib", "urllib.parse"),
    MovedAttribute("splituser", "urllib", "urllib.parse"),
    MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
    MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
    MovedAttribute("uses_params", "urlparse", "urllib.parse"),
    MovedAttribute("uses_query", "urlparse", "urllib.parse"),
    MovedAttribute("uses_relative", "urlparse", "urllib.parse"),
]
for attr in _urllib_parse_moved_attributes:
    setattr(Module_six_moves_urllib_parse, attr.name, attr)
del attr

Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes

# Reachable both as six.moves.urllib_parse and six.moves.urllib.parse.
_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
                      "moves.urllib_parse", "moves.urllib.parse")
class Module_six_moves_urllib_error(_LazyModule):
    """Lazy loading of moved objects in six.moves.urllib_error"""


# Exceptions from urllib/urllib2 on Python 2, urllib.error on Python 3.
_urllib_error_moved_attributes = [
    MovedAttribute("URLError", "urllib2", "urllib.error"),
    MovedAttribute("HTTPError", "urllib2", "urllib.error"),
    MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
]
for attr in _urllib_error_moved_attributes:
    setattr(Module_six_moves_urllib_error, attr.name, attr)
del attr

Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes

_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
                      "moves.urllib_error", "moves.urllib.error")
class Module_six_moves_urllib_request(_LazyModule):
    """Lazy loading of moved objects in six.moves.urllib_request"""


# Names from urllib/urllib2 on Python 2, urllib.request on Python 3.
_urllib_request_moved_attributes = [
    MovedAttribute("urlopen", "urllib2", "urllib.request"),
    MovedAttribute("install_opener", "urllib2", "urllib.request"),
    MovedAttribute("build_opener", "urllib2", "urllib.request"),
    MovedAttribute("pathname2url", "urllib", "urllib.request"),
    MovedAttribute("url2pathname", "urllib", "urllib.request"),
    MovedAttribute("getproxies", "urllib", "urllib.request"),
    MovedAttribute("Request", "urllib2", "urllib.request"),
    MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
    MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
    MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
    MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
    MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
    MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
    MovedAttribute("FileHandler", "urllib2", "urllib.request"),
    MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
    MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
    MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
    MovedAttribute("urlretrieve", "urllib", "urllib.request"),
    MovedAttribute("urlcleanup", "urllib", "urllib.request"),
    MovedAttribute("URLopener", "urllib", "urllib.request"),
    MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
    MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
]
for attr in _urllib_request_moved_attributes:
    setattr(Module_six_moves_urllib_request, attr.name, attr)
del attr

Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes

_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
                      "moves.urllib_request", "moves.urllib.request")
class Module_six_moves_urllib_response(_LazyModule):

    """Lazy loading of moved objects in six.moves.urllib_response"""


# The four response-wrapper classes moved from urllib to urllib.response
# in Python 3.
_urllib_response_moved_attributes = [
    MovedAttribute("addbase", "urllib", "urllib.response"),
    MovedAttribute("addclosehook", "urllib", "urllib.response"),
    MovedAttribute("addinfo", "urllib", "urllib.response"),
    MovedAttribute("addinfourl", "urllib", "urllib.response"),
]
for _attr in _urllib_response_moved_attributes:
    setattr(Module_six_moves_urllib_response, _attr.name, _attr)
del _attr

Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes

_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
                      "moves.urllib_response", "moves.urllib.response")
class Module_six_moves_urllib_robotparser(_LazyModule):

    """Lazy loading of moved objects in six.moves.urllib_robotparser"""


# Python 2's top-level robotparser module became urllib.robotparser.
_urllib_robotparser_moved_attributes = [
    MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
]
for _attr in _urllib_robotparser_moved_attributes:
    setattr(Module_six_moves_urllib_robotparser, _attr.name, _attr)
del _attr

Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes

_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
                      "moves.urllib_robotparser", "moves.urllib.robotparser")
class Module_six_moves_urllib(types.ModuleType):

    """Create a six.moves.urllib namespace that resembles the Python 3 namespace"""

    __path__ = []  # mark as package
    # Each submodule is the lazy shim registered earlier with the importer.
    parse = _importer._get_module("moves.urllib_parse")
    error = _importer._get_module("moves.urllib_error")
    request = _importer._get_module("moves.urllib_request")
    response = _importer._get_module("moves.urllib_response")
    robotparser = _importer._get_module("moves.urllib_robotparser")

    def __dir__(self):
        # Only the five urllib submodules are part of the public namespace.
        return ["parse", "error", "request", "response", "robotparser"]


_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
                      "moves.urllib")
def add_move(move):
    """Add an item to six.moves.

    ``move`` is a MovedAttribute/MovedModule-style descriptor; it becomes
    available on the lazy ``six.moves`` module under ``move.name``.
    """
    setattr(_MovedItems, move.name, move)
def remove_move(name):
    """Remove item from six.moves.

    Raises AttributeError if ``name`` was never registered.
    """
    try:
        delattr(_MovedItems, name)
    except AttributeError:
        # Not a registered move; it may have been assigned directly onto the
        # moves module's namespace, so try deleting it from there as well.
        try:
            del moves.__dict__[name]
        except KeyError:
            raise AttributeError("no such move, %r" % (name,))
# The introspection attributes of functions/bound methods were renamed in
# Python 3; choose the right spelling once so the attrgetters built below
# work on either interpreter.
if PY3:
    _meth_func, _meth_self = "__func__", "__self__"
    _func_closure, _func_code = "__closure__", "__code__"
    _func_defaults, _func_globals = "__defaults__", "__globals__"
else:
    _meth_func, _meth_self = "im_func", "im_self"
    _func_closure, _func_code = "func_closure", "func_code"
    _func_defaults, _func_globals = "func_defaults", "func_globals"
# ``next`` became a builtin in Python 2.6; on older interpreters fall back
# to invoking the iterator's .next() method directly.
try:
    advance_iterator = next
except NameError:
    def advance_iterator(iterator):
        return iterator.next()
next = advance_iterator

# ``callable`` was removed in early Python 3 releases; emulate it by
# scanning the MRO for a __call__ slot when the builtin is absent.
try:
    callable = callable
except NameError:
    def callable(obj):
        return any("__call__" in base.__dict__ for base in type(obj).__mro__)
if PY3:
    def get_unbound_function(unbound):
        # Python 3 has no unbound methods; plain functions pass through.
        return unbound

    create_bound_method = types.MethodType

    Iterator = object
else:
    def get_unbound_function(unbound):
        return unbound.im_func

    def create_bound_method(func, obj):
        return types.MethodType(func, obj, obj.__class__)

    class Iterator(object):
        # Bridge base class: lets Python 3-style __next__ implementations
        # satisfy Python 2's .next() iteration protocol.
        def next(self):
            return type(self).__next__(self)

    callable = callable
_add_doc(get_unbound_function,
         """Get the function out of a possibly unbound function""")

# Version-neutral accessors built from the attribute names selected above.
get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_closure = operator.attrgetter(_func_closure)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
get_function_globals = operator.attrgetter(_func_globals)
if PY3:
    # Python 3: the dict view objects are wrapped in iter() so callers get a
    # real iterator on both major versions.
    def iterkeys(d, **kw):
        return iter(d.keys(**kw))

    def itervalues(d, **kw):
        return iter(d.values(**kw))

    def iteritems(d, **kw):
        return iter(d.items(**kw))

    def iterlists(d, **kw):
        # For mapping types exposing .lists() (multi-value mappings).
        return iter(d.lists(**kw))
else:
    def iterkeys(d, **kw):
        return iter(d.iterkeys(**kw))

    def itervalues(d, **kw):
        return iter(d.itervalues(**kw))

    def iteritems(d, **kw):
        return iter(d.iteritems(**kw))

    def iterlists(d, **kw):
        return iter(d.iterlists(**kw))

_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
_add_doc(iteritems,
         "Return an iterator over the (key, value) pairs of a dictionary.")
_add_doc(iterlists,
         "Return an iterator over the (key, [values]) pairs of a dictionary.")
if PY3:
    def b(s):
        # latin-1 maps code points 0-255 one-to-one onto bytes.
        return s.encode("latin-1")

    def u(s):
        return s
    unichr = chr
    if sys.version_info[1] <= 1:
        def int2byte(i):
            return bytes((i,))
    else:
        # This is about 2x faster than the implementation above on 3.2+
        int2byte = operator.methodcaller("to_bytes", 1, "big")
    byte2int = operator.itemgetter(0)
    indexbytes = operator.getitem
    iterbytes = iter
    import io
    StringIO = io.StringIO
    BytesIO = io.BytesIO
else:
    def b(s):
        return s
    # Workaround for standalone backslash

    def u(s):
        return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
    unichr = unichr
    int2byte = chr

    def byte2int(bs):
        return ord(bs[0])

    def indexbytes(buf, i):
        return ord(buf[i])

    def iterbytes(buf):
        return (ord(byte) for byte in buf)
    import StringIO
    StringIO = BytesIO = StringIO.StringIO
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
if PY3:
    # On Python 3 ``exec`` is an ordinary builtin function.
    exec_ = getattr(moves.builtins, "exec")

    def reraise(tp, value, tb=None):
        if value is None:
            value = tp()
        if value.__traceback__ is not tb:
            raise value.with_traceback(tb)
        raise value
else:
    def exec_(_code_, _globs_=None, _locs_=None):
        """Execute code in a namespace."""
        if _globs_ is None:
            # Default to the *caller's* globals/locals, one frame up.
            frame = sys._getframe(1)
            _globs_ = frame.f_globals
            if _locs_ is None:
                _locs_ = frame.f_locals
            del frame
        elif _locs_ is None:
            _locs_ = _globs_
        exec("""exec _code_ in _globs_, _locs_""")

    # The three-argument ``raise`` statement is a syntax error on Python 3,
    # so the Python 2 reraise has to be hidden inside an exec'd string.
    exec_("""def reraise(tp, value, tb=None):
    raise tp, value, tb
""")
print_ = getattr(moves.builtins, "print", None)
if print_ is None:
    def print_(*args, **kwargs):
        """The new-style print function for Python 2.4 and 2.5."""
        fp = kwargs.pop("file", sys.stdout)
        if fp is None:
            # print(file=None) is a documented no-op.
            return

        def write(data):
            if not isinstance(data, basestring):
                data = str(data)
            # If the file has an encoding, encode unicode with it.
            if (isinstance(fp, file) and
                    isinstance(data, unicode) and
                    fp.encoding is not None):
                errors = getattr(fp, "errors", None)
                if errors is None:
                    errors = "strict"
                data = data.encode(fp.encoding, errors)
            fp.write(data)
        want_unicode = False
        sep = kwargs.pop("sep", None)
        if sep is not None:
            if isinstance(sep, unicode):
                want_unicode = True
            elif not isinstance(sep, str):
                raise TypeError("sep must be None or a string")
        end = kwargs.pop("end", None)
        if end is not None:
            if isinstance(end, unicode):
                want_unicode = True
            elif not isinstance(end, str):
                raise TypeError("end must be None or a string")
        if kwargs:
            raise TypeError("invalid keyword arguments to print()")
        # Promote everything to unicode if any argument/separator is unicode.
        if not want_unicode:
            for arg in args:
                if isinstance(arg, unicode):
                    want_unicode = True
                    break
        if want_unicode:
            newline = unicode("\n")
            space = unicode(" ")
        else:
            newline = "\n"
            space = " "
        if sep is None:
            sep = space
        if end is None:
            end = newline
        for i, arg in enumerate(args):
            if i:
                write(sep)
            write(arg)
        write(end)
_add_doc(reraise, """Reraise an exception.""")
# functools.wraps only started setting __wrapped__ in Python 3.4; on older
# interpreters wrap it so the attribute is always present.
if sys.version_info[:2] >= (3, 4):
    wraps = functools.wraps
else:
    def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
              updated=functools.WRAPPER_UPDATES):
        def decorator(func):
            func = functools.wraps(wrapped)(func)
            func.__wrapped__ = wrapped
            return func
        return decorator
def with_metaclass(meta, *bases):
    """Create a base class with a metaclass."""
    # Trick: return a throwaway class whose metaclass replaces itself.  When
    # the caller derives from the temporary class, _Shim.__new__ runs once
    # and builds the real class with the real metaclass and bases.
    class _Shim(meta):

        def __new__(cls, name, this_bases, d):
            return meta(name, bases, d)
    return type.__new__(_Shim, 'temporary_class', (), {})
def add_metaclass(metaclass):
    """Class decorator for creating a class with a metaclass."""
    def rebuild(cls):
        # Re-create the class from scratch under the new metaclass, copying
        # its namespace but dropping descriptors the new class will get
        # automatically.
        namespace = dict(cls.__dict__)
        slots = namespace.get('__slots__')
        if slots is not None:
            for slot in ([slots] if isinstance(slots, str) else slots):
                namespace.pop(slot)
        namespace.pop('__dict__', None)
        namespace.pop('__weakref__', None)
        return metaclass(cls.__name__, cls.__bases__, namespace)
    return rebuild
# Complete the moves implementation.
# This code is at the end of this module to speed up module loading.
# Turn this module into a package.
__path__ = []  # required for PEP 302 and PEP 451
__package__ = __name__  # see PEP 366 @ReservedAssignment
if globals().get("__spec__") is not None:
    __spec__.submodule_search_locations = []  # PEP 451 @UndefinedVariable
# Remove other six meta path importers, since they cause problems. This can
# happen if six is removed from sys.modules and then reloaded. (Setuptools does
# this for some reason.)
if sys.meta_path:
    for i, importer in enumerate(sys.meta_path):
        # Here's some real nastiness: Another "instance" of the six module might
        # be floating around. Therefore, we can't use isinstance() to check for
        # the six meta path importer, since the other six instance will have
        # inserted an importer with different class.
        if (type(importer).__name__ == "_SixMetaPathImporter" and
                importer.name == __name__):
            del sys.meta_path[i]
            break
    # Avoid leaking the loop variables as module attributes.
    del i, importer
# Finally, add the importer to the meta path import hook.
sys.meta_path.append(_importer)
| mit |
lihui7115/ChromiumGStreamerBackend | infra/scripts/legacy/site_config/config_default.py | 6 | 7736 | # Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Seeds a number of variables defined in chromium_config.py.
The recommended way is to fork this file and use a custom DEPS forked from
config/XXX/DEPS with the right configuration data."""
import os
import re
import socket
# Root directory holding per-master service account credential files
# (joined with Base.service_account_file in service_account_path).
SERVICE_ACCOUNTS_PATH = '/creds/service_accounts'
class classproperty(object):
    """Descriptor exposing a function as a read-only class-level property.

    The getter is invoked with the owning *class* (never the instance), so
    e.g. is_production_host can be defined once on Base and still evaluate
    correctly for every subclass.
    """

    def __init__(self, getter):
        self.getter = getter

    def __get__(self, instance, owner):
        # ``owner`` is the class the attribute was reached through; any
        # instance is deliberately ignored.
        return self.getter(owner)
class Master(object):
    """Shared repository URLs and service endpoints for all master configs."""

    # Repository URLs used by the SVNPoller and 'gclient config'.
    server_url = 'http://src.chromium.org'
    repo_root = '/svn'
    git_server_url = 'https://chromium.googlesource.com'

    # External repos (templates take the project name).
    googlecode_url = 'http://%s.googlecode.com/svn'
    sourceforge_url = 'https://svn.code.sf.net/p/%(repo)s/code'
    googlecode_revlinktmpl = 'https://code.google.com/p/%s/source/browse?r=%s'

    # Directly fetches from anonymous Blink svn server.
    webkit_root_url = 'http://src.chromium.org/blink'
    nacl_trunk_url = 'http://src.chromium.org/native_client/trunk'
    llvm_url = 'http://llvm.org/svn/llvm-project'

    # Perf Dashboard upload URL.
    dashboard_upload_url = 'https://chromeperf.appspot.com'

    # Actually for Chromium OS slaves.
    chromeos_url = git_server_url + '/chromiumos.git'

    # Default domain for emails to come from and domains to which emails
    # can be sent.
    master_domain = 'example.com'
    permitted_domains = ('example.com',)

    # Your smtp server to enable mail notifications.
    smtp = 'smtp'

    # By default, bot_password will be filled in by config.GetBotPassword().
    bot_password = None

    # Fake urls to make various factories happy.
    trunk_internal_url = None
    trunk_internal_url_src = None
    slave_internal_url = None
    git_internal_server_url = None
    syzygy_internal_url = None
    v8_internal_url = None
class Base(object):
    """Master base template.

    Contains stubs for variables that all masters must define."""

    # Base service offset for 'master_port'
    MASTER_PORT = 2
    # Base service offset for 'slave_port'
    SLAVE_PORT = 3
    # Base service offset for 'master_port_alt'
    MASTER_PORT_ALT = 4
    # Base service offset for 'try_job_port'
    TRY_JOB_PORT = 5

    # A BuildBucket bucket to poll.
    buildbucket_bucket = None

    # Master address. You should probably copy this file in another svn repo
    # so you can override this value on both the slaves and the master.
    master_host = 'localhost'

    @classproperty
    def current_host(cls):
        # Fully-qualified name of the machine this code is running on.
        return socket.getfqdn()

    @classproperty
    def in_production(cls):
        # True-ish (a Match object) when running on a golo master host.
        return re.match(r'master.*\.golo\.chromium\.org', cls.current_host)

    # Only report that we are running on a master if the master_host (even when
    # master_host is overridden by a subclass) is the same as the current host.
    @classproperty
    def is_production_host(cls):
        return cls.current_host == cls.master_host

    # 'from:' field for emails sent from the server.
    from_address = 'nobody@example.com'

    # Additional email addresses to send gatekeeper (automatic tree closage)
    # notifications. Unnecessary for experimental masters and try servers.
    tree_closing_notification_recipients = []

    @classproperty
    def master_port(cls):
        return cls._compose_port(cls.MASTER_PORT)

    @classproperty
    def slave_port(cls):
        # Which port slaves use to connect to the master.
        return cls._compose_port(cls.SLAVE_PORT)

    @classproperty
    def master_port_alt(cls):
        # The alternate read-only page. Optional.
        return cls._compose_port(cls.MASTER_PORT_ALT)

    @classproperty
    def try_job_port(cls):
        return cls._compose_port(cls.TRY_JOB_PORT)

    @classmethod
    def _compose_port(cls, service):
        """Returns: The port number for 'service' from the master's static config.

        Port numbers are mapped of the form:
        XYYZZ
        where X is the service offset, YY the master host id and ZZ the
        per-master id.  Subclasses must define master_port_base and
        master_port_id for this to resolve.
        """
        return (
            (service * 10000) +         # X
            (cls.master_port_base * 100) +  # YY
            cls.master_port_id)         # ZZ

    # Name of this master's credential file under SERVICE_ACCOUNTS_PATH,
    # or None when the master has no service account.
    service_account_file = None

    @classproperty
    def service_account_path(cls):
        if cls.service_account_file is None:
            return None
        return os.path.join(SERVICE_ACCOUNTS_PATH, cls.service_account_file)
## Per-master configs.
class Master1(Base):
    """Chromium master."""
    master_host = 'master1.golo.chromium.org'
    master_port_base = 1
    from_address = 'buildbot@chromium.org'
    tree_closing_notification_recipients = [
        'chromium-build-failure@chromium-gatekeeper-sentry.appspotmail.com']
    # App Engine status endpoints for the Chromium tree.
    base_app_url = 'https://chromium-status.appspot.com'
    tree_status_url = base_app_url + '/status'
    store_revisions_url = base_app_url + '/revisions'
    last_good_url = base_app_url + '/lkgr'
    last_good_blink_url = 'http://blink-status.appspot.com/lkgr'
class Master2(Base):
    """Legacy ChromeOS master."""
    master_host = 'master2.golo.chromium.org'
    master_port_base = 2
    tree_closing_notification_recipients = [
        'chromeos-build-failures@google.com']
    from_address = 'buildbot@chromium.org'
class Master2a(Base):
    """Chromeos master."""
    master_host = 'master2a.golo.chromium.org'
    master_port_base = 15
    tree_closing_notification_recipients = [
        'chromeos-build-failures@google.com']
    from_address = 'buildbot@chromium.org'
class Master3(Base):
    """Client master."""
    master_host = 'master3.golo.chromium.org'
    master_port_base = 3
    tree_closing_notification_recipients = []
    from_address = 'buildbot@chromium.org'
class Master4(Base):
    """Try server master."""
    master_host = 'master4.golo.chromium.org'
    master_port_base = 4
    tree_closing_notification_recipients = []
    from_address = 'tryserver@chromium.org'
    code_review_site = 'https://codereview.chromium.org'
class Master4a(Base):
    """Try server master."""
    master_host = 'master4a.golo.chromium.org'
    master_port_base = 14
    tree_closing_notification_recipients = []
    from_address = 'tryserver@chromium.org'
    code_review_site = 'https://codereview.chromium.org'
## Native Client related
class NaClBase(Master3):
    """Base class for Native Client masters."""
    tree_closing_notification_recipients = ['bradnelson@chromium.org']
    base_app_url = 'https://nativeclient-status.appspot.com'
    tree_status_url = base_app_url + '/status'
    store_revisions_url = base_app_url + '/revisions'
    last_good_url = base_app_url + '/lkgr'
    perf_base_url = 'http://build.chromium.org/f/client/perf'
## ChromiumOS related
class ChromiumOSBase(Master2):
    """Legacy base class for ChromiumOS masters"""
    base_app_url = 'https://chromiumos-status.appspot.com'
    tree_status_url = base_app_url + '/status'
    store_revisions_url = base_app_url + '/revisions'
    last_good_url = base_app_url + '/lkgr'
class ChromiumOSBase2a(Master2a):
    """Base class for ChromiumOS masters"""
    base_app_url = 'https://chromiumos-status.appspot.com'
    tree_status_url = base_app_url + '/status'
    store_revisions_url = base_app_url + '/revisions'
    last_good_url = base_app_url + '/lkgr'
| bsd-3-clause |
ivan-fedorov/intellij-community | python/lib/Lib/pawt/colors.py | 112 | 4500 | from java.awt import Color
# Named AWT color constants matching the standard CSS/X11 color palette,
# exposed as module attributes for Jython GUI code (pawt).
aliceblue = Color(240, 248, 255)
antiquewhite = Color(250, 235, 215)
aqua = Color(0, 255, 255)
aquamarine = Color(127, 255, 212)
azure = Color(240, 255, 255)
beige = Color(245, 245, 220)
bisque = Color(255, 228, 196)
black = Color(0, 0, 0)
blanchedalmond = Color(255, 235, 205)
blue = Color(0, 0, 255)
blueviolet = Color(138, 43, 226)
brown = Color(165, 42, 42)
burlywood = Color(222, 184, 135)
cadetblue = Color(95, 158, 160)
chartreuse = Color(127, 255, 0)
chocolate = Color(210, 105, 30)
coral = Color(255, 127, 80)
cornflowerblue = Color(100, 149, 237)
cornsilk = Color(255, 248, 220)
crimson = Color(220, 20, 60)
cyan = Color(0, 255, 255)
darkblue = Color(0, 0, 139)
darkcyan = Color(0, 139, 139)
darkgoldenrod = Color(184, 134, 11)
darkgray = Color(169, 169, 169)
darkgreen = Color(0, 100, 0)
darkkhaki = Color(189, 183, 107)
darkmagenta = Color(139, 0, 139)
darkolivegreen = Color(85, 107, 47)
darkorange = Color(255, 140, 0)
darkorchid = Color(153, 50, 204)
darkred = Color(139, 0, 0)
darksalmon = Color(233, 150, 122)
darkseagreen = Color(143, 188, 143)
darkslateblue = Color(72, 61, 139)
darkslategray = Color(47, 79, 79)
darkturquoise = Color(0, 206, 209)
darkviolet = Color(148, 0, 211)
deeppink = Color(255, 20, 147)
deepskyblue = Color(0, 191, 255)
dimgray = Color(105, 105, 105)
dodgerblue = Color(30, 144, 255)
firebrick = Color(178, 34, 34)
floralwhite = Color(255, 250, 240)
forestgreen = Color(34, 139, 34)
fuchsia = Color(255, 0, 255)
gainsboro = Color(220, 220, 220)
ghostwhite = Color(248, 248, 255)
gold = Color(255, 215, 0)
goldenrod = Color(218, 165, 32)
gray = Color(128, 128, 128)
green = Color(0, 128, 0)
greenyellow = Color(173, 255, 47)
honeydew = Color(240, 255, 240)
hotpink = Color(255, 105, 180)
indianred = Color(205, 92, 92)
indigo = Color(75, 0, 130)
ivory = Color(255, 255, 240)
khaki = Color(240, 230, 140)
lavender = Color(230, 230, 250)
lavenderblush = Color(255, 240, 245)
lawngreen = Color(124, 252, 0)
lemonchiffon = Color(255, 250, 205)
lightblue = Color(173, 216, 230)
lightcoral = Color(240, 128, 128)
lightcyan = Color(224, 255, 255)
lightgoldenrodyellow = Color(250, 250, 210)
lightgreen = Color(144, 238, 144)
lightgrey = Color(211, 211, 211)
lightpink = Color(255, 182, 193)
lightsalmon = Color(255, 160, 122)
lightseagreen = Color(32, 178, 170)
lightskyblue = Color(135, 206, 250)
lightslategray = Color(119, 136, 153)
lightsteelblue = Color(176, 196, 222)
lightyellow = Color(255, 255, 224)
lime = Color(0, 255, 0)
limegreen = Color(50, 205, 50)
linen = Color(250, 240, 230)
magenta = Color(255, 0, 255)
maroon = Color(128, 0, 0)
mediumaquamarine = Color(102, 205, 170)
mediumblue = Color(0, 0, 205)
mediumorchid = Color(186, 85, 211)
mediumpurple = Color(147, 112, 219)
mediumseagreen = Color(60, 179, 113)
mediumslateblue = Color(123, 104, 238)
mediumspringgreen = Color(0, 250, 154)
mediumturquoise = Color(72, 209, 204)
mediumvioletred = Color(199, 21, 133)
midnightblue = Color(25, 25, 112)
mintcream = Color(245, 255, 250)
mistyrose = Color(255, 228, 225)
moccasin = Color(255, 228, 181)
navajowhite = Color(255, 222, 173)
navy = Color(0, 0, 128)
oldlace = Color(253, 245, 230)
olive = Color(128, 128, 0)
olivedrab = Color(107, 142, 35)
orange = Color(255, 165, 0)
orangered = Color(255, 69, 0)
orchid = Color(218, 112, 214)
palegoldenrod = Color(238, 232, 170)
palegreen = Color(152, 251, 152)
paleturquoise = Color(175, 238, 238)
palevioletred = Color(219, 112, 147)
papayawhip = Color(255, 239, 213)
peachpuff = Color(255, 218, 185)
peru = Color(205, 133, 63)
pink = Color(255, 192, 203)
plum = Color(221, 160, 221)
powderblue = Color(176, 224, 230)
purple = Color(128, 0, 128)
red = Color(255, 0, 0)
rosybrown = Color(188, 143, 143)
royalblue = Color(65, 105, 225)
saddlebrown = Color(139, 69, 19)
salmon = Color(250, 128, 114)
sandybrown = Color(244, 164, 96)
seagreen = Color(46, 139, 87)
seashell = Color(255, 245, 238)
sienna = Color(160, 82, 45)
silver = Color(192, 192, 192)
skyblue = Color(135, 206, 235)
slateblue = Color(106, 90, 205)
slategray = Color(112, 128, 144)
snow = Color(255, 250, 250)
springgreen = Color(0, 255, 127)
steelblue = Color(70, 130, 180)
tan = Color(210, 180, 140)
teal = Color(0, 128, 128)
thistle = Color(216, 191, 216)
tomato = Color(255, 99, 71)
turquoise = Color(64, 224, 208)
violet = Color(238, 130, 238)
wheat = Color(245, 222, 179)
white = Color(255, 255, 255)
whitesmoke = Color(245, 245, 245)
yellow = Color(255, 255, 0)
yellowgreen = Color(154, 205, 50)
# Keep only the color constants in the module namespace.
del Color
| apache-2.0 |
kynikos/outspline | src/outspline/conf/plugins/wxdbsearch.py | 1 | 1334 | # Outspline - A highly modular and extensible outliner.
# Copyright (C) 2011 Dario Giovannetti <dev@dariogiovannetti.net>
#
# This file is part of Outspline.
#
# Outspline is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Outspline is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Outspline. If not, see <http://www.gnu.org/licenses/>.
from collections import OrderedDict as OD
# Default configuration tree for the wxdbsearch plugin: a general section
# (plugin enablement) followed by its global and contextual keyboard
# shortcut tables.  Each shortcut group is an (options, subsections) pair.
data = (
    OD((
        ("enabled", "on"),
    )),
    OD((
        ("GlobalShortcuts", (
            OD((
                ("new_search", "Ctrl+f"),
                ("start_search", ""),
                ("find_item", ""),
                ("edit_item", ""),
            )),
            OD()
        )),
        ("ContextualShortcuts", (
            OD((
                ("search", "s"),
                ("find", "f"),
                ("edit", "e"),
            )),
            OD()
        )),
    ))
)
| gpl-3.0 |
t794104/ansible | lib/ansible/modules/network/dellos6/dellos6_facts.py | 50 | 15435 | #!/usr/bin/python
#
# (c) 2015 Peter Sprygada, <psprygada@ansible.com>
# Copyright (c) 2016 Dell Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Module metadata and documentation blocks; these strings are parsed by
# ansible-doc, not executed, and must be kept verbatim.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = """
---
module: dellos6_facts
version_added: "2.2"
author: "Abirami N (@abirami-n)"
short_description: Collect facts from remote devices running Dell EMC Networking OS6
description:
- Collects a base set of device facts from a remote device that
is running OS6. This module prepends all of the
base network fact keys with C(ansible_net_<fact>). The facts
module will always collect a base set of facts from the device
and can enable or disable collection of additional facts.
extends_documentation_fragment: dellos6
options:
gather_subset:
description:
- When supplied, this argument will restrict the facts collected
to a given subset. Possible values for this argument include
all, hardware, config, and interfaces. Can specify a list of
values to include a larger subset. Values can also be used
with an initial C(M(!)) to specify that a specific subset should
not be collected.
default: [ '!config' ]
"""
EXAMPLES = """
# Collect all facts from the device
- dellos6_facts:
gather_subset: all
# Collect only the config and default facts
- dellos6_facts:
gather_subset:
- config
# Do not collect hardware facts
- dellos6_facts:
gather_subset:
- "!interfaces"
"""
RETURN = """
ansible_net_gather_subset:
description: The list of fact subsets collected from the device.
returned: always.
type: list
# default
ansible_net_model:
description: The model name returned from the device.
returned: always.
type: str
ansible_net_serialnum:
description: The serial number of the remote device.
returned: always.
type: str
ansible_net_version:
description: The operating system version running on the remote device.
returned: always.
type: str
ansible_net_hostname:
description: The configured hostname of the device.
returned: always.
type: str
ansible_net_image:
description: The image file that the device is running.
returned: always
type: str
# hardware
ansible_net_memfree_mb:
description: The available free memory on the remote device in MB.
returned: When hardware is configured.
type: int
ansible_net_memtotal_mb:
description: The total memory on the remote device in MB.
returned: When hardware is configured.
type: int
# config
ansible_net_config:
description: The current active config from the device.
returned: When config is configured.
type: str
# interfaces
ansible_net_interfaces:
description: A hash of all interfaces running on the system.
returned: When interfaces is configured.
type: dict
ansible_net_neighbors:
description: The list of LLDP neighbors from the remote device.
returned: When interfaces is configured.
type: dict
"""
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.dellos6.dellos6 import run_commands
from ansible.module_utils.network.dellos6.dellos6 import dellos6_argument_spec, check_args
from ansible.module_utils.six import iteritems
class FactsBase(object):
    """Base fact collector: executes a fixed list of CLI commands on the
    device and stores the raw responses for subclass parsers."""

    COMMANDS = []

    def __init__(self, module):
        self.module = module
        self.facts = {}
        self.responses = None

    def populate(self):
        # Fetch the output of every command in COMMANDS in one batch; the
        # order of self.responses mirrors COMMANDS.
        self.responses = run_commands(self.module, self.COMMANDS, check_rc=False)

    def run(self, cmd):
        # Ad-hoc execution of a single extra command.
        return run_commands(self.module, cmd, check_rc=False)
class Default(FactsBase):
    """Always-collected facts: version, serial number, model, image file
    name and hostname."""

    COMMANDS = [
        'show version',
        'show running-config | include hostname'
    ]

    def populate(self):
        super(Default, self).populate()
        data = self.responses[0]
        self.facts['version'] = self.parse_version(data)
        self.facts['serialnum'] = self.parse_serialnum(data)
        self.facts['model'] = self.parse_model(data)
        self.facts['image'] = self.parse_image(data)
        hdata = self.responses[1]
        self.facts['hostname'] = self.parse_hostname(hdata)

    def parse_version(self, data):
        """Parse HW/SW version pairs from 'show version' output.

        Returns a dict with a "Version" list holding one entry per image
        table row.  (Previously the list was re-created on every row, so
        only the last row survived, and a non-matching row crashed with
        AttributeError.)
        """
        facts = dict()
        match = re.search(r'HW Version(.+)\s(\d+)', data)
        # Everything after the column-separator line is the image table.
        temp, temp_next = data.split('---- ----------- ----------- -------------- --------------')
        facts["Version"] = list()
        for en in temp_next.splitlines():
            if en == '':
                continue
            match_image = re.search(r'^(\S+)\s+(\S+)\s+(\S+)\s+(\S+)', en)
            if match_image is None:
                # Skip trailing/partial lines that don't look like a row.
                continue
            fact = dict()
            # NOTE(review): assumes the 'HW Version' header line is always
            # present when the table has rows — matches original behavior.
            fact['HW Version'] = match.group(2)
            fact['SW Version'] = match_image.group(4)
            facts["Version"].append(fact)
        return facts

    def parse_hostname(self, data):
        """Extract the hostname from 'show running-config | include hostname'."""
        match = re.search(r'\S+\s(\S+)', data, re.M)
        if match:
            return match.group(1)

    def parse_model(self, data):
        """Extract the system model ID."""
        match = re.search(r'System Model ID(.+)\s([A-Z0-9]*)\n', data, re.M)
        if match:
            return match.group(2)

    def parse_image(self, data):
        """Extract the active image file name."""
        match = re.search(r'Image File(.+)\s([A-Z0-9a-z_.]*)\n', data)
        if match:
            return match.group(2)

    def parse_serialnum(self, data):
        """Extract the chassis serial number."""
        match = re.search(r'Serial Number(.+)\s([A-Z0-9]*)\n', data)
        if match:
            return match.group(2)
class Hardware(FactsBase):
    """Memory statistics collected via 'show memory cpu'."""

    COMMANDS = [
        'show memory cpu'
    ]

    def populate(self):
        super(Hardware, self).populate()
        data = self.responses[0]
        match = re.findall(r'\s(\d+)\s', data)
        # Need at least two numbers (total KB then free KB, per the original
        # indexing) — guard on length instead of plain truthiness, which
        # raised IndexError when only one number was present.
        if len(match) >= 2:
            self.facts['memtotal_mb'] = int(match[0]) // 1024
            self.facts['memfree_mb'] = int(match[1]) // 1024
class Config(FactsBase):
    """Captures the device's running configuration as a single fact."""

    COMMANDS = ['show running-config']

    def populate(self):
        super(Config, self).populate()
        # Store the full CLI output verbatim.
        running_config = self.responses[0]
        self.facts['config'] = running_config
class Interfaces(FactsBase):
    # Interface, IP, transceiver and LLDP neighbor facts.  Command order is
    # significant: populate() indexes self.responses positionally.
    COMMANDS = [
        'show interfaces',
        'show interfaces status',
        'show interfaces transceiver properties',
        'show ip int',
        'show lldp',
        'show lldp remote-device all',
        'show version'
    ]

    def populate(self):
        """Build facts['interfaces'] (and 'neighbors' when LLDP is on)."""
        vlan_info = dict()
        super(Interfaces, self).populate()
        data = self.responses[0]
        interfaces = self.parse_interfaces(data)
        desc = self.responses[1]
        properties = self.responses[2]
        vlan = self.responses[3]
        version_info = self.responses[6]
        vlan_info = self.parse_vlan(vlan, version_info)
        self.facts['interfaces'] = self.populate_interfaces(interfaces, desc, properties)
        # VLAN/IP entries are merged into the same 'interfaces' dict.
        self.facts['interfaces'].update(vlan_info)
        if 'LLDP is not enabled' not in self.responses[4]:
            neighbors = self.responses[5]
            self.facts['neighbors'] = self.parse_neighbors(neighbors)

    def parse_vlan(self, vlan, version_info):
        """Map VLAN interfaces to their IP address/mask entries.

        N11-series devices print a single management VLAN block; other
        models print a table split by the dashed separator line.
        """
        facts = dict()
        if "N11" in version_info:
            match = re.search(r'IP Address(.+)\s([0-9.]*)\n', vlan)
            mask = re.search(r'Subnet Mask(.+)\s([0-9.]*)\n', vlan)
            vlan_id_match = re.search(r'Management VLAN ID(.+)\s(\d+)', vlan)
            vlan_id = "Vl" + vlan_id_match.group(2)
            if vlan_id not in facts:
                facts[vlan_id] = list()
            fact = dict()
            fact['address'] = match.group(2)
            fact['masklen'] = mask.group(2)
            facts[vlan_id].append(fact)
        else:
            # Everything after the dashed header row is the address table.
            vlan_info, vlan_info_next = vlan.split('---------- ----- --------------- --------------- -------')
            for en in vlan_info_next.splitlines():
                if en == '':
                    continue
                match = re.search(r'^(\S+)\s+(\S+)\s+(\S+)', en)
                intf = match.group(1)
                if intf not in facts:
                    facts[intf] = list()
                fact = dict()
                matc = re.search(r'^([\w+\s\d]*)\s+(\S+)\s+(\S+)', en)
                fact['address'] = matc.group(2)
                fact['masklen'] = matc.group(3)
                facts[intf].append(fact)
        return facts

    def populate_interfaces(self, interfaces, desc, properties):
        """Combine per-interface details from the three command outputs."""
        facts = dict()
        for key, value in interfaces.items():
            intf = dict()
            intf['description'] = self.parse_description(key, desc)
            intf['macaddress'] = self.parse_macaddress(value)
            intf['mtu'] = self.parse_mtu(value)
            intf['bandwidth'] = self.parse_bandwidth(value)
            intf['mediatype'] = self.parse_mediatype(key, properties)
            intf['duplex'] = self.parse_duplex(value)
            intf['lineprotocol'] = self.parse_lineprotocol(value)
            intf['operstatus'] = self.parse_operstatus(value)
            intf['type'] = self.parse_type(key, properties)
            facts[key] = intf
        return facts

    def parse_neighbors(self, neighbors):
        """Parse 'show lldp remote-device all' into {local_intf: [entries]}."""
        neighbor, neighbor_next = neighbors.split('--------- ------- ------------------- ----------------- -----------------')
        facts = dict()
        for en in neighbor_next.splitlines():
            if en == '':
                continue
            intf = self.parse_lldp_intf(en.split()[0])
            if intf not in facts:
                facts[intf] = list()
            fact = dict()
            fact['port'] = self.parse_lldp_port(en.split()[3])
            # The host column may be missing for some neighbors.
            if (len(en.split()) > 4):
                fact['host'] = self.parse_lldp_host(en.split()[4])
            else:
                fact['host'] = "Null"
            facts[intf].append(fact)
        return facts

    def parse_interfaces(self, data):
        """Split 'show interfaces' output into per-interface text chunks."""
        parsed = dict()
        for line in data.split('\n'):
            if len(line) == 0:
                continue
            else:
                match = re.match(r'Interface Name(.+)\s([A-Za-z0-9/]*)', line)
                if match:
                    key = match.group(2)
                    parsed[key] = line
                else:
                    # Continuation lines belong to the most recent interface.
                    parsed[key] += '\n%s' % line
        return parsed

    def parse_description(self, key, desc):
        """Pull the description column for ``key`` from the status table."""
        desc, desc_next = desc.split('--------- --------------- ------ ------- ---- ------ ----- -- -------------------')
        # Trim the trailing Oob/Port management section off the table.
        if desc_next.find('Oob') > 0:
            desc_val, desc_info = desc_next.split('Oob')
        elif desc_next.find('Port') > 0:
            desc_val, desc_info = desc_next.split('Port')
        for en in desc_val.splitlines():
            if key in en:
                match = re.search(r'^(\S+)\s+(\S+)', en)
                # 'Full'/'N/A' in the second column means no description set.
                if match.group(2) in ['Full', 'N/A']:
                    return "Null"
                else:
                    return match.group(2)

    def parse_macaddress(self, data):
        match = re.search(r'Burned In MAC Address(.+)\s([A-Z0-9.]*)\n', data)
        if match:
            return match.group(2)

    def parse_mtu(self, data):
        match = re.search(r'MTU Size(.+)\s(\d+)\n', data)
        if match:
            return int(match.group(2))

    def parse_bandwidth(self, data):
        match = re.search(r'Port Speed\s*[:\s\.]+\s(\d+)\n', data)
        if match:
            return int(match.group(1))

    def parse_duplex(self, data):
        match = re.search(r'Port Mode\s([A-Za-z]*)(.+)\s([A-Za-z/]*)\n', data)
        if match:
            return match.group(3)

    def parse_mediatype(self, key, properties):
        """Media type column from the transceiver properties table."""
        mediatype, mediatype_next = properties.split('--------- ------- --------------------- --------------------- --------------')
        flag = 1
        for en in mediatype_next.splitlines():
            if key in en:
                flag = 0
                match = re.search(r'^(\S+)\s+(\S+)\s+(\S+)', en)
                if match:
                    strval = match.group(3)
                    return strval
        if flag == 1:
            # Interface not present in the transceiver table.
            return "null"

    def parse_type(self, key, properties):
        """Type column from the transceiver properties table."""
        type_val, type_val_next = properties.split('--------- ------- --------------------- --------------------- --------------')
        flag = 1
        for en in type_val_next.splitlines():
            if key in en:
                flag = 0
                match = re.search(r'^(\S+)\s+(\S+)\s+(\S+)', en)
                if match:
                    strval = match.group(2)
                    return strval
        if flag == 1:
            return "null"

    def parse_lineprotocol(self, data):
        data = data.splitlines()
        for d in data:
            match = re.search(r'^Link Status\s*[:\s\.]+\s(\S+)', d)
            if match:
                return match.group(1)

    def parse_operstatus(self, data):
        # NOTE(review): identical to parse_lineprotocol — both report the
        # 'Link Status' field; kept separate for interface parity.
        data = data.splitlines()
        for d in data:
            match = re.search(r'^Link Status\s*[:\s\.]+\s(\S+)', d)
            if match:
                return match.group(1)

    def parse_lldp_intf(self, data):
        match = re.search(r'^([A-Za-z0-9/]*)', data)
        if match:
            return match.group(1)

    def parse_lldp_host(self, data):
        match = re.search(r'^([A-Za-z0-9-]*)', data)
        if match:
            return match.group(1)

    def parse_lldp_port(self, data):
        match = re.search(r'^([A-Za-z0-9/]*)', data)
        if match:
            return match.group(1)
# Map of gather_subset name -> fact collector class.  The keys double as
# the user-facing values accepted by the module's gather_subset option.
FACT_SUBSETS = dict(
    default=Default,
    hardware=Hardware,
    interfaces=Interfaces,
    config=Config,
)
# Immutable view of the legal subset names, used for validation below.
VALID_SUBSETS = frozenset(FACT_SUBSETS.keys())
def main():
    """main entry point for module execution
    """
    argument_spec = dict(
        gather_subset=dict(default=['!config'], type='list')
    )
    argument_spec.update(dellos6_argument_spec)
    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)
    # Resolve the requested subsets into a run set and a skip set.  An
    # entry of 'all' selects everything; a leading '!' negates an entry.
    to_run = set()
    to_skip = set()
    for entry in module.params['gather_subset']:
        if entry == 'all':
            to_run.update(VALID_SUBSETS)
            continue
        negated = entry.startswith('!')
        name = entry[1:] if negated else entry
        if negated and name == 'all':
            to_skip.update(VALID_SUBSETS)
            continue
        if name not in VALID_SUBSETS:
            module.fail_json(msg='Bad subset')
        if negated:
            to_skip.add(name)
        else:
            to_run.add(name)
    # Gather everything when only exclusions (or nothing) were requested,
    # then drop the exclusions; 'default' facts are always collected.
    if not to_run:
        to_run.update(VALID_SUBSETS)
    to_run.difference_update(to_skip)
    to_run.add('default')
    facts = dict()
    facts['gather_subset'] = list(to_run)
    # Instantiate every selected collector, then let each populate and
    # merge its facts.
    collectors = [FACT_SUBSETS[name](module) for name in to_run]
    for collector in collectors:
        collector.populate()
        facts.update(collector.facts)
    # Namespace every fact key with the standard ansible_net_ prefix.
    ansible_facts = dict()
    for name, value in iteritems(facts):
        ansible_facts['ansible_net_%s' % name] = value
    warnings = list()
    check_args(module, warnings)
    module.exit_json(ansible_facts=ansible_facts, warnings=warnings)
# Allow direct execution; Ansible invokes the module this way.
if __name__ == '__main__':
    main()
| gpl-3.0 |
lunafeng/django | django/contrib/admin/templatetags/admin_modify.py | 342 | 2505 | from django import template
register = template.Library()
@register.inclusion_tag('admin/prepopulated_fields_js.html', takes_context=True)
def prepopulated_fields_js(context):
    """
    Collect the prepopulated_fields declarations for the main admin form
    and for any new (unsaved) inline forms, so the template can render the
    JavaScript that keeps those fields in sync.
    """
    collected = []
    if 'adminform' in context:
        collected.extend(context['adminform'].prepopulated_fields)
    if 'inline_admin_formsets' in context:
        for inline_formset in context['inline_admin_formsets']:
            for inline_form in inline_formset:
                # Only inlines being added (no saved original yet) get
                # prepopulation behaviour.
                if inline_form.original is None:
                    collected.extend(inline_form.prepopulated_fields)
    context.update({'prepopulated_fields': collected})
    return context
@register.inclusion_tag('admin/submit_line.html', takes_context=True)
def submit_row(context):
    """
    Displays the row of buttons for delete and save.
    """
    opts = context['opts']
    change = context['change']
    is_popup = context['is_popup']
    save_as = context['save_as']
    ctx = {
        'opts': opts,
        'is_popup': is_popup,
        'show_save': context.get('show_save', True),
        'preserved_filters': context.get('preserved_filters'),
    }
    # Delete is offered only on an existing object, outside popups, when
    # permitted and not explicitly hidden.
    ctx['show_delete_link'] = (
        not is_popup and context['has_delete_permission'] and
        change and context.get('show_delete', True)
    )
    ctx['show_save_as_new'] = not is_popup and change and save_as
    ctx['show_save_and_add_another'] = (
        context['has_add_permission'] and not is_popup and
        (not save_as or context['add'])
    )
    ctx['show_save_and_continue'] = (
        not is_popup and context['has_change_permission'] and
        context.get('show_save_and_continue', True)
    )
    original = context.get('original')
    if original is not None:
        ctx['original'] = original
    return ctx
@register.filter
def cell_count(inline_admin_form):
    """Returns the number of cells used in a tabular inline"""
    # Start at 1 for the hidden cell holding the 'id' field.
    total = 1
    for fieldset in inline_admin_form:
        # Each field in each line occupies one cell.
        for line in fieldset:
            total += sum(1 for _field in line)
    if inline_admin_form.formset.can_delete:
        # One extra cell for the delete checkbox.
        total += 1
    return total
| bsd-3-clause |
nanolearningllc/edx-platform-cypress-2 | cms/djangoapps/contentstore/features/course-export.py | 58 | 2706 | # pylint: disable=missing-docstring
# pylint: disable=redefined-outer-name
# pylint: disable=unused-argument
from lettuce import world, step
from component_settings_editor_helpers import enter_xml_in_advanced_problem
from nose.tools import assert_true, assert_equal
from contentstore.utils import reverse_usage_url
@step('I go to the export page$')
def i_go_to_the_export_page(step):
    # Navigate via the course tools menu to the export page.
    world.click_tools()
    link_css = 'li.nav-course-tools-export a'
    world.css_click(link_css)
@step('I export the course$')
def i_export_the_course(step):
    # Reuse the navigation step, then trigger the export action.
    step.given('I go to the export page')
    world.css_click('a.action-export')
@step('I edit and enter bad XML$')
def i_enter_bad_xml(step):
    # The <verbatim> tag below is invalid CAPA problem XML; saving it lets
    # later steps exercise the export failure dialog.
    enter_xml_in_advanced_problem(
        step,
        """<problem><h1>Smallest Canvas</h1>
           <p>You want to make the smallest canvas you can.</p>
           <multiplechoiceresponse>
             <choicegroup type="MultipleChoice">
               <choice correct="false"><verbatim><canvas id="myCanvas" width = 10 height = 100> </canvas></verbatim></choice>
               <choice correct="true"><code><canvas id="myCanvas" width = 10 height = 10> </canvas></code></choice>
             </choicegroup>
           </multiplechoiceresponse>
        </problem>"""
    )
@step('I edit and enter an ampersand$')
def i_enter_an_ampersand(step):
    # A bare '&' is not well-formed XML, so the export should fail.
    enter_xml_in_advanced_problem(step, "<problem>&</problem>")
@step('I get an error dialog$')
def get_an_error_dialog(step):
    # The export failure is reported via an error prompt dialog.
    assert_true(world.is_css_present("div.prompt.error"))
@step('I can click to go to the unit with the error$')
def i_click_on_error_dialog(step):
    # Clicking the dialog's primary action should land on the unit page
    # whose problem contains the invalid XML.
    world.css_click("button.action-primary")
    # Build the expected 'Problem <usage-key prefix>' label; the trailing
    # block id is unknown, so only the prefix is compared below.
    problem_string = unicode(world.scenario_dict['COURSE'].id.make_usage_key("problem", 'ignore'))
    problem_string = u"Problem {}".format(problem_string[:problem_string.rfind('ignore')])
    assert_true(
        world.css_html("span.inline-error").startswith(problem_string),
        u"{} does not start with {}".format(
            world.css_html("span.inline-error"), problem_string
        ))
    # we don't know the actual ID of the vertical. So just check that we did go to a
    # vertical page in the course (there should only be one).
    vertical_usage_key = world.scenario_dict['COURSE'].id.make_usage_key("vertical", "test")
    vertical_url = reverse_usage_url('container_handler', vertical_usage_key)
    # Remove the trailing "/None" from the URL - we don't know the course ID, so we just want to
    # check that we visited a vertical URL.
    if vertical_url.endswith("/test") or vertical_url.endswith("@test"):
        vertical_url = vertical_url[:-5]
    assert_equal(1, world.browser.url.count(vertical_url))
| agpl-3.0 |
ebonymarieb/django-calaccess-raw-data | example/toolbox/management/commands/createfielddocissues.py | 29 | 5833 | import os
import time
import calculate
from github import Github
from django.conf import settings
from calaccess_raw import get_model_list
from calaccess_raw.management.commands import CalAccessCommand
from django.contrib.humanize.templatetags.humanize import intcomma
class Command(CalAccessCommand):
    help = 'Create GitHub issues for model fields without documentation'
    def set_options(self, *args, **kwargs):
        """
        Hook up with the GitHub API and prepare to create issues.

        Authenticates with a personal access token read from the
        GITHUB_TOKEN environment variable.
        """
        self.gh = Github(os.getenv('GITHUB_TOKEN'))
        self.org = self.gh.get_organization("california-civic-data-coalition")
        self.repo = self.org.get_repo("django-calaccess-raw-data")
        # Labels and milestone applied to every issue that gets filed.
        self.labels = [
            self.repo.get_label("small"),
            self.repo.get_label("documentation"),
            self.repo.get_label("enhancement"),
        ]
        self.milestone = self.repo.get_milestone(3)
    def handle(self, *args, **kwargs):
        """
        Make it happen.

        Scans every model's fields, reports how many lack documentation,
        and files one GitHub issue per undocumented field.
        """
        self.set_options()
        self.header(
            "Creating GitHub issues for model fields without documentation"
        )
        # Loop through all the models and find any fields without docs
        field_count = 0
        missing_list = []
        for m in get_model_list():
            field_list = m().get_field_list()
            field_count += len(field_list)
            for f in field_list:
                if not self.has_docs(f):
                    missing_list.append((m, f))
        # If everything is done, declare victory
        if not missing_list:
            self.success("All %s fields documented!" % field_count)
            return False
        # If not, loop through the missing and create issues
        missing_count = len(missing_list)
        self.log(
            "- %s/%s (%d%%) of fields lack documentation" % (
                intcomma(missing_count),
                intcomma(field_count),
                calculate.percentage(missing_count, field_count)
            )
        )
        # NOTE(review): the hard-coded [611:] slice presumably skips fields
        # already filed in an earlier run -- confirm before rerunning, or
        # issues may be skipped or duplicated.
        for model, field in missing_list[611:]:
            # For now we are excluding the 'other' model module to
            # avoid overkill
            if model().klass_group != 'other':
                self.create_issue(model, field)
    def has_docs(self, field):
        """
        Test if a Django field has some kind of documentation already.
        Returns True or False
        """
        # 'id' is Django's automatic primary key; nothing to document.
        if field.name == 'id':
            return True
        if field.help_text:
            return True
        # Fall back to the raw verbose_name attribute (may be None).
        if field.__dict__['_verbose_name']:
            return True
        return False
    def create_issue(self, model, field):
        """
        Create a GitHub issue for the provided model and field.
        """
        title = TITLE_TEMPLATE % (field.name, model().klass_name)
        body = BODY_TEMPLATE % (
            field.name,
            model().klass_name,
            model().klass_group,
            model().klass_group,
        )
        self.log("-- Creating issue for %s.%s" % (
            model().klass_name,
            field.name
        )
        )
        self.repo.create_issue(
            title,
            body=body,
            labels=self.labels,
            milestone=self.milestone
        )
        # Throttle requests to stay under GitHub's rate limits.
        time.sleep(2.5)
TITLE_TEMPLATE = """
Add documentation for the ``%s`` field on the ``%s`` database model
""".replace("\n", "")
BODY_TEMPLATE = """
## Your mission
Add documentation for the ``%s`` field on the ``%s`` database model.
## Here's how
**Step 1**: Claim this ticket by leaving a comment below. Tell everyone you're ON IT!
**Step 2**: Open up the file that contains this model. It should be in <a href="https://github.com/california-civic-data-coalition/django-calaccess-raw-data/blob/master/calaccess_raw/models/%s.py">calaccess_raw.models.%s.py</a>.
**Step 3**: Hit the little pencil button in the upper-right corner of the code box to begin editing the file.

**Step 4**: Find this model and field in the file. (Clicking into the box and searching with CTRL-F can help you here.) Once you find it, we expect the field to lack the ``help_text`` field typically used in Django to explain what a field contains.
```python
effect_dt = fields.DateField(
null=True,
db_column="EFFECT_DT"
)
```
**Step 5**: In a separate tab, open up the <a href="Quilmes">official state documentation</a> and find the page that defines all the fields in this model.

**Step 6**: Find the row in that table's definition table that spells out what this field contains. If it lacks documentation. Note that in the ticket and close it now.

**Step 7**: Return to the GitHub tab.
**Step 8**: Add the state's label explaining what's in the field, to our field definition by inserting it a ``help_text`` argument. That should look something like this:
```python
effect_dt = fields.DateField(
null=True,
db_column="EFFECT_DT",
# Add a help_text argument like the one here, but put your string in instead.
help_text="The other values in record were effective as of this date"
)
```
**Step 9**: Scroll down below the code box and describe the change you've made in the commit message. Press the button below.

**Step 10**: Review your changes and create a pull request submitting them to the core team for inclusion.

That's it! Mission accomplished!
"""
| mit |
rustychris/stompy | docs/conf.py | 1 | 5704 | # -*- coding: utf-8 -*-
#
# stompy documentation build configuration file, created by
# sphinx-quickstart on Mon Jun 19 14:24:20 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, u'/home/rusty/src/stompy/stompy')
# -- General configuration ------------------------------------------------
# seems that some module-level code peeks into _tkinter, so it's not enough
# to mock only that.
# Modules mocked out so autodoc can import stompy on machines without the
# GUI/GIS/test dependencies installed.
autodoc_mock_imports = ['_tkinter',
                        'tkinter',
                        'osgeo',
                        'ogr',
                        'osgeo.ogr',
                        'osr',
                        'qgis',
                        'nose' ]
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
              'sphinx.ext.todo',
              'sphinx.ext.mathjax',
              'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'stompy'
copyright = u'2017, Rusty Holleman, et al'
author = u'Rusty Holleman, et al'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.1'
# The full version, including alpha/beta/rc tags.
release = u'0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'stompydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'stompy.tex', u'stompy Documentation',
     u'Rusty Holleman, et al', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'stompy', u'stompy Documentation',
     [author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'stompy', u'stompy Documentation',
     author, 'stompy', 'One line description of project.',
     'Miscellaneous'),
]
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
| mit |
akaariai/django | django/conf/locale/cs/formats.py | 504 | 1702 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
# Czech display formats; these override Django's global defaults when
# localization is active for the 'cs' locale.
DATE_FORMAT = 'j. E Y'
TIME_FORMAT = 'G:i'
DATETIME_FORMAT = 'j. E Y G:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'd.m.Y'
SHORT_DATETIME_FORMAT = 'd.m.Y G:i'
FIRST_DAY_OF_WEEK = 1  # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
    '%d.%m.%Y', '%d.%m.%y',  # '05.01.2006', '05.01.06'
    '%d. %m. %Y', '%d. %m. %y',  # '5. 1. 2006', '5. 1. 06'
    # '%d. %B %Y', '%d. %b. %Y',  # '25. October 2006', '25. Oct. 2006'
]
# Kept ISO formats as one is in first position
TIME_INPUT_FORMATS = [
    '%H:%M:%S',  # '04:30:59'
    '%H.%M',  # '04.30'
    '%H:%M',  # '04:30'
]
DATETIME_INPUT_FORMATS = [
    '%d.%m.%Y %H:%M:%S',  # '05.01.2006 04:30:59'
    '%d.%m.%Y %H:%M:%S.%f',  # '05.01.2006 04:30:59.000200'
    '%d.%m.%Y %H.%M',  # '05.01.2006 04.30'
    '%d.%m.%Y %H:%M',  # '05.01.2006 04:30'
    '%d.%m.%Y',  # '05.01.2006'
    '%d. %m. %Y %H:%M:%S',  # '05. 01. 2006 04:30:59'
    '%d. %m. %Y %H:%M:%S.%f',  # '05. 01. 2006 04:30:59.000200'
    '%d. %m. %Y %H.%M',  # '05. 01. 2006 04.30'
    '%d. %m. %Y %H:%M',  # '05. 01. 2006 04:30'
    '%d. %m. %Y',  # '05. 01. 2006'
    '%Y-%m-%d %H.%M',  # '2006-01-05 04.30'
]
# Czech convention: decimal comma, digits grouped in threes by a
# non-breaking space.
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '\xa0'  # non-breaking space
NUMBER_GROUPING = 3
| bsd-3-clause |
jandebleser/django-wiki | tests/plugins/attachments/test_views.py | 1 | 5144 | from __future__ import print_function, unicode_literals
from io import BytesIO
from django.core.files.uploadedfile import InMemoryUploadedFile
from django.core.urlresolvers import reverse
from ...base import ArticleWebTestBase
class AttachmentTests(ArticleWebTestBase):
    """Integration tests for the wiki attachments plugin views:
    upload, replace (with and without revision pruning) and search."""
    def setUp(self):
        super(AttachmentTests, self).setUp()
        # All tests operate on the root article with a small text fixture.
        self.article = self.root_article
        self.test_data = "This is a plain text file"
        self.test_description = 'My file'
    def _createTxtFilestream(self, strData, **kwargs):
        """
        Helper function to create filestream for upload.
        Parameters :
            strData : str, test string data
        Optional Arguments :
            filename : str, Defaults to 'test.txt'
        """
        filename = kwargs.get('filename', 'test.txt')
        data = strData.encode('utf-8')
        filedata = BytesIO(data)
        filestream = InMemoryUploadedFile(
            filedata,
            None,
            filename,
            'text',
            len(data),
            None
        )
        return filestream
    def _create_test_attachment(self):
        # Upload self.test_data as an attachment on the root article and
        # verify the view redirects back to the attachments index.
        url = reverse('wiki:attachments_index', kwargs={'path': ''})
        filestream = self._createTxtFilestream(self.test_data)
        response = self.c.post(url,
                               {'description': self.test_description,
                                'file': filestream,
                                'save': '1',
                                })
        self.assertRedirects(response, url)
    def test_upload(self):
        """
        Tests that simple file upload uploads correctly
        Uploading a file should preserve the original filename.
        Uploading should not modify file in any way.
        """
        self._create_test_attachment()
        # Check the object was created.
        attachment = self.article.shared_plugins_set.all()[0].attachment
        self.assertEqual(attachment.original_filename, 'test.txt')
        self.assertEqual(attachment.current_revision.file.file.read(), self.test_data.encode('utf-8'))
    def test_replace(self):
        """
        Tests that previous revisions are not deleted
        Tests that only the most recent revision is deleted when
        "replace" is checked.
        """
        # Upload initial file
        url = reverse('wiki:attachments_index', kwargs={'path': ''})
        data = "This is a plain text file"
        filestream = self._createTxtFilestream(data)
        self.c.post(url, {'description': 'My file', 'file': filestream, 'save': '1', })
        attachment = self.article.shared_plugins_set.all()[0].attachment
        # uploading for the first time should mean that there is only one revision.
        self.assertEqual(attachment.attachmentrevision_set.count(), 1)
        # Change url to replacement page.
        url = reverse(
            'wiki:attachments_replace',
            kwargs={'attachment_id': attachment.id, 'article_id': self.article.id}
        )
        # Upload replacement without removing revisions
        replacement_data = data + ' And this is my edit'
        replacement_filestream = self._createTxtFilestream(replacement_data)
        self.c.post(
            url,
            {
                'description': 'Replacement upload',
                'file': replacement_filestream,
            }
        )
        attachment = self.article.shared_plugins_set.all()[0].attachment
        # Revision count should be two
        self.assertEqual(attachment.attachmentrevision_set.count(), 2)
        # Original filenames should not be modified
        self.assertEqual(attachment.original_filename, 'test.txt')
        # Latest revision should equal replacment_data
        self.assertEqual(attachment.current_revision.file.file.read(), replacement_data.encode('utf-8'))
        first_replacement = attachment.current_revision
        # Upload another replacement, this time removing most recent revision
        replacement_data2 = data + ' And this is a different edit'
        replacement_filestream2 = self._createTxtFilestream(replacement_data2)
        self.c.post(
            url,
            {
                'description': 'Replacement upload',
                'file': replacement_filestream2,
                'replace': 'on',
            }
        )
        attachment = self.article.shared_plugins_set.all()[0].attachment
        # Revision count should still be two
        self.assertEqual(attachment.attachmentrevision_set.count(), 2)
        # Latest revision should equal replacment_data2
        self.assertEqual(attachment.current_revision.file.file.read(), replacement_data2.encode('utf-8'))
        # The first replacement should no longer be in the filehistory
        self.assertNotIn(first_replacement, attachment.attachmentrevision_set.all())
    def test_search(self):
        """
        Call the search view
        """
        self._create_test_attachment()
        url = reverse('wiki:attachments_search', kwargs={'path': ''})
        response = self.c.get(url, {'query': self.test_description})
        self.assertContains(response, self.test_description)
| gpl-3.0 |
nashve/mythbox | resources/test/mythboxtest/test_util.py | 7 | 20061 | #
# MythBox for XBMC - http://mythbox.googlecode.com
# Copyright (C) 2011 analogue@yahoo.com
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
import os
import time
import unittest2 as unittest
import mythboxtest
#from mockito import Mock, when, any
from mythbox.util import *
from mythbox.platform import getPlatform, Platform, WindowsPlatform, MacPlatform, UnixPlatform
log = mythboxtest.getLogger('mythbox.unittest')
class RunAsyncDecoratorTest(unittest.TestCase):
    """Tests for the @run_async decorator, which runs the wrapped call on
    a worker thread and returns that Thread to the caller."""
    @run_async
    def print_somedata(self):
        log.debug('starting print_somedata')
        time.sleep(2)
        log.debug('print_somedata: 2 sec passed')
        time.sleep(2)
        log.debug('print_somedata: 2 sec passed')
        time.sleep(2)
        log.debug('finished print_somedata')
    def test_run_async(self):
        # Each call should return immediately with a joinable thread.
        t1 = self.print_somedata()
        log.debug('back in main after t1 spawned')
        t2 = self.print_somedata()
        log.debug('back in main after t2 spawned')
        t3 = self.print_somedata()
        log.debug('back in main after t3 spawned')
        t1.join()
        t2.join()
        t3.join()
        # TODO: assertions
    def test_waitForWorkersToDie(self):
        # setup
        clearWorkers()
        t1 = self.print_somedata()
        log.debug('back in main after t1 spawned')
        t2 = self.print_somedata()
        log.debug('back in main after t2 spawned')
        t3 = self.print_somedata()
        log.debug('back in main after t3 spawned')
        # test
        waitForWorkersToDie()
        # verify
        self.assertFalse(t1.isAlive())
        self.assertFalse(t2.isAlive())
        self.assertFalse(t3.isAlive())
class CoalesceDecoratorTest(unittest.TestCase):
    """Tests for the @coalesce decorator: while one thread is running the
    wrapped method, concurrent callers return immediately without running
    it again; counters track how many times each body actually ran."""
    def setUp(self):
        self.barTimes = 0
        self.fooTimes = 0
        self.bazTimes = 0
    @coalesce
    def foo(self, delay=0.5):
        self.fooTimes += 1
        time.sleep(delay)
    @run_async
    @coalesce
    def bar(self, delay=2):
        self.barTimes += 1
        time.sleep(delay)
    @run_async
    @coalesce
    def baz(self, delay=2):
        self.bazTimes += 1
        time.sleep(delay)
    def test_When_foo_coalesced_on_single_thread_Then_acts_like_plain_old_synchronous_method_call(self):
        self.foo()
        self.foo()
        self.foo()
        self.assertEquals(3, self.fooTimes)
    def test_When_foo_called_by_two_threads_Then_thread1_runs_foo_and_thread2_returns_immediately(self):
        t1 = self.bar(2)
        time.sleep(0.5)
        t2 = self.bar()
        t1.join()
        t2.join()
        self.assertEquals(1, self.barTimes)
    def test_When_bar_called_by_bunch_of_threads_Then_thread1_runs_bar_and_all_others_return_immediately(self):
        t = []
        for i in range(0,20):
            t.append(self.bar())
        for i in range(0,20):
            t[i].join()
        self.assertEquals(1, self.barTimes)
    def test_When_previous_calls_have_been_coalesced_and_completed_and_bar_is_called_Then_runs_bar_again(self):
        t1 = self.bar(2)
        time.sleep(0.5)
        t2 = self.bar()  # coalesced
        self.assertEquals(1, self.barTimes)
        time.sleep(2)  # expire
        t3 = self.bar(1)
        t1.join()
        t2.join()
        t3.join()
        self.assertEquals(2, self.barTimes)
    def test_When_coalescing_bar_Then_should_have_no_affect_on_coalescing_baz(self):
        # Coalescing is tracked per-method, not globally.
        t1 = self.bar(2)
        t3 = self.bar()
        t2 = self.baz(2)
        t4 = self.baz()
        t1.join()
        t2.join()
        t3.join()
        t4.join()
        self.assertEquals(1, self.barTimes)
        self.assertEquals(1, self.bazTimes)
class ModuleTest(unittest.TestCase):
    """Tests for the module-level helpers in mythbox.util: to_kwargs,
    formatSize, formatSeconds, which, and slice."""
    def test_to_kwargs_When_args_not_empty_Then_returns_dict_with_args(self):
        class Boo(object):
            def __init__(self, *args, **kwargs):
                self.one = "1"
                self.two = "2"
                self.three = "3"
        obj = Boo()
        kwargs = to_kwargs(obj, ['three', 'two', 'one'])
        self.assertTrue(kwargs['one'] == obj.one)
        self.assertTrue(kwargs['two'] == obj.two)
        self.assertTrue(kwargs['three'] == obj.three)
    def test_my_understanding_of_mixing_kwargs_with_double_asterisk_on_method_invocation(self):
        def foo(a=1, b=2, c=3):
            self.assertTrue(a == 5)
            self.assertTrue(b == 6)
            self.assertTrue(c == 7)
        foo(a=5, **{'b':6, 'c':7})
    def test_formatSize(self):
        # Second arg toggles GB formatting.
        self.assertEquals("1,024.00 KB", formatSize(1024, False))
        self.assertEquals("100.00 GB", formatSize(1024*1000*100, True))
        self.assertEquals("100,000.00 MB", formatSize(1024*1000*100, False))
        self.assertEquals("1,000.00 GB", formatSize(1024*1000*1000, True))
        self.assertEquals("4,000.00 GB", formatSize(1024*1000*1000*4, True))
        self.assertEquals("8,000.00 GB", formatSize(1024*1000*1000*8, True))
        self.assertEquals("10,000.00 GB", formatSize(1024*1000*1000*10, True))
        self.assertEquals("100,000.00 GB", formatSize(1024*1000*1000*100, True))
    def test_formatSeconds(self):
        self.assertEquals('0s', formatSeconds(0.00))
        self.assertEquals('1s', formatSeconds(1.99))
        self.assertEquals('5m', formatSeconds(60*5))
        self.assertEquals('5m 45s', formatSeconds(60*5+45))
        self.assertEquals('3h 5m 45s', formatSeconds(3*60*60 + 60*5 + 45))
        self.assertEquals('3h', formatSeconds(3*60*60))
        self.assertEquals('3h 59m', formatSeconds(3*60*60 + 60*59))
        self.assertEquals('3h 5s', formatSeconds(3*60*60 + 5))
    def test_which_ExecutableFound(self):
        # Pick an executable that is guaranteed to exist per-platform.
        platform = getPlatform()
        if type(platform) == WindowsPlatform:
            exe = "cmd.exe"
        elif type(platform) in (UnixPlatform, MacPlatform):
            exe = "true"
        else:
            log.warn('Skipping test. Platform not supported')
            return
        exepath = which(exe)
        log.debug('which found %s' % exepath)
        self.assertFalse(exepath is None)
    def test_which_ExecutableNotFound(self):
        platform = getPlatform()
        if type(platform) == WindowsPlatform:
            exe = "bogus_executable_name.exe"
        elif type(platform) in (UnixPlatform, MacPlatform):
            exe = "bogus_executable_name"
        else:
            log.warn("Skipping test. Platform not supported")
            return
        exepath = which(exe)
        self.assertTrue(exepath is None)
    def test_slice_When_items_empty_Then_returns_num_empty_lists(self):
        items = []
        queues = slice(items, 4)
        self.assertEquals(4, len(queues))
        for q in queues:
            self.assertTrue(len(q) == 0)
    def test_slice_When_items_lt_num_Then_returns_num_minus_items_empty_lists_at_end(self):
        items = [1,2]
        queues = slice(items, 4)
        self.assertEquals(4, len(queues))
        self.assertTrue(len(queues[0]) == 1 and queues[0][0] == 1)
        self.assertTrue(len(queues[1]) == 1 and queues[1][0] == 2)
        self.assertTrue(len(queues[2]) == 0 and len(queues[3]) == 0)
    def test_slice_When_items_eq_num_Then_returns_num_lists_with_each_item(self):
        items = [1,2]
        queues = slice(items, 2)
        self.assertEquals(2, len(queues))
        self.assertTrue(len(queues[0]) == 1 and queues[0][0] == 1)
        self.assertTrue(len(queues[1]) == 1 and queues[1][0] == 2)
    def test_slice_When_items_gt_num_Then_returns_num_lists_with_items_wrapping_around(self):
        # Items are dealt round-robin across the queues.
        items = [1,2,3,4]
        queues = slice(items, 2)
        self.assertEquals(2, len(queues))
        self.assertTrue(len(queues[0]) == 2 and queues[0][0] == 1 and queues[0][1] == 3)
        self.assertTrue(len(queues[1]) == 2 and queues[1][0] == 2 and queues[1][1] == 4)
class TimedDecoratorTest(unittest.TestCase):
    """Tests for the @timed decorator, which logs a warning when the
    wrapped call takes longer than one second."""
    def test_DecoratorPrintsOutWarningWhenExecutionTimeExceedsOneSecond(self):
        self.foo()
        # observe results
    @timed
    def foo(self):
        log.debug('waiting 1.2 seconds...')
        time.sleep(1.2)
class SynchronizedDecoratorTest(unittest.TestCase):
    """Tests for the @synchronized decorator.  Each *Lock counter acts as
    a re-entrancy probe: it must read 0 on entry and 1 before exit, which
    only holds when calls to that method are serialized."""
    def setUp(self):
        self.fooLock = 0
        self.barLock = 0
        self.bazLock = 0
    @synchronized
    def foo(self, delay=0.5):
        self.assertEquals(0, self.fooLock)
        self.fooLock += 1
        self.assertEquals(1, self.fooLock)
        time.sleep(delay)
        self.assertEquals(1, self.fooLock)
        self.fooLock -= 1
        self.assertEquals(0, self.fooLock)
    @run_async
    @synchronized
    def bar(self, delay=0.5):
        self.assertEquals(0, self.barLock)
        self.barLock += 1
        self.assertEquals(1, self.barLock)
        time.sleep(delay)
        self.assertEquals(1, self.barLock)
        self.barLock -= 1
        self.assertEquals(0, self.barLock)
    @run_async
    @synchronized
    def baz(self, delay=0.5):
        self.assertEquals(1, self.barLock)  # verify bar() is getting executed concurrently
        time.sleep(delay)
        self.assertEquals(1, self.barLock)  # verify bar() is getting executed concurrently
    def test_When_foo_synchronized_on_single_thread_Then_acts_like_plain_old_synchronous_method_call(self):
        self.foo()
        self.foo()
        self.foo()
    def test_When_bar_called_from_multiple_threads_Then_access_is_synchronized(self):
        t1 = self.bar()
        t2 = self.bar()
        t3 = self.bar()
        t1.join()
        t2.join()
        t3.join()
        # assertions are internal to bar()
    def test_When_syncing_on_bar_Then_doesnt_affect_syncing_on_baz(self):
        # Locks are per-method: baz() may run while bar() holds its lock.
        t1 = self.bar(5)
        time.sleep(1)
        t4 = self.baz(1)
        t5 = self.baz(1)
        t6 = self.baz(1)
        t1.join()
        t4.join()
        t5.join()
        t6.join()
        # assertions are internal to baz()
class NativeTranslatorTest(unittest.TestCase):
    """Tests for NativeTranslator lookups by integer id, str and unicode
    keys, using the translation resources under the current directory."""
    def test_get_ByIntegerIdReturnsString(self):
        translator = NativeTranslator(os.getcwd())
        s = translator.get(0)
        log.debug('localized = %s' % s)
        self.assertEquals('TODO', s)
    def test_get_ByStringReturnsString(self):
        # A key with no translation is returned unchanged.
        translator = NativeTranslator(os.getcwd())
        s = translator.get('MythBox')
        log.debug('localized = %s' % s)
        self.assertEquals('MythBox', s)
    def test_get_ByUnicodeStringReturnsUnicodeString(self):
        translator = NativeTranslator(os.getcwd())
        s = translator.get(u'MythBox')
        #log.debug('localized = %s' % s)
        self.assertEquals(u'MythBox', s)
class BoundedEvictingQueueTest(unittest.TestCase):
    """When full, putting one more item silently evicts the oldest item."""

    def test_put_FillingToCapacityPlusOneEvictsFirstItem(self):
        queue = BoundedEvictingQueue(3)
        # Fill exactly to capacity.
        for item in (1, 2, 3):
            queue.put(item)
        self.assertTrue(queue.full())
        # Capacity + 1: item 1 is evicted and the queue stays full at size 3.
        queue.put(4)
        self.assertTrue(queue.full())
        self.assertEqual(3, queue.qsize())
        # Remaining items drain FIFO starting at 2.
        self.assertEqual(2, queue.get())
        self.assertFalse(queue.full())
        self.assertEqual(3, queue.get())
        self.assertEqual(4, queue.get())
        self.assertTrue(queue.empty())
#
# Requires interactivity
#
#class OnDemandConfigTest(unittest.TestCase):
#
# def test_get_NonExistentKey(self):
# config = util.OnDemandConfig('crap.ini')
# value = config.get('blah')
# log.debug('Value = %s' % value)
class MockPlatform(Platform):
    """
    Mock platform impl that directs unit tests to load resources from the
    ./resources/test/mock* directories
    """

    def _testDir(self, leaf):
        # All mock resources live under <cwd>/resources/test/test_util/<leaf>
        return os.path.join(os.getcwd(), 'resources', 'test', 'test_util', leaf)

    def addLibsToSysPath(self):
        # Nothing to add when running under the unit-test harness.
        pass

    def getName(self):
        return "N/A"

    def getScriptDir(self):
        return self._testDir('xbmc')

    def getScriptDataDir(self):
        return self._testDir('dotxbmc')

    def getHostname(self):
        # Canned hostname keeps tests deterministic.
        return 'hostname'

    def isUnix(self):
        return True

    def getDefaultRecordingsDir(self):
        return ''
class BidiIteratorTest(unittest.TestCase):
    """Exercises BidiIterator's next()/previous()/current()/index() contract."""

    def test_When_list_empty_Then_raise_StopIteration(self):
        # Every accessor on an empty iterator raises; index() is None.
        self.assertRaises(StopIteration, BidiIterator([]).next)
        self.assertRaises(StopIteration, BidiIterator([]).previous)
        self.assertRaises(StopIteration, BidiIterator([]).current)
        self.assertTrue(BidiIterator([]).index() is None)

    def test_When_next_and_len_is_1_Then_return_1(self):
        it = BidiIterator(['a'])
        self.assertEqual('a', it.next())
        self.assertEqual('a', it.current())
        self.assertEqual(0, it.index())

    def test_When_previous_and_len_is_1_Then_raise_StopIteration(self):
        self.assertRaises(StopIteration, BidiIterator(['a']).previous)

    def test_When_next_and_reach_end_of_list_Then_raise_StopIteration(self):
        it = BidiIterator(['a'])
        self.assertEqual('a', it.next())
        self.assertRaises(StopIteration, it.next)

    def test_When_many_next_and_reach_end_of_list_Then_raise_StopIteration(self):
        it = BidiIterator(['a', 'b', 'c', 'd'])
        for expected in ('a', 'b', 'c', 'd'):
            self.assertEqual(expected, it.next())
        self.assertRaises(StopIteration, it.next)

    def test_next_previous_combo(self):
        it = BidiIterator(['a', 'b', 'c', 'd'])
        # Walk forward, step back once, then forward off the end...
        self.assertEqual('a', it.next())
        self.assertEqual('b', it.next())
        self.assertEqual('a', it.previous())
        for expected in ('b', 'c', 'd'):
            self.assertEqual(expected, it.next())
        self.assertRaises(StopIteration, it.next)
        # ...then all the way back off the front.
        for expected in ('c', 'b', 'a'):
            self.assertEqual(expected, it.previous())
        self.assertRaises(StopIteration, it.previous)

    def test_next_with_initial_position_specified(self):
        # Initial position N means next() yields element N+1.
        for pos, expected in ((0, 'b'), (1, 'c'), (2, 'd')):
            self.assertEqual(expected, BidiIterator(['a', 'b', 'c', 'd'], pos).next())
        self.assertEqual(2, BidiIterator(['a', 'b', 'c', 'd'], 2).index())
        self.assertRaises(StopIteration, BidiIterator(['a', 'b', 'c', 'd'], 3).next)

    def test_previous_with_initial_position_specified(self):
        self.assertRaises(StopIteration, BidiIterator(['a', 'b', 'c', 'd'], 0).previous)
        for pos, expected in ((1, 'a'), (2, 'b'), (3, 'c')):
            self.assertEqual(expected, BidiIterator(['a', 'b', 'c', 'd'], pos).previous())

    def test_current_and_len_is_1_Then_raise_StopIteration(self):
        # current() is undefined before the first next() when no initial
        # position was given.
        self.assertRaises(StopIteration, BidiIterator(['a']).current)

    def test_current_after_next_and_len_is_1_Then_return_1(self):
        it = BidiIterator(['a'])
        it.next()
        self.assertEqual('a', it.current())
        self.assertEqual(0, it.index())

    def test_current_with_len_1_and_initial_pos_0_returns_1(self):
        # An explicit initial position makes current() valid immediately.
        self.assertEqual('a', BidiIterator(['a'], 0).current())
class CyclingBidiIteratorTest(unittest.TestCase):
    """CyclingBidiIterator behaves like BidiIterator but wraps at both ends."""

    def test_When_list_empty_Then_raise_StopIteration(self):
        # Wrapping cannot save an empty iterator.
        self.assertRaises(StopIteration, CyclingBidiIterator([]).next)
        self.assertRaises(StopIteration, CyclingBidiIterator([]).previous)

    def test_When_previous_and_len_is_1_Then_always_return_1(self):
        it = CyclingBidiIterator(['a'])
        for _ in range(3):
            self.assertEqual('a', it.previous())

    def test_When_previous_and_len_gt_1_Then_wrap_around(self):
        it = CyclingBidiIterator(['a', 'b', 'c'])
        # Two full reverse laps.
        for expected in ('c', 'b', 'a', 'c', 'b', 'a'):
            self.assertEqual(expected, it.previous())

    def test_When_next_and_len_is_1_Then_always_return_1(self):
        it = CyclingBidiIterator(['a'])
        for _ in range(3):
            self.assertEqual('a', it.next())

    def test_When_next_and_len_gt_1_Then_wrap_around(self):
        it = CyclingBidiIterator(['a', 'b', 'c'])
        # Two full forward laps.
        for expected in ('a', 'b', 'c', 'a', 'b', 'c'):
            self.assertEqual(expected, it.next())

    def test_next_previous_combo(self):
        it = CyclingBidiIterator(['a', 'b', 'c', 'd'])
        # (operation, expected) script covering forward motion, backtracking,
        # and wrap-around in both directions.
        script = [
            (it.next, 'a'), (it.next, 'b'),
            (it.previous, 'a'),
            (it.next, 'b'), (it.next, 'c'), (it.next, 'd'),
            (it.next, 'a'),       # wraps forward to the front
            (it.previous, 'd'),   # wraps backward to the end
            (it.previous, 'c'), (it.previous, 'b'), (it.previous, 'a'),
            (it.previous, 'd'),   # wraps backward again
        ]
        for op, expected in script:
            self.assertEqual(expected, op())

    def test_next_with_initial_position_specified(self):
        for pos, expected in ((0, 'b'), (1, 'c'), (2, 'd'), (3, 'a')):
            self.assertEqual(expected, CyclingBidiIterator(['a', 'b', 'c', 'd'], pos).next())

    def test_previous_with_initial_position_specified(self):
        for pos, expected in ((0, 'd'), (1, 'a'), (2, 'b'), (3, 'c')):
            self.assertEqual(expected, CyclingBidiIterator(['a', 'b', 'c', 'd'], pos).previous())
class TimedCacheDecoratorTest(unittest.TestCase):
    """Verifies @timed_cache serves a cached value inside its window and
    recomputes after the window expires.

    BUG FIX: the original assertions were ``self.assertTrue(2, self.foo())``
    -- assertTrue's first argument (the truthy constant 2) always passes and
    ``self.foo()`` was silently consumed as the failure *message*, so the
    test could never fail.  Replaced with assertEqual on the actual value.
    """

    @timed_cache(seconds=2)
    def foo(self):
        # Each *uncached* invocation bumps the counter, so the return value
        # reveals whether the cache was hit (unchanged) or missed (incremented).
        self.x += 1
        return self.x

    def test_timed_cache_works(self):
        self.x = 1
        # First call is a cache miss: increments 1 -> 2.
        self.assertEqual(2, self.foo())
        time.sleep(1)
        # Still inside the 2-second window: every call is a hit, x stays 2.
        for i in xrange(100):
            self.assertEqual(2, self.foo())
        time.sleep(2)
        # Window expired: next call is a miss, increments 2 -> 3.
        self.assertEqual(3, self.foo())
class MaxThreadsDecoratorTest(unittest.TestCase):
    """Verifies @max_threads(3) caps concurrent executions of the decorated method."""
    @run_async
    @max_threads(3)
    def foo(self):
        # Track how many workers are inside the body; with the cap in place
        # the count should never exceed 3, checked both before and after the
        # simulated 0.2s of work.
        # NOTE(review): ``self.count += 1`` is not atomic and these assertions
        # execute on worker threads (run_async) -- failures raised there may
        # not propagate to the test runner; confirm run_async surfaces them.
        self.count += 1
        self.assertTrue(self.count <= 3)
        time.sleep(0.2)
        self.assertTrue(self.count <= 3)
        self.count -= 1
    def test_max_threads_works(self):
        # Fire 20 async invocations; the decorator should gate them 3 at a time.
        self.count = 0
        workers = []
        for i in xrange(20):
            workers.append(self.foo())
        # Wait for every worker thread to drain before the test ends.
        for w in workers:
            w.join()
| gpl-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.