repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
pombredanne/scrapy | scrapy/commands/fetch.py | 113 | 2063 | from __future__ import print_function
from w3lib.url import is_url
from scrapy.commands import ScrapyCommand
from scrapy.http import Request
from scrapy.exceptions import UsageError
from scrapy.utils.spider import spidercls_for_request, DefaultSpider
class Command(ScrapyCommand):
    """Download a single URL with the Scrapy downloader and dump the result."""

    requires_project = False

    def syntax(self):
        return "[options] <url>"

    def short_desc(self):
        return "Fetch a URL using the Scrapy downloader"

    def long_desc(self):
        return ("Fetch a URL using the Scrapy downloader and print its content "
                "to stdout. You may want to use --nolog to disable logging")

    def add_options(self, parser):
        ScrapyCommand.add_options(self, parser)
        parser.add_option("--spider", dest="spider",
                          help="use this spider")
        parser.add_option("--headers", dest="headers", action="store_true",
                          help="print response HTTP headers instead of body")

    def _print_headers(self, headers, prefix):
        # One line per header value, e.g. "< Content-Type: text/html".
        for name, values in headers.items():
            for single_value in values:
                print('%s %s: %s' % (prefix, name, single_value))

    def _print_response(self, response, opts):
        if not opts.headers:
            print(response.body)
            return
        # Request headers first, a '>' separator, then response headers.
        self._print_headers(response.request.headers, '>')
        print('>')
        self._print_headers(response.headers, '<')

    def run(self, args, opts):
        if len(args) != 1 or not is_url(args[0]):
            raise UsageError()

        def on_response(response):
            self._print_response(response, opts)

        request = Request(args[0], callback=on_response, dont_filter=True)
        # Let every HTTP status through so error pages are printed too.
        request.meta['handle_httpstatus_all'] = True

        spider_loader = self.crawler_process.spider_loader
        if opts.spider:
            spidercls = spider_loader.load(opts.spider)
        else:
            spidercls = spidercls_for_request(spider_loader, request,
                                              DefaultSpider)
        self.crawler_process.crawl(spidercls, start_requests=lambda: [request])
        self.crawler_process.start()
| bsd-3-clause |
MiLk/ansible | lib/ansible/modules/network/illumos/dladm_etherstub.py | 70 | 4639 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Adam Števko <adam.stevko@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: dladm_etherstub
short_description: Manage etherstubs on Solaris/illumos systems.
description:
- Create or delete etherstubs on Solaris/illumos systems.
version_added: "2.2"
author: Adam Števko (@xen0l)
options:
name:
description:
- Etherstub name.
required: true
temporary:
description:
- Specifies that the etherstub is temporary. Temporary etherstubs
do not persist across reboots.
required: false
default: false
choices: [ "true", "false" ]
state:
description:
- Create or delete Solaris/illumos etherstub.
required: false
default: "present"
choices: [ "present", "absent" ]
'''
EXAMPLES = '''
# Create 'stub0' etherstub
- dladm_etherstub:
name: stub0
state: present
# Remove 'stub0 etherstub
- dladm_etherstub:
name: stub0
state: absent
'''
RETURN = '''
name:
description: etherstub name
returned: always
type: string
sample: "switch0"
state:
description: state of the target
returned: always
type: string
sample: "present"
temporary:
description: etherstub's persistence
returned: always
type: boolean
sample: "True"
'''
class Etherstub(object):
    """Thin wrapper around the dladm(1M) etherstub sub-commands."""

    def __init__(self, module):
        self.module = module
        self.name = module.params['name']
        self.temporary = module.params['temporary']
        self.state = module.params['state']

    def etherstub_exists(self):
        """Return True when `dladm show-etherstub <name>` exits 0."""
        cmd = [self.module.get_bin_path('dladm', True),
               'show-etherstub',
               self.name]
        (rc, _, _) = self.module.run_command(cmd)
        return rc == 0

    def create_etherstub(self):
        """Run `dladm create-etherstub [-t] <name>`; return run_command's result."""
        cmd = [self.module.get_bin_path('dladm', True), 'create-etherstub']
        if self.temporary:
            # -t makes the etherstub non-persistent across reboots.
            cmd.append('-t')
        cmd.append(self.name)
        return self.module.run_command(cmd)

    def delete_etherstub(self):
        """Run `dladm delete-etherstub [-t] <name>`; return run_command's result."""
        cmd = [self.module.get_bin_path('dladm', True), 'delete-etherstub']
        if self.temporary:
            cmd.append('-t')
        cmd.append(self.name)
        return self.module.run_command(cmd)
def main():
    """Module entry point: converge the host to the requested etherstub state."""
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(required=True),
            temporary=dict(default=False, type='bool'),
            state=dict(default='present', choices=['absent', 'present']),
        ),
        supports_check_mode=True
    )

    etherstub = Etherstub(module)

    # rc stays None when no dladm command had to be executed.
    rc, out, err = None, '', ''
    result = {
        'name': etherstub.name,
        'state': etherstub.state,
        'temporary': etherstub.temporary,
    }

    if etherstub.state == 'absent':
        if etherstub.etherstub_exists():
            if module.check_mode:
                module.exit_json(changed=True)
            (rc, out, err) = etherstub.delete_etherstub()
            if rc != 0:
                module.fail_json(name=etherstub.name, msg=err, rc=rc)
    elif etherstub.state == 'present':
        if not etherstub.etherstub_exists():
            if module.check_mode:
                module.exit_json(changed=True)
            (rc, out, err) = etherstub.create_etherstub()
            if rc is not None and rc != 0:
                module.fail_json(name=etherstub.name, msg=err, rc=rc)

    # A command was run iff rc was set.
    result['changed'] = rc is not None
    if out:
        result['stdout'] = out
    if err:
        result['stderr'] = err

    module.exit_json(**result)
# Legacy Ansible convention: module_utils are pulled in with a wildcard
# import at the bottom of the file (this provides AnsibleModule).
from ansible.module_utils.basic import *
if __name__ == '__main__':
    main()
| gpl-3.0 |
rottenbytes/Sick-Beard | sickbeard/clients/requests/packages/charade/sjisprober.py | 1182 | 3734 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import SJISDistributionAnalysis
from .jpcntx import SJISContextAnalysis
from .mbcssm import SJISSMModel
from . import constants
class SJISProber(MultiByteCharSetProber):
    # Detects the Shift_JIS (Japanese) encoding.  Three detectors run over
    # the byte stream: a coding state machine that validates byte sequences,
    # plus distribution and context analysers that score character usage.
    def __init__(self):
        MultiByteCharSetProber.__init__(self)
        self._mCodingSM = CodingStateMachine(SJISSMModel)
        self._mDistributionAnalyzer = SJISDistributionAnalysis()
        self._mContextAnalyzer = SJISContextAnalysis()
        self.reset()
    def reset(self):
        MultiByteCharSetProber.reset(self)
        # Only the context analyser is reset here explicitly; the rest is
        # presumably handled by the base-class reset (base not in view).
        self._mContextAnalyzer.reset()
    def get_charset_name(self):
        return "SHIFT_JIS"
    def feed(self, aBuf):
        # Feed one chunk of bytes and return the prober state (eDetecting,
        # eFoundIt or eNotMe).  _mLastChar carries bytes across calls so a
        # character split over a chunk boundary is still analysed.
        aLen = len(aBuf)
        for i in range(0, aLen):
            codingState = self._mCodingSM.next_state(aBuf[i])
            if codingState == constants.eError:
                if constants._debug:
                    sys.stderr.write(self.get_charset_name()
                                     + ' prober hit error at byte ' + str(i)
                                     + '\n')
                # Illegal byte sequence for Shift_JIS: give up on this charset.
                self._mState = constants.eNotMe
                break
            elif codingState == constants.eItsMe:
                self._mState = constants.eFoundIt
                break
            elif codingState == constants.eStart:
                # A complete character was recognized; hand it to the
                # statistical analysers.
                charLen = self._mCodingSM.get_current_charlen()
                if i == 0:
                    # First byte of this chunk: combine with the remembered
                    # trailing byte(s) from the previous chunk.
                    self._mLastChar[1] = aBuf[0]
                    self._mContextAnalyzer.feed(self._mLastChar[2 - charLen:],
                                                charLen)
                    self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
                else:
                    self._mContextAnalyzer.feed(aBuf[i + 1 - charLen:i + 3
                                                     - charLen], charLen)
                    self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1],
                                                     charLen)
        # Remember the last byte for the next feed() call.
        self._mLastChar[0] = aBuf[aLen - 1]
        if self.get_state() == constants.eDetecting:
            # Shortcut: declare a match early once confidence is high enough.
            if (self._mContextAnalyzer.got_enough_data() and
               (self.get_confidence() > constants.SHORTCUT_THRESHOLD)):
                self._mState = constants.eFoundIt
        return self.get_state()
    def get_confidence(self):
        contxtCf = self._mContextAnalyzer.get_confidence()
        distribCf = self._mDistributionAnalyzer.get_confidence()
        # Report the more optimistic of the two analysers.
        return max(contxtCf, distribCf)
| gpl-3.0 |
mou4e/zirconium | tools/telemetry/telemetry/core/possible_browser.py | 21 | 1405 | # Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.core import possible_app
class PossibleBrowser(possible_app.PossibleApp):
    """A browser that can be controlled.

    Instances describe a launchable browser; calling Create() actually
    starts it and returns a handle for manipulating it.
    """

    def __init__(self, browser_type, target_os, supports_tab_control):
        # The browser type is stored as the app type on the base class.
        super(PossibleBrowser, self).__init__(app_type=browser_type,
                                              target_os=target_os)
        self._supports_tab_control = supports_tab_control
        self._credentials_path = None

    def __repr__(self):
        return 'PossibleBrowser(app_type={0})'.format(self.app_type)

    @property
    def browser_type(self):
        return self.app_type

    @property
    def supports_tab_control(self):
        return self._supports_tab_control

    def _InitPlatformIfNeeded(self):
        raise NotImplementedError()

    def Create(self, finder_options):
        raise NotImplementedError()

    def SupportsOptions(self, finder_options):
        """Tests for extension support."""
        raise NotImplementedError()

    def IsRemote(self):
        return False

    def RunRemote(self):
        pass

    def UpdateExecutableIfNeeded(self):
        pass

    def last_modification_time(self):
        # NOTE(review): -1 appears to mean "no known modification time";
        # confirm against callers before relying on it.
        return -1

    def SetCredentialsPath(self, credentials_path):
        self._credentials_path = credentials_path
| bsd-3-clause |
ivansib/sib16 | contrib/linearize/linearize-hashes.py | 1 | 3037 | #!/usr/bin/python
#
# linearize-hashes.py: List blocks in a linear, no-fork version of the chain.
#
# Copyright (c) 2013-2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from __future__ import print_function
import json
import struct
import re
import base64
import httplib
import sys
# Parsed key=value settings from the config file, filled in by __main__.
settings = {}

class BitcoinRPC:
    """Minimal JSON-RPC 1.1 client for a local bitcoind node (Python 2)."""

    def __init__(self, host, port, username, password):
        authpair = "%s:%s" % (username, password)
        self.authhdr = "Basic %s" % (base64.b64encode(authpair))
        self.conn = httplib.HTTPConnection(host, port, False, 30)

    def execute(self, obj):
        """POST `obj` as JSON and return the decoded reply (None if no response)."""
        headers = {'Authorization': self.authhdr,
                   'Content-type': 'application/json'}
        self.conn.request('POST', '/', json.dumps(obj), headers)
        resp = self.conn.getresponse()
        if resp is None:
            print("JSON-RPC: no response", file=sys.stderr)
            return None
        return json.loads(resp.read())

    @staticmethod
    def build_request(idx, method, params):
        """Build a single JSON-RPC 1.1 request object keyed by `idx`."""
        obj = {'version': '1.1',
               'method': method,
               'id': idx}
        obj['params'] = [] if params is None else params
        return obj

    @staticmethod
    def response_is_error(resp_obj):
        """True when the reply carries a non-null 'error' member."""
        return 'error' in resp_obj and resp_obj['error'] is not None
def get_block_hashes(settings, max_blocks_per_call=10000):
    # Print the hash of every block from settings['min_height'] through
    # settings['max_height'] (inclusive), one per line on stdout, querying
    # bitcoind with batched getblockhash calls of at most
    # max_blocks_per_call requests each.
    rpc = BitcoinRPC(settings['host'], settings['port'],
                     settings['rpcuser'], settings['rpcpassword'])
    height = settings['min_height']
    while height < settings['max_height']+1:
        num_blocks = min(settings['max_height']+1-height, max_blocks_per_call)
        batch = []
        for x in range(num_blocks):
            batch.append(rpc.build_request(x, 'getblockhash', [height + x]))
        # The list of request objects is sent as a single JSON-RPC batch.
        reply = rpc.execute(batch)
        for x,resp_obj in enumerate(reply):
            if rpc.response_is_error(resp_obj):
                print('JSON-RPC: error at height', height+x, ': ', resp_obj['error'], file=sys.stderr)
                exit(1)
            assert(resp_obj['id'] == x) # assume replies are in-sequence
            print(resp_obj['result'])
        height += num_blocks
if __name__ == '__main__':
    if len(sys.argv) != 2:
        print("Usage: linearize-hashes.py CONFIG-FILE")
        sys.exit(1)

    # Parse simple "key = value" lines from the config file; a '#' at the
    # start of a line marks a comment and unparseable lines are ignored.
    # `with` guarantees the file is closed even if parsing raises.
    with open(sys.argv[1]) as f:
        for line in f:
            # skip comment lines
            m = re.search(r'^\s*#', line)
            if m:
                continue
            # parse key=value lines
            m = re.search(r'^(\w+)\s*=\s*(\S.*)$', line)
            if m is None:
                continue
            settings[m.group(1)] = m.group(2)

    # Defaults for the optional settings.
    if 'host' not in settings:
        settings['host'] = '127.0.0.1'
    if 'port' not in settings:
        settings['port'] = 1944
    if 'min_height' not in settings:
        settings['min_height'] = 0
    if 'max_height' not in settings:
        settings['max_height'] = 313000
    if 'rpcuser' not in settings or 'rpcpassword' not in settings:
        # BUG FIX: was `file=stderr` (undefined name -> NameError); the
        # intended stream is sys.stderr.
        print("Missing username and/or password in cfg file", file=sys.stderr)
        sys.exit(1)

    # Numeric settings arrive as strings from the cfg parser.
    settings['port'] = int(settings['port'])
    settings['min_height'] = int(settings['min_height'])
    settings['max_height'] = int(settings['max_height'])

    get_block_hashes(settings)
| mit |
BryceARich/TransactionsMR | python/src/cloudstorage/rest_api.py | 63 | 8753 | # Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Base and helper classes for Google RESTful APIs."""
__all__ = ['add_sync_methods']
import logging
import os
import random
import time
from . import api_utils
try:
from google.appengine.api import app_identity
from google.appengine.ext import ndb
except ImportError:
from google.appengine.api import app_identity
from google.appengine.ext import ndb
def _make_sync_method(name):
    """Helper to synthesize a synchronous method from an async method name.

    Used by the @add_sync_methods class decorator below.

    Args:
      name: The name of the asynchronous method to wrap (ends in '_async').

    Returns:
      A method (with first argument 'self') that retrieves and calls
      self.<name>, passing its own arguments, expects it to return a
      Future, and then waits for and returns that Future's result.
    """
    def sync_wrapper(self, *args, **kwds):
        method = getattr(self, name)
        future = method(*args, **kwds)
        return future.get_result()
    return sync_wrapper


def add_sync_methods(cls):
    """Class decorator to add synchronous methods corresponding to async methods.

    This modifies the class in place, adding a synchronous wrapper `foo`
    for every method named `foo_async`.  If a synchronous method of a
    given name already exists (including inherited ones) it is not
    replaced.

    Args:
      cls: A class.

    Returns:
      The same class, modified in place.
    """
    # BUG FIX: snapshot the keys before iterating.  setattr() below mutates
    # cls.__dict__, and mutating a dict while iterating over its keys view
    # raises RuntimeError on Python 3.
    for name in list(cls.__dict__.keys()):
        if name.endswith('_async'):
            sync_name = name[:-6]
            if not hasattr(cls, sync_name):
                setattr(cls, sync_name, _make_sync_method(name))
    return cls
class _AE_TokenStorage_(ndb.Model):
    """Entity to store app_identity tokens in memcache.

    Entities are keyed by '<service_account_id>,<comma-joined scopes>';
    see _RestApi.get_token_async for how they are read and written.
    """
    token = ndb.StringProperty()   # the OAuth access token string
    expires = ndb.FloatProperty()  # expiry time, seconds since the epoch
@ndb.tasklet
def _make_token_async(scopes, service_account_id):
    """Get a fresh authentication token.

    Args:
      scopes: A list of scopes.
      service_account_id: Internal-use only.

    Raises:
      An ndb.Return with a tuple (token, expiration_time) where expiration_time is
      seconds since the epoch.
    """
    # Use the raw app_identity RPC so the tasklet can yield on it instead of
    # blocking the event loop.
    rpc = app_identity.create_rpc()
    app_identity.make_get_access_token_call(rpc, scopes, service_account_id)
    token, expires_at = yield rpc
    # ndb tasklets deliver their result by raising ndb.Return.
    raise ndb.Return((token, expires_at))
class _RestApi(object):
    """Base class for REST-based API wrapper classes.

    This class manages authentication tokens and request retries.  All
    APIs are available as synchronous and async methods; synchronous
    methods are synthesized from async ones by the add_sync_methods()
    function in this module.

    WARNING: Do NOT directly use this api. It's an implementation detail
    and is subject to change at any release.
    """
    def __init__(self, scopes, service_account_id=None, token_maker=None,
                 retry_params=None):
        """Constructor.

        Args:
          scopes: A scope or a list of scopes.
          service_account_id: Internal use only.
          token_maker: An asynchronous function of the form
            (scopes, service_account_id) -> (token, expires).
          retry_params: An instance of api_utils.RetryParams. If None, the
            default for current thread will be used.
        """
        if isinstance(scopes, basestring):
            scopes = [scopes]
        self.scopes = scopes
        self.service_account_id = service_account_id
        self.make_token_async = token_maker or _make_token_async
        if not retry_params:
            retry_params = api_utils._get_default_retry_params()
        self.retry_params = retry_params
        self.user_agent = {'User-Agent': retry_params._user_agent}
        # Random per-instance headroom (seconds) subtracted from the token
        # lifetime so many instances don't all refresh at the same moment
        # (avoids a burst of GET_ACCESS_TOKEN calls).
        self.expiration_headroom = random.randint(60, 240)
    def __getstate__(self):
        """Store state as part of serialization/pickling."""
        # make_token_async is only picklable when it is not the module-level
        # default, hence the None sentinel.
        return {'scopes': self.scopes,
                'id': self.service_account_id,
                'a_maker': (None if self.make_token_async == _make_token_async
                            else self.make_token_async),
                'retry_params': self.retry_params,
                'expiration_headroom': self.expiration_headroom}
    def __setstate__(self, state):
        """Restore state as part of deserialization/unpickling."""
        self.__init__(state['scopes'],
                      service_account_id=state['id'],
                      token_maker=state['a_maker'],
                      retry_params=state['retry_params'])
        # __init__ picked a fresh random headroom; restore the pickled one.
        self.expiration_headroom = state['expiration_headroom']
    @ndb.tasklet
    def do_request_async(self, url, method='GET', headers=None, payload=None,
                         deadline=None, callback=None):
        """Issue one HTTP request.

        It performs async retries using tasklets.

        Args:
          url: the url to fetch.
          method: the method in which to fetch.
          headers: the http headers.
          payload: the data to submit in the fetch.
          deadline: the deadline in which to make the call.
          callback: the call to make once completed.

        Yields:
          The async fetch of the url.
        """
        retry_wrapper = api_utils._RetryWrapper(
            self.retry_params,
            retriable_exceptions=api_utils._RETRIABLE_EXCEPTIONS,
            should_retry=api_utils._should_retry)
        resp = yield retry_wrapper.run(
            self.urlfetch_async,
            url=url,
            method=method,
            headers=headers,
            payload=payload,
            deadline=deadline,
            callback=callback,
            follow_redirects=False)
        raise ndb.Return((resp.status_code, resp.headers, resp.content))
    @ndb.tasklet
    def get_token_async(self, refresh=False):
        """Get an authentication token.

        The token is cached in memcache, keyed by the scopes argument.
        Uses a random token expiration headroom value generated in the
        constructor to eliminate a burst of GET_ACCESS_TOKEN API requests.

        Args:
          refresh: If True, ignore a cached token; default False.

        Yields:
          An authentication token. This token is guaranteed to be non-expired.
        """
        key = '%s,%s' % (self.service_account_id, ','.join(self.scopes))
        ts = yield _AE_TokenStorage_.get_by_id_async(
            key, use_cache=True, use_memcache=True,
            use_datastore=self.retry_params.save_access_token)
        # Refresh when forced, missing, or expiring within the headroom.
        if refresh or ts is None or ts.expires < (
                time.time() + self.expiration_headroom):
            token, expires_at = yield self.make_token_async(
                self.scopes, self.service_account_id)
            timeout = int(expires_at - time.time())
            ts = _AE_TokenStorage_(id=key, token=token, expires=expires_at)
            if timeout > 0:
                # Cache for exactly as long as the token remains valid.
                yield ts.put_async(memcache_timeout=timeout,
                                   use_datastore=self.retry_params.save_access_token,
                                   use_cache=True, use_memcache=True)
        raise ndb.Return(ts.token)
    @ndb.tasklet
    def urlfetch_async(self, url, method='GET', headers=None,
                       payload=None, deadline=None, callback=None,
                       follow_redirects=False):
        """Make an async urlfetch() call.

        This is an async wrapper around urlfetch(). It adds an authentication
        header.

        Args:
          url: the url to fetch.
          method: the method in which to fetch.
          headers: the http headers.
          payload: the data to submit in the fetch.
          deadline: the deadline in which to make the call.
          callback: the call to make once completed.
          follow_redirects: whether or not to follow redirects.

        Yields:
          This returns a Future despite not being decorated with @ndb.tasklet!
        """
        headers = {} if headers is None else dict(headers)
        headers.update(self.user_agent)
        try:
            self.token = yield self.get_token_async()
        # Python 2 'except X, e' syntax: this module targets the GAE 2.7
        # runtime and is not Python 3 compatible as written.
        except app_identity.InternalError, e:
            if os.environ.get('DATACENTER', '').endswith('sandman'):
                # Sandman-based devel setups cannot mint tokens; proceed
                # unauthenticated rather than failing outright.
                self.token = None
                logging.warning('Could not fetch an authentication token in sandman '
                                'based Appengine devel setup; proceeding without one.')
            else:
                raise e
        if self.token:
            headers['authorization'] = 'OAuth ' + self.token
        deadline = deadline or self.retry_params.urlfetch_timeout
        ctx = ndb.get_context()
        resp = yield ctx.urlfetch(
            url, payload=payload, method=method,
            headers=headers, follow_redirects=follow_redirects,
            deadline=deadline, callback=callback)
        raise ndb.Return(resp)
# Synthesize the synchronous variants (do_request, get_token, urlfetch, ...)
# from the *_async methods defined above.
_RestApi = add_sync_methods(_RestApi)
| apache-2.0 |
VitalPet/sale-workflow | sale_quotation_sourcing/tests/test_procurement_group.py | 34 | 5432 | # -*- coding: utf-8 -*-
#
#
# Author: Yannick Vaucher
# Copyright 2015 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
import openerp.tests.common as test_common
from openerp import fields
class TestProcurementGroup(test_common.TransactionCase):
    """Checks how sale order lines sourced from purchase orders are grouped
    into procurement groups when the sale order creates its shipments."""
    def test_sourced_by_no_po(self):
        """ Test it creates only one Procurement Group if not sourced
        Procurement Group will be named after sale's name
        """
        so_line = self.env['sale.order.line'].create({
            'order_id': self.sale.id,
            'name': 'Line',  # required
            'price_unit': 99,  # required
            # no sourced_by
        })
        self.sale.action_ship_create()
        self.assertTrue(so_line.procurement_group_id)
        # Without a sourcing PO the group name is just the sale name.
        self.assertEqual(so_line.procurement_group_id.name,
                         self.sale.name)
    def test_all_sourced_by_a_single_po(self):
        """ Test it creates only one Procurement Group if same PO on all lines
        Procurement Group will be named after sale and purchase names
        """
        so_line1 = self.env['sale.order.line'].create({
            'order_id': self.sale.id,
            'sourced_by': self.po1_line1.id,
            'name': 'Line1',  # required
            'price_unit': 99,  # required
        })
        so_line2 = self.env['sale.order.line'].create({
            'order_id': self.sale.id,
            'sourced_by': self.po1_line2.id,
            'name': 'Line2',  # required
            'price_unit': 99,  # required
        })
        self.sale.action_ship_create()
        self.assertTrue(so_line1.procurement_group_id)
        self.assertTrue(so_line2.procurement_group_id)
        # Ensure we only one procurement group
        self.assertEqual(so_line1.procurement_group_id,
                         so_line2.procurement_group_id)
        # Group name combines the sale name and the (single) PO name.
        self.assertEqual(
            so_line1.procurement_group_id.name,
            self.sale.name + '/' + self.po1.name
        )
    def test_sourced_by_multiple_po(self):
        """ Test it creates one Procurement Group per PO """
        so_line1 = self.env['sale.order.line'].create({
            'order_id': self.sale.id,
            'sourced_by': self.po1_line1.id,
            'name': 'Line1',  # required
            'price_unit': 99,  # required
        })
        so_line2 = self.env['sale.order.line'].create({
            'order_id': self.sale.id,
            'sourced_by': self.po2_line.id,
            'name': 'Line2',  # required
            'price_unit': 99,  # required
        })
        self.sale.action_ship_create()
        self.assertTrue(so_line1.procurement_group_id)
        self.assertTrue(so_line2.procurement_group_id)
        # Ensure we have 2 different procurement groups
        self.assertNotEqual(so_line1.procurement_group_id,
                            so_line2.procurement_group_id)
        self.assertEqual(
            so_line1.procurement_group_id.name,
            self.sale.name + '/' + self.po1.name
        )
        self.assertEqual(
            so_line2.procurement_group_id.name,
            self.sale.name + '/' + self.po2.name
        )
    def setUp(self):
        """Create two draft POs (PO1 with two lines, PO2 with one) and a
        draft sale order; tests attach sale lines sourced by these PO lines.
        (Defined after the tests in the original file; kept in place.)"""
        super(TestProcurementGroup, self).setUp()
        self.po1 = self.env['purchase.order'].create({
            'name': 'PO1',
            'partner_id': self.ref('base.res_partner_2'),  # required
            'location_id': self.ref('stock.stock_location_stock'),  # required
            'pricelist_id': 1,  # required
        })
        self.po2 = self.env['purchase.order'].create({
            'name': 'PO2',
            'partner_id': self.ref('base.res_partner_2'),  # required
            'location_id': self.ref('stock.stock_location_stock'),  # required
            'pricelist_id': 1,  # required
        })
        self.po1_line1 = self.env['purchase.order.line'].create({
            'name': 'PO1L1',  # required
            'partner_id': self.ref('base.res_partner_2'),  # required
            'order_id': self.po1.id,
            'price_unit': 99,  # required
            'date_planned': fields.Datetime.now(),  # required
        })
        self.po1_line2 = self.env['purchase.order.line'].create({
            'name': 'PO1L2',  # required
            'order_id': self.po1.id,
            'price_unit': 99,  # required
            'date_planned': fields.Datetime.now(),  # required
        })
        self.po2_line = self.env['purchase.order.line'].create({
            'name': 'PO2L1',  # required
            'order_id': self.po2.id,
            'price_unit': 99,  # required
            'date_planned': fields.Datetime.now(),  # required
        })
        self.sale = self.env['sale.order'].create({
            'name': 'SO1',
            'partner_id': self.ref('base.res_partner_12'),  # required
        })
| agpl-3.0 |
ejpbruel/servo | components/script/dom/bindings/codegen/ply/ply/lex.py | 344 | 40739 | # -----------------------------------------------------------------------------
# ply: lex.py
#
# Copyright (C) 2001-2009,
# David M. Beazley (Dabeaz LLC)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the David Beazley or Dabeaz LLC may be used to
# endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------
__version__ = "3.3"          # PLY release this lex.py belongs to
__tabversion__ = "3.2" # Version of table file used
import re, sys, types, copy, os
# This tuple contains known string types
try:
    # Python 2.6
    StringTypes = (types.StringType, types.UnicodeType)
except AttributeError:
    # Python 3.0
    StringTypes = (str, bytes)
# Extract the code attribute of a function. Different implementations
# are for Python 2/3 compatibility.
if sys.version_info[0] < 3:
    def func_code(f):
        # Python 2: the code object is exposed as f.func_code.
        return f.func_code
else:
    def func_code(f):
        # Python 3: renamed to f.__code__.
        return f.__code__
# Pattern a string must fully match to be accepted as a valid token name.
_is_identifier = re.compile(r'^[a-zA-Z0-9_]+$')


class LexError(Exception):
    """Raised when an invalid token is encountered and no default error
    handler is defined.  `text` holds the remaining input at the point of
    failure."""
    def __init__(self, message, s):
        self.args = (message,)
        self.text = s
class LexToken(object):
    """A single token produced by the lexer.

    The lexing engine sets four public attributes on each instance:
    type, value, lineno and lexpos.
    """
    def __repr__(self):
        return str(self)

    def __str__(self):
        return "LexToken(%s,%r,%d,%d)" % (
            self.type, self.value, self.lineno, self.lexpos)
class PlyLogger(object):
    """Minimal stand-in for a `logging` logger that writes to a file-like
    object using %-style message formatting."""

    def __init__(self, f):
        self.f = f

    def critical(self, msg, *args, **kwargs):
        self.f.write((msg % args) + "\n")

    def warning(self, msg, *args, **kwargs):
        self.f.write("WARNING: " + (msg % args) + "\n")

    def error(self, msg, *args, **kwargs):
        self.f.write("ERROR: " + (msg % args) + "\n")

    # info and debug messages are treated exactly like critical ones.
    info = critical
    debug = critical
# Null logger is used when no output is generated. Does nothing.
class NullLogger(object):
    # Every attribute access returns the logger itself, so any chained
    # lookup (e.g. log.warning) resolves to this same callable object ...
    def __getattribute__(self,name):
        return self
    # ... and calling it is a no-op that again returns self, making
    # arbitrary logger-style call chains silently succeed.
    def __call__(self,*args,**kwargs):
        return self
# -----------------------------------------------------------------------------
# === Lexing Engine ===
#
# The following Lexer class implements the lexer runtime. There are only
# a few public methods and attributes:
#
# input() - Store a new string in the lexer
# token() - Get the next token
# clone() - Clone the lexer
#
# lineno - Current line number
# lexpos - Current position in the input string
# -----------------------------------------------------------------------------
class Lexer:
    """Runtime lexer object.  Public interface: input(), token(), clone(),
    plus the lineno and lexpos attributes (see the banner comment above)."""
    def __init__(self):
        self.lexre = None             # Master regular expression. This is a list of
                                      # tuples (re,findex) where re is a compiled
                                      # regular expression and findex is a list
                                      # mapping regex group numbers to rules
        self.lexretext = None         # Current regular expression strings
        self.lexstatere = {}          # Dictionary mapping lexer states to master regexs
        self.lexstateretext = {}      # Dictionary mapping lexer states to regex strings
        self.lexstaterenames = {}     # Dictionary mapping lexer states to symbol names
        self.lexstate = "INITIAL"     # Current lexer state
        self.lexstatestack = []       # Stack of lexer states
        self.lexstateinfo = None      # State information
        self.lexstateignore = {}      # Dictionary of ignored characters for each state
        self.lexstateerrorf = {}      # Dictionary of error functions for each state
        self.lexreflags = 0           # Optional re compile flags
        self.lexdata = None           # Actual input data (as a string)
        self.lexpos = 0               # Current position in input text
        self.lexlen = 0               # Length of the input text
        self.lexerrorf = None         # Error rule (if any)
        self.lextokens = None         # List of valid tokens
        self.lexignore = ""           # Ignored characters
        self.lexliterals = ""         # Literal characters that can be passed through
        self.lexmodule = None         # Module
        self.lineno = 1               # Current line number
        self.lexoptimize = 0          # Optimized mode
    def clone(self,object=None):
        """Return a shallow copy of this lexer.  When `object` is given the
        clone is attached to it: every bound rule function and error handler
        is re-looked-up on `object` by name, so the copy drives methods of
        the new object instead of the original module/instance."""
        c = copy.copy(self)
        # If the object parameter has been supplied, it means we are attaching the
        # lexer to a new object. In this case, we have to rebind all methods in
        # the lexstatere and lexstateerrorf tables.
        if object:
            newtab = { }
            for key, ritem in self.lexstatere.items():
                newre = []
                for cre, findex in ritem:
                    newfindex = []
                    for f in findex:
                        # Entries without a rule function pass through as-is.
                        if not f or not f[0]:
                            newfindex.append(f)
                            continue
                        # Rebind by function name on the new object.
                        newfindex.append((getattr(object,f[0].__name__),f[1]))
                    newre.append((cre,newfindex))
                newtab[key] = newre
            c.lexstatere = newtab
            c.lexstateerrorf = { }
            for key, ef in self.lexstateerrorf.items():
                c.lexstateerrorf[key] = getattr(object,ef.__name__)
            c.lexmodule = object
        return c
# ------------------------------------------------------------
# writetab() - Write lexer information to a table file
# ------------------------------------------------------------
def writetab(self,tabfile,outputdir=""):
    """Write the lexer's tables to a cached <tabfile>.py module.

    tabfile may be a module object (in which case nothing is written) or a
    dotted module name; only its last component is used as the file name.
    outputdir is the directory receiving the file.  The generated module
    can be loaded back later with readtab().
    """
    if isinstance(tabfile,types.ModuleType):
        return
    basetabfilename = tabfile.split(".")[-1]
    filename = os.path.join(outputdir,basetabfilename)+".py"
    # Use a context manager so the file is closed even if a write fails
    # part-way through (the previous version leaked the handle on error).
    with open(filename,"w") as tf:
        tf.write("# %s.py. This file automatically created by PLY (version %s). Don't edit!\n" % (tabfile,__version__))
        tf.write("_tabversion = %s\n" % repr(__version__))
        tf.write("_lextokens = %s\n" % repr(self.lextokens))
        tf.write("_lexreflags = %s\n" % repr(self.lexreflags))
        tf.write("_lexliterals = %s\n" % repr(self.lexliterals))
        tf.write("_lexstateinfo = %s\n" % repr(self.lexstateinfo))

        # Serialize the master regexes: compiled pattern objects are replaced
        # by their source text and rule functions by their names (see
        # _funcs_to_names) so everything can be written as plain literals.
        # (A dead loop that collected the INITIAL-state functions without
        # ever using them has been removed.)
        tabre = { }
        for key, lre in self.lexstatere.items():
            titem = []
            for i in range(len(lre)):
                titem.append((self.lexstateretext[key][i],_funcs_to_names(lre[i][1],self.lexstaterenames[key][i])))
            tabre[key] = titem

        tf.write("_lexstatere = %s\n" % repr(tabre))
        tf.write("_lexstateignore = %s\n" % repr(self.lexstateignore))

        # Error functions are stored by name only; readtab() maps them back.
        taberr = { }
        for key, ef in self.lexstateerrorf.items():
            if ef:
                taberr[key] = ef.__name__
            else:
                taberr[key] = None
        tf.write("_lexstateerrorf = %s\n" % repr(taberr))
# ------------------------------------------------------------
# readtab() - Read lexer information from a tab file
# ------------------------------------------------------------
def readtab(self,tabfile,fdict):
    """Initialize the lexer from a previously written table file/module.

    tabfile may be a module object or a dotted module name that is imported
    here.  fdict maps rule-function names back to callables (see
    _names_to_funcs).  Raises ImportError when the cached table was
    produced by a different PLY version.
    """
    if isinstance(tabfile,types.ModuleType):
        lextab = tabfile
    else:
        if sys.version_info[0] < 3:
            exec("import %s as lextab" % tabfile)
        else:
            # Python 3: exec() cannot bind names in the enclosing local
            # scope, so import into an explicit namespace dict instead.
            env = { }
            exec("import %s as lextab" % tabfile, env,env)
            lextab = env['lextab']

    if getattr(lextab,"_tabversion","0.0") != __version__:
        raise ImportError("Inconsistent PLY version")

    self.lextokens = lextab._lextokens
    self.lexreflags = lextab._lexreflags
    self.lexliterals = lextab._lexliterals
    self.lexstateinfo = lextab._lexstateinfo
    self.lexstateignore = lextab._lexstateignore
    self.lexstatere = { }
    self.lexstateretext = { }
    for key,lre in lextab._lexstatere.items():
        titem = []
        txtitem = []
        for i in range(len(lre)):
            # Recompile each master regex and rebind its rule functions.
            titem.append((re.compile(lre[i][0],lextab._lexreflags | re.VERBOSE),_names_to_funcs(lre[i][1],fdict)))
            txtitem.append(lre[i][0])
        self.lexstatere[key] = titem
        self.lexstateretext[key] = txtitem
    self.lexstateerrorf = { }
    for key,ef in lextab._lexstateerrorf.items():
        self.lexstateerrorf[key] = fdict[ef]
    self.begin('INITIAL')
# ------------------------------------------------------------
# input() - Push a new string into the lexer
# ------------------------------------------------------------
def input(self,s):
    """Feed a new input string into the lexer and reset its position."""
    # Probe the first character to verify that s behaves like a string.
    if not isinstance(s[:1],StringTypes):
        raise ValueError("Expected a string")
    self.lexdata = s
    self.lexpos = 0
    self.lexlen = len(s)
# ------------------------------------------------------------
# begin() - Changes the lexing state
# ------------------------------------------------------------
def begin(self,state):
    """Switch the lexer into the lexing state *state*.

    Raises ValueError if the state was never defined.
    """
    if state not in self.lexstatere:
        raise ValueError("Undefined state")
    self.lexre = self.lexstatere[state]
    self.lexretext = self.lexstateretext[state]
    self.lexignore = self.lexstateignore.get(state, "")
    self.lexerrorf = self.lexstateerrorf.get(state, None)
    self.lexstate = state
# ------------------------------------------------------------
# push_state() - Changes the lexing state and saves old on stack
# ------------------------------------------------------------
def push_state(self,state):
    """Enter *state*, remembering the current state on the state stack.

    The state switch happens first: begin() raises ValueError for an
    undefined state, and switching before pushing keeps the stack
    unpolluted when that happens (previously the old state was pushed
    even if begin() failed).
    """
    previous = self.lexstate
    self.begin(state)
    self.lexstatestack.append(previous)
# ------------------------------------------------------------
# pop_state() - Restores the previous state
# ------------------------------------------------------------
def pop_state(self):
    """Restore the most recently pushed lexing state."""
    self.begin(self.lexstatestack.pop())
# ------------------------------------------------------------
# current_state() - Returns the current lexing state
# ------------------------------------------------------------
def current_state(self):
    """Return the name of the current lexing state."""
    return self.lexstate
# ------------------------------------------------------------
# skip() - Skip ahead n characters
# ------------------------------------------------------------
def skip(self,n):
    """Skip ahead *n* characters in the input (typically from an error rule)."""
    self.lexpos += n
# ------------------------------------------------------------
# opttoken() - Return the next token from the Lexer
#
# Note: This function has been carefully implemented to be as fast
# as possible. Don't make changes unless you really know what
# you are doing
# ------------------------------------------------------------
def token(self):
    """Return the next LexToken from the input, or None at end of input.

    Note: This function has been carefully implemented to be as fast
    as possible.  Don't make changes unless you really know what
    you are doing.
    """
    # Make local copies of frequently referenced attributes
    lexpos = self.lexpos
    lexlen = self.lexlen
    lexignore = self.lexignore
    lexdata = self.lexdata

    while lexpos < lexlen:
        # This code provides some short-circuit code for whitespace, tabs, and other ignored characters
        if lexdata[lexpos] in lexignore:
            lexpos += 1
            continue

        # Look for a regular expression match
        for lexre,lexindexfunc in self.lexre:
            m = lexre.match(lexdata,lexpos)
            if not m: continue

            # Create a token for return
            tok = LexToken()
            tok.value = m.group()
            tok.lineno = self.lineno
            tok.lexpos = lexpos

            # The matched group index identifies which rule fired.
            i = m.lastindex
            func,tok.type = lexindexfunc[i]

            if not func:
                # If no token type was set, it's an ignored token
                if tok.type:
                    self.lexpos = m.end()
                    return tok
                else:
                    lexpos = m.end()
                    break

            lexpos = m.end()

            # If token is processed by a function, call it
            tok.lexer = self # Set additional attributes useful in token rules
            self.lexmatch = m
            self.lexpos = lexpos

            newtok = func(tok)

            # Every function must return a token, if nothing, we just move to next token
            if not newtok:
                lexpos = self.lexpos # This is here in case user has updated lexpos.
                lexignore = self.lexignore # This is here in case there was a state change
                break

            # Verify type of the token.  If not in the token map, raise an error
            if not self.lexoptimize:
                if not newtok.type in self.lextokens:
                    raise LexError("%s:%d: Rule '%s' returned an unknown token type '%s'" % (
                        func_code(func).co_filename, func_code(func).co_firstlineno,
                        func.__name__, newtok.type),lexdata[lexpos:])

            return newtok
        else:
            # No regex rule fired (for-else): try single-character literals.
            # No match, see if in literals
            if lexdata[lexpos] in self.lexliterals:
                tok = LexToken()
                tok.value = lexdata[lexpos]
                tok.lineno = self.lineno
                tok.type = tok.value
                tok.lexpos = lexpos
                self.lexpos = lexpos + 1
                return tok

            # No match. Call t_error() if defined.
            if self.lexerrorf:
                tok = LexToken()
                tok.value = self.lexdata[lexpos:]
                tok.lineno = self.lineno
                tok.type = "error"
                tok.lexer = self
                tok.lexpos = lexpos
                self.lexpos = lexpos
                newtok = self.lexerrorf(tok)
                if lexpos == self.lexpos:
                    # Error method didn't change text position at all. This is an error.
                    raise LexError("Scanning error. Illegal character '%s'" % (lexdata[lexpos]), lexdata[lexpos:])
                lexpos = self.lexpos
                if not newtok: continue
                return newtok

            self.lexpos = lexpos
            raise LexError("Illegal character '%s' at index %d" % (lexdata[lexpos],lexpos), lexdata[lexpos:])

    # Fell off the end of the input (or no input was ever provided).
    self.lexpos = lexpos + 1
    if self.lexdata is None:
        raise RuntimeError("No input string given with input()")
    return None
# Iterator interface
def __iter__(self):
    """Lexer objects iterate over their own token stream."""
    return self
def next(self):
    """Iterator protocol: return the next token or raise StopIteration."""
    t = self.token()
    if t is None:
        raise StopIteration
    return t

# Python 3 iterator protocol uses __next__; alias it to the same method.
__next__ = next
# -----------------------------------------------------------------------------
# ==== Lex Builder ===
#
# The functions and classes below are used to collect lexing information
# and build a Lexer object from it.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# get_caller_module_dict()
#
# This function returns a dictionary containing all of the symbols defined within
# a caller further down the call stack. This is used to get the environment
# associated with the yacc() call if none was provided.
# -----------------------------------------------------------------------------
def get_caller_module_dict(levels):
    """Return the merged globals/locals of the frame *levels* up the stack.

    A dummy exception is raised and immediately caught so the frame chain
    can be reached through the traceback, without relying on
    implementation-specific APIs such as sys._getframe().
    """
    try:
        raise RuntimeError
    except RuntimeError:
        _exc_type, _exc_value, tb = sys.exc_info()
        frame = tb.tb_frame
        for _ in range(levels):
            frame = frame.f_back
        ldict = frame.f_globals.copy()
        # Merge locals too, unless we are at module level where globals
        # and locals are the same dictionary.
        if frame.f_globals != frame.f_locals:
            ldict.update(frame.f_locals)
        return ldict
# -----------------------------------------------------------------------------
# _funcs_to_names()
#
# Given a list of regular expression functions, this converts it to a list
# suitable for output to a table file
# -----------------------------------------------------------------------------
def _funcs_to_names(funclist,namelist):
result = []
for f,name in zip(funclist,namelist):
if f and f[0]:
result.append((name, f[1]))
else:
result.append(f)
return result
# -----------------------------------------------------------------------------
# _names_to_funcs()
#
# Given a list of regular expression function names, this converts it back to
# functions.
# -----------------------------------------------------------------------------
def _names_to_funcs(namelist,fdict):
result = []
for n in namelist:
if n and n[0]:
result.append((fdict[n[0]],n[1]))
else:
result.append(n)
return result
# -----------------------------------------------------------------------------
# _form_master_re()
#
# This function takes a list of all of the regex components and attempts to
# form the master regular expression. Given limitations in the Python re
# module, it may be necessary to break the master regex into separate expressions.
# -----------------------------------------------------------------------------
def _form_master_re(relist,reflags,ldict,toknames):
    """Combine the rule regexes in *relist* into master regular expressions.

    Returns parallel lists of (compiled_regex, index->rule table) pairs,
    regex source strings, and index->rule-name tables.  If the combined
    expression fails to compile (e.g. too many named groups for the re
    module), the list is recursively split in half and each half is
    compiled separately.
    """
    if not relist: return []
    regex = "|".join(relist)
    try:
        lexre = re.compile(regex,re.VERBOSE | reflags)

        # Build the index to function map for the matching engine
        lexindexfunc = [ None ] * (max(lexre.groupindex.values())+1)
        lexindexnames = lexindexfunc[:]

        for f,i in lexre.groupindex.items():
            handle = ldict.get(f,None)
            if type(handle) in (types.FunctionType, types.MethodType):
                # Rule defined by a function: dispatch to it on match.
                lexindexfunc[i] = (handle,toknames[f])
                lexindexnames[i] = f
            elif handle is not None:
                lexindexnames[i] = f
                # String rules named t_ignore_* produce no token at all;
                # other string rules produce their mapped token type.
                if f.find("ignore_") > 0:
                    lexindexfunc[i] = (None,None)
                else:
                    lexindexfunc[i] = (None, toknames[f])
        return [(lexre,lexindexfunc)],[regex],[lexindexnames]
    except Exception:
        # Compilation failed: split the rule list and retry each half.
        m = int(len(relist)/2)
        if m == 0: m = 1
        llist, lre, lnames = _form_master_re(relist[:m],reflags,ldict,toknames)
        rlist, rre, rnames = _form_master_re(relist[m:],reflags,ldict,toknames)
        return llist+rlist, lre+rre, lnames+rnames
# -----------------------------------------------------------------------------
# def _statetoken(s,names)
#
# Given a declaration name s of the form "t_" and a dictionary whose keys are
# state names, this function returns a tuple (states,tokenname) where states
# is a tuple of state names and tokenname is the name of the token. For example,
# calling this with s = "t_foo_bar_SPAM" might return (('foo','bar'),'SPAM')
# -----------------------------------------------------------------------------
def _statetoken(s,names):
nonstate = 1
parts = s.split("_")
for i in range(1,len(parts)):
if not parts[i] in names and parts[i] != 'ANY': break
if i > 1:
states = tuple(parts[1:i])
else:
states = ('INITIAL',)
if 'ANY' in states:
states = tuple(names)
tokenname = "_".join(parts[i:])
return (states,tokenname)
# -----------------------------------------------------------------------------
# LexerReflect()
#
# This class represents information needed to build a lexer as extracted from a
# user's input file.
# -----------------------------------------------------------------------------
class LexerReflect(object):
    """Collects and validates the lexer specification found in a namespace.

    *ldict* is a dictionary (typically a module namespace or the caller's
    merged globals/locals) containing the conventional PLY declarations:
    ``tokens``, ``literals``, ``states`` and ``t_``-prefixed rules.
    Problems are reported through *log* and ``self.error`` is set to 1 if
    any error was found.
    """
    def __init__(self,ldict,log=None,reflags=0):
        self.ldict = ldict # Namespace holding the lexer specification
        self.error_func = None
        self.tokens = []
        self.reflags = reflags # Extra flags passed to re.compile()
        self.stateinfo = { 'INITIAL' : 'inclusive'}
        self.files = {} # Source files containing rules (checked later)
        self.error = 0 # Becomes 1 on any validation error

        if log is None:
            self.log = PlyLogger(sys.stderr)
        else:
            self.log = log

    # Get all of the basic information
    def get_all(self):
        """Extract tokens, literals, states and rules from ldict."""
        self.get_tokens()
        self.get_literals()
        self.get_states()
        self.get_rules()

    # Validate all of the information
    def validate_all(self):
        """Validate the extracted information; returns nonzero on error."""
        self.validate_tokens()
        self.validate_literals()
        self.validate_rules()
        return self.error

    # Get the tokens map
    def get_tokens(self):
        """Fetch the ``tokens`` declaration from ldict."""
        tokens = self.ldict.get("tokens",None)
        if not tokens:
            self.log.error("No token list is defined")
            self.error = 1
            return

        if not isinstance(tokens,(list, tuple)):
            self.log.error("tokens must be a list or tuple")
            self.error = 1
            return

        # NOTE(review): this branch is unreachable -- an empty tokens value
        # is already caught by the first "if not tokens" check above.
        if not tokens:
            self.log.error("tokens is empty")
            self.error = 1
            return

        self.tokens = tokens

    # Validate the tokens
    def validate_tokens(self):
        """Check token names for validity and report duplicates."""
        terminals = {}
        for n in self.tokens:
            if not _is_identifier.match(n):
                self.log.error("Bad token name '%s'",n)
                self.error = 1
            if n in terminals:
                self.log.warning("Token '%s' multiply defined", n)
            terminals[n] = 1

    # Get the literals specifier
    def get_literals(self):
        """Fetch the ``literals`` declaration (defaults to the empty string)."""
        self.literals = self.ldict.get("literals","")

    # Validate literals
    def validate_literals(self):
        """Check that every literal is a single-character string."""
        try:
            for c in self.literals:
                if not isinstance(c,StringTypes) or len(c) > 1:
                    self.log.error("Invalid literal %s. Must be a single character", repr(c))
                    self.error = 1
                    continue

        except TypeError:
            self.log.error("Invalid literals specification. literals must be a sequence of characters")
            self.error = 1

    def get_states(self):
        """Fetch and validate the ``states`` declaration, filling stateinfo."""
        self.states = self.ldict.get("states",None)
        # Build statemap
        if self.states:
            if not isinstance(self.states,(tuple,list)):
                self.log.error("states must be defined as a tuple or list")
                self.error = 1
            else:
                for s in self.states:
                    if not isinstance(s,tuple) or len(s) != 2:
                        self.log.error("Invalid state specifier %s. Must be a tuple (statename,'exclusive|inclusive')",repr(s))
                        self.error = 1
                        continue
                    name, statetype = s
                    if not isinstance(name,StringTypes):
                        self.log.error("State name %s must be a string", repr(name))
                        self.error = 1
                        continue
                    if not (statetype == 'inclusive' or statetype == 'exclusive'):
                        self.log.error("State type for state %s must be 'inclusive' or 'exclusive'",name)
                        self.error = 1
                        continue
                    if name in self.stateinfo:
                        self.log.error("State '%s' already defined",name)
                        self.error = 1
                        continue
                    self.stateinfo[name] = statetype

    # Get all of the symbols with a t_ prefix and sort them into various
    # categories (functions, strings, error functions, and ignore characters)
    def get_rules(self):
        """Partition all t_* symbols into function/string/error/ignore rules."""
        tsymbols = [f for f in self.ldict if f[:2] == 't_' ]

        # Now build up a list of functions and a list of strings
        self.toknames = { } # Mapping of symbols to token names
        self.funcsym = { } # Symbols defined as functions
        self.strsym = { } # Symbols defined as strings
        self.ignore = { } # Ignore strings by state
        self.errorf = { } # Error functions by state

        for s in self.stateinfo:
            self.funcsym[s] = []
            self.strsym[s] = []

        if len(tsymbols) == 0:
            self.log.error("No rules of the form t_rulename are defined")
            self.error = 1
            return

        for f in tsymbols:
            t = self.ldict[f]
            states, tokname = _statetoken(f,self.stateinfo)
            self.toknames[f] = tokname

            if hasattr(t,"__call__"):
                # Rule defined by a function (or bound method).
                if tokname == 'error':
                    for s in states:
                        self.errorf[s] = t
                elif tokname == 'ignore':
                    line = func_code(t).co_firstlineno
                    file = func_code(t).co_filename
                    self.log.error("%s:%d: Rule '%s' must be defined as a string",file,line,t.__name__)
                    self.error = 1
                else:
                    for s in states:
                        self.funcsym[s].append((f,t))
            elif isinstance(t, StringTypes):
                # Rule defined as a plain regex string.
                if tokname == 'ignore':
                    for s in states:
                        self.ignore[s] = t
                    if "\\" in t:
                        self.log.warning("%s contains a literal backslash '\\'",f)

                elif tokname == 'error':
                    self.log.error("Rule '%s' must be defined as a function", f)
                    self.error = 1
                else:
                    for s in states:
                        self.strsym[s].append((f,t))
            else:
                self.log.error("%s not defined as a function or string", f)
                self.error = 1

        # Sort the functions by line number
        for f in self.funcsym.values():
            if sys.version_info[0] < 3:
                f.sort(lambda x,y: cmp(func_code(x[1]).co_firstlineno,func_code(y[1]).co_firstlineno))
            else:
                # Python 3.0
                f.sort(key=lambda x: func_code(x[1]).co_firstlineno)

        # Sort the strings by regular expression length
        for s in self.strsym.values():
            if sys.version_info[0] < 3:
                s.sort(lambda x,y: (len(x[1]) < len(y[1])) - (len(x[1]) > len(y[1])))
            else:
                # Python 3.0
                s.sort(key=lambda x: len(x[1]),reverse=True)

    # Validate all of the t_rules collected
    def validate_rules(self):
        """Validate every collected rule: arity, docstring regex, token names."""
        for state in self.stateinfo:
            # Validate all rules defined by functions

            for fname, f in self.funcsym[state]:
                line = func_code(f).co_firstlineno
                file = func_code(f).co_filename
                self.files[file] = 1

                tokname = self.toknames[fname]
                # Bound methods carry an implicit self, so they need one
                # extra declared argument.
                if isinstance(f, types.MethodType):
                    reqargs = 2
                else:
                    reqargs = 1
                nargs = func_code(f).co_argcount
                if nargs > reqargs:
                    self.log.error("%s:%d: Rule '%s' has too many arguments",file,line,f.__name__)
                    self.error = 1
                    continue

                if nargs < reqargs:
                    self.log.error("%s:%d: Rule '%s' requires an argument", file,line,f.__name__)
                    self.error = 1
                    continue

                if not f.__doc__:
                    self.log.error("%s:%d: No regular expression defined for rule '%s'",file,line,f.__name__)
                    self.error = 1
                    continue

                try:
                    # The rule's regex lives in its docstring; verify that it
                    # compiles and cannot match the empty string.
                    c = re.compile("(?P<%s>%s)" % (fname,f.__doc__), re.VERBOSE | self.reflags)
                    if c.match(""):
                        self.log.error("%s:%d: Regular expression for rule '%s' matches empty string", file,line,f.__name__)
                        self.error = 1
                except re.error:
                    _etype, e, _etrace = sys.exc_info()
                    self.log.error("%s:%d: Invalid regular expression for rule '%s'. %s", file,line,f.__name__,e)
                    if '#' in f.__doc__:
                        self.log.error("%s:%d. Make sure '#' in rule '%s' is escaped with '\\#'",file,line, f.__name__)
                    self.error = 1

            # Validate all rules defined by strings
            for name,r in self.strsym[state]:
                tokname = self.toknames[name]
                if tokname == 'error':
                    self.log.error("Rule '%s' must be defined as a function", name)
                    self.error = 1
                    continue

                if not tokname in self.tokens and tokname.find("ignore_") < 0:
                    self.log.error("Rule '%s' defined for an unspecified token %s",name,tokname)
                    self.error = 1
                    continue

                try:
                    c = re.compile("(?P<%s>%s)" % (name,r),re.VERBOSE | self.reflags)
                    if (c.match("")):
                        self.log.error("Regular expression for rule '%s' matches empty string",name)
                        self.error = 1
                except re.error:
                    _etype, e, _etrace = sys.exc_info()
                    self.log.error("Invalid regular expression for rule '%s'. %s",name,e)
                    if '#' in r:
                        self.log.error("Make sure '#' in rule '%s' is escaped with '\\#'",name)
                    self.error = 1

            if not self.funcsym[state] and not self.strsym[state]:
                self.log.error("No rules defined for state '%s'",state)
                self.error = 1

            # Validate the error function
            efunc = self.errorf.get(state,None)
            if efunc:
                f = efunc
                line = func_code(f).co_firstlineno
                file = func_code(f).co_filename
                self.files[file] = 1

                if isinstance(f, types.MethodType):
                    reqargs = 2
                else:
                    reqargs = 1
                nargs = func_code(f).co_argcount
                if nargs > reqargs:
                    self.log.error("%s:%d: Rule '%s' has too many arguments",file,line,f.__name__)
                    self.error = 1

                if nargs < reqargs:
                    self.log.error("%s:%d: Rule '%s' requires an argument", file,line,f.__name__)
                    self.error = 1

        for f in self.files:
            self.validate_file(f)

    # -----------------------------------------------------------------------------
    # validate_file()
    #
    # This checks to see if there are duplicated t_rulename() functions or strings
    # in the parser input file.  This is done using a simple regular expression
    # match on each line in the given file.
    # -----------------------------------------------------------------------------

    def validate_file(self,filename):
        """Report duplicated t_* definitions found by scanning *filename*."""
        import os.path
        base,ext = os.path.splitext(filename)
        if ext != '.py': return # No idea what the file is. Return OK

        try:
            f = open(filename)
            lines = f.readlines()
            f.close()
        except IOError:
            return # Couldn't find the file. Don't worry about it

        fre = re.compile(r'\s*def\s+(t_[a-zA-Z_0-9]*)\(')
        sre = re.compile(r'\s*(t_[a-zA-Z_0-9]*)\s*=')

        counthash = { }
        linen = 1
        for l in lines:
            m = fre.match(l)
            if not m:
                m = sre.match(l)
            if m:
                name = m.group(1)
                prev = counthash.get(name)
                if not prev:
                    counthash[name] = linen
                else:
                    self.log.error("%s:%d: Rule %s redefined. Previously defined on line %d",filename,linen,name,prev)
                    self.error = 1
            linen += 1
# -----------------------------------------------------------------------------
# lex(module)
#
# Build all of the regular expression rules from definitions in the supplied module
# -----------------------------------------------------------------------------
def lex(module=None,object=None,debug=0,optimize=0,lextab="lextab",reflags=0,nowarn=0,outputdir="", debuglog=None, errorlog=None):
    """Build a Lexer from rule definitions and return it.

    The lexer specification is taken from *module* (or *object*, which
    takes precedence), or from the caller's namespace if neither is given.

    debug      -- emit debugging output through *debuglog*
    optimize   -- skip validation and use/produce the cached *lextab* module
    lextab     -- name of the cached table module (read/written in optimize mode)
    reflags    -- extra flags passed to re.compile()
    nowarn     -- accepted for compatibility; not referenced in this function
    outputdir  -- directory where the lextab file is written
    debuglog/errorlog -- logger objects (default: PlyLogger on stderr)

    Side effects: sets the module-level ``lexer``, ``token`` and ``input``
    globals to the new lexer and its bound methods.
    """
    global lexer
    ldict = None
    stateinfo = { 'INITIAL' : 'inclusive'}
    lexobj = Lexer()
    lexobj.lexoptimize = optimize
    global token,input

    if errorlog is None:
        errorlog = PlyLogger(sys.stderr)

    if debug:
        if debuglog is None:
            debuglog = PlyLogger(sys.stderr)

    # Get the module dictionary used for the lexer
    if object: module = object

    if module:
        _items = [(k,getattr(module,k)) for k in dir(module)]
        ldict = dict(_items)
    else:
        ldict = get_caller_module_dict(2)

    # Collect parser information from the dictionary
    linfo = LexerReflect(ldict,log=errorlog,reflags=reflags)
    linfo.get_all()

    if not optimize:
        if linfo.validate_all():
            raise SyntaxError("Can't build lexer")

    # In optimize mode, try to short-circuit by loading the cached tables.
    if optimize and lextab:
        try:
            lexobj.readtab(lextab,ldict)
            token = lexobj.token
            input = lexobj.input
            lexer = lexobj
            return lexobj

        except ImportError:
            pass

    # Dump some basic debugging information
    if debug:
        debuglog.info("lex: tokens = %r", linfo.tokens)
        debuglog.info("lex: literals = %r", linfo.literals)
        debuglog.info("lex: states = %r", linfo.stateinfo)

    # Build a dictionary of valid token names
    lexobj.lextokens = { }
    for n in linfo.tokens:
        lexobj.lextokens[n] = 1

    # Get literals specification
    if isinstance(linfo.literals,(list,tuple)):
        lexobj.lexliterals = type(linfo.literals[0])().join(linfo.literals)
    else:
        lexobj.lexliterals = linfo.literals

    # Get the stateinfo dictionary
    stateinfo = linfo.stateinfo

    regexs = { }
    # Build the master regular expressions
    for state in stateinfo:
        regex_list = []

        # Add rules defined by functions first
        for fname, f in linfo.funcsym[state]:
            line = func_code(f).co_firstlineno
            file = func_code(f).co_filename
            regex_list.append("(?P<%s>%s)" % (fname,f.__doc__))
            if debug:
                debuglog.info("lex: Adding rule %s -> '%s' (state '%s')",fname,f.__doc__, state)

        # Now add all of the simple rules
        for name,r in linfo.strsym[state]:
            regex_list.append("(?P<%s>%s)" % (name,r))
            if debug:
                debuglog.info("lex: Adding rule %s -> '%s' (state '%s')",name,r, state)

        regexs[state] = regex_list

    # Build the master regular expressions
    if debug:
        debuglog.info("lex: ==== MASTER REGEXS FOLLOW ====")

    for state in regexs:
        lexre, re_text, re_names = _form_master_re(regexs[state],reflags,ldict,linfo.toknames)
        lexobj.lexstatere[state] = lexre
        lexobj.lexstateretext[state] = re_text
        lexobj.lexstaterenames[state] = re_names
        if debug:
            for i in range(len(re_text)):
                debuglog.info("lex: state '%s' : regex[%d] = '%s'",state, i, re_text[i])

    # For inclusive states, we need to add the regular expressions from the INITIAL state
    for state,stype in stateinfo.items():
        if state != "INITIAL" and stype == 'inclusive':
            lexobj.lexstatere[state].extend(lexobj.lexstatere['INITIAL'])
            lexobj.lexstateretext[state].extend(lexobj.lexstateretext['INITIAL'])
            lexobj.lexstaterenames[state].extend(lexobj.lexstaterenames['INITIAL'])

    lexobj.lexstateinfo = stateinfo
    lexobj.lexre = lexobj.lexstatere["INITIAL"]
    lexobj.lexretext = lexobj.lexstateretext["INITIAL"]
    lexobj.lexreflags = reflags

    # Set up ignore variables
    lexobj.lexstateignore = linfo.ignore
    lexobj.lexignore = lexobj.lexstateignore.get("INITIAL","")

    # Set up error functions
    lexobj.lexstateerrorf = linfo.errorf
    lexobj.lexerrorf = linfo.errorf.get("INITIAL",None)
    if not lexobj.lexerrorf:
        errorlog.warning("No t_error rule is defined")

    # Check state information for ignore and error rules
    for s,stype in stateinfo.items():
        if stype == 'exclusive':
            if not s in linfo.errorf:
                errorlog.warning("No error rule is defined for exclusive state '%s'", s)
            if not s in linfo.ignore and lexobj.lexignore:
                errorlog.warning("No ignore rule is defined for exclusive state '%s'", s)
        elif stype == 'inclusive':
            # Inclusive states inherit the INITIAL state's error/ignore rules.
            if not s in linfo.errorf:
                linfo.errorf[s] = linfo.errorf.get("INITIAL",None)
            if not s in linfo.ignore:
                linfo.ignore[s] = linfo.ignore.get("INITIAL","")

    # Create global versions of the token() and input() functions
    token = lexobj.token
    input = lexobj.input
    lexer = lexobj

    # If in optimize mode, we write the lextab
    if lextab and optimize:
        lexobj.writetab(lextab,outputdir)

    return lexobj
# -----------------------------------------------------------------------------
# runmain()
#
# This runs the lexer as a main program
# -----------------------------------------------------------------------------
def runmain(lexer=None,data=None):
    """Run the lexer as a main program for quick interactive testing.

    Tokenizes *data* (or the file named in sys.argv[1], or stdin when no
    argument is given) using *lexer* -- or the module-level token()/input()
    functions -- and prints one (type, value, lineno, lexpos) tuple per
    token.
    """
    if not data:
        try:
            filename = sys.argv[1]
            f = open(filename)
            data = f.read()
            f.close()
        except IndexError:
            sys.stdout.write("Reading from standard input (type EOF to end):\n")
            data = sys.stdin.read()

    if lexer:
        _input = lexer.input
    else:
        _input = input
    _input(data)
    if lexer:
        _token = lexer.token
    else:
        _token = token

    while 1:
        tok = _token()
        if not tok: break
        sys.stdout.write("(%s,%r,%d,%d)\n" % (tok.type, tok.value, tok.lineno,tok.lexpos))
# -----------------------------------------------------------------------------
# @TOKEN(regex)
#
# This decorator function can be used to set the regex expression on a function
# when its docstring might need to be set in an alternative way
# -----------------------------------------------------------------------------
def TOKEN(r):
    """Decorator that attaches regular expression *r* to a rule function.

    *r* may be a plain regex string or a callable whose docstring holds
    the regex; either way the decorated function's __doc__ is set so PLY
    picks it up as the rule's pattern.
    """
    def set_doc(f):
        f.__doc__ = r.__doc__ if hasattr(r, "__call__") else r
        return f
    return set_doc

# Alternative spelling of the TOKEN decorator
Token = TOKEN
| mpl-2.0 |
google/grr | grr/server/grr_response_server/flows/cron/system_test.py | 1 | 8396 | #!/usr/bin/env python
"""System cron flows tests."""
from absl import app
from grr_response_core import config
from grr_response_core.lib import rdfvalue
from grr_response_core.lib.rdfvalues import client_stats as rdf_client_stats
from grr_response_core.lib.rdfvalues import stats as rdf_stats
from grr_response_server import client_report_utils
from grr_response_server import data_store
from grr_response_server.databases import db
from grr_response_server.flows.cron import system
from grr_response_server.rdfvalues import cronjobs as rdf_cronjobs
from grr.test_lib import test_lib
class SystemCronJobTest(test_lib.GRRBaseTest):
  """Test system cron jobs."""

  def setUp(self):
    super().setUp()

    # Ping times place clients into different activity buckets: active
    # within the last hour, within 8 days, or too old (61 days) to be
    # counted by the stats cron jobs.
    one_hour_ping = rdfvalue.RDFDatetime.Now() - rdfvalue.Duration.From(
        1, rdfvalue.HOURS)
    eight_day_ping = rdfvalue.RDFDatetime.Now() - rdfvalue.Duration.From(
        8, rdfvalue.DAYS)
    ancient_ping = rdfvalue.RDFDatetime.Now() - rdfvalue.Duration.From(
        61, rdfvalue.DAYS)

    self.SetupClientsWithIndices(
        range(0, 10), system="Windows", ping=eight_day_ping)
    self.SetupClientsWithIndices(
        range(10, 20), system="Linux", ping=eight_day_ping)
    self.SetupClientsWithIndices(
        range(20, 22),
        system="Darwin",
        fleetspeak_enabled=True,
        ping=one_hour_ping)
    # These clients shouldn't be analyzed by any of the stats cronjobs.
    self.SetupClientsWithIndices(
        range(22, 24), system="Linux", ping=ancient_ping)

    # Label the Windows clients: "GRR"-owned labels (Label1/Label2) appear
    # in reports, while the "jim"-owned UserLabel should not.
    for i in range(0, 10):
      client_id = "C.1%015x" % i
      data_store.REL_DB.AddClientLabels(client_id, "GRR", ["Label1", "Label2"])
      data_store.REL_DB.AddClientLabels(client_id, "jim", ["UserLabel"])

  def _CheckVersionGraph(self, graph, expected_title, expected_count):
    """Checks one GRR-version graph's title and its single sample count."""
    self.assertEqual(graph.title, expected_title)
    if expected_count == 0:
      self.assertEmpty(graph)
      return
    sample = graph[0]
    self.assertEqual(sample.label,
                     "GRR Monitor %s" % config.CONFIG["Source.version_numeric"])
    self.assertEqual(sample.y_value, expected_count)

  def _CheckVersionStats(self, label, report_type, counts):
    """Checks the 1/7/14/30-day GRR-version graphs for *label*."""
    # We expect to have 1, 7, 14 and 30-day graphs for every label.
    graph_series = client_report_utils.FetchMostRecentGraphSeries(
        label, report_type)

    self._CheckVersionGraph(graph_series.graphs[0],
                            "1 day actives for %s label" % label, counts[0])
    self._CheckVersionGraph(graph_series.graphs[1],
                            "7 day actives for %s label" % label, counts[1])
    self._CheckVersionGraph(graph_series.graphs[2],
                            "14 day actives for %s label" % label, counts[2])
    self._CheckVersionGraph(graph_series.graphs[3],
                            "30 day actives for %s label" % label, counts[3])

  def _CheckGRRVersionBreakDown(self):
    """Checks the result of the GRRVersionBreakDown cron job."""
    # All machines should be in All once. Windows machines should be in Label1
    # and Label2. There should be no stats for UserLabel.
    report_type = rdf_stats.ClientGraphSeries.ReportType.GRR_VERSION
    self._CheckVersionStats("All", report_type, [2, 2, 22, 22])
    self._CheckVersionStats("Label1", report_type, [0, 0, 10, 10])
    self._CheckVersionStats("Label2", report_type, [0, 0, 10, 10])

  def _CheckOSGraph(self, graph, expected_title, expected_counts):
    """Checks one OS-breakdown graph against expected per-OS counts."""
    actual_counts = {s.label: s.y_value for s in graph}
    self.assertEqual(graph.title, expected_title)
    self.assertDictEqual(actual_counts, expected_counts)

  def _CheckOSStats(self, label, report_type, counts):
    """Checks the 1/7/14/30-day OS graphs for *label*."""
    # We expect to have 1, 7, 14 and 30-day graphs for every label.
    graph_series = client_report_utils.FetchMostRecentGraphSeries(
        label, report_type)

    self._CheckOSGraph(graph_series.graphs[0],
                       "1 day actives for %s label" % label, counts[0])
    self._CheckOSGraph(graph_series.graphs[1],
                       "7 day actives for %s label" % label, counts[1])
    self._CheckOSGraph(graph_series.graphs[2],
                       "14 day actives for %s label" % label, counts[2])
    self._CheckOSGraph(graph_series.graphs[3],
                       "30 day actives for %s label" % label, counts[3])

  def _CheckOSBreakdown(self):
    """Checks the result of the OSBreakDown cron job."""
    report_type = rdf_stats.ClientGraphSeries.ReportType.OS_TYPE
    all_stats = [
        {
            "Darwin": 2
        },
        {
            "Darwin": 2
        },
        {
            "Linux": 10,
            "Windows": 10,
            "Darwin": 2
        },
        {
            "Linux": 10,
            "Windows": 10,
            "Darwin": 2
        },
    ]
    label_stats = [{}, {}, {"Windows": 10}, {"Windows": 10}]
    self._CheckOSStats("All", report_type, all_stats)
    self._CheckOSStats("Label1", report_type, label_stats)
    self._CheckOSStats("Label2", report_type, label_stats)

  def _CheckAccessStats(self, label, expected):
    """Checks the N-day-active histogram for *label*."""
    graph_series = client_report_utils.FetchMostRecentGraphSeries(
        label, rdf_stats.ClientGraphSeries.ReportType.N_DAY_ACTIVE)

    histogram = graph_series.graphs[0]

    data = [(x.x_value, x.y_value) for x in histogram]

    self.assertEqual(data, expected)

  def _ToMicros(self, duration_str):
    """Converts a human-readable duration (e.g. "7d") to microseconds."""
    return rdfvalue.Duration.FromHumanReadable(duration_str).microseconds

  def _CheckLastAccessStats(self):
    """Checks the result of the LastAccessStats cron job."""
    # pyformat: disable
    all_counts = [
        (self._ToMicros("1d"), 2),
        (self._ToMicros("2d"), 2),
        (self._ToMicros("3d"), 2),
        (self._ToMicros("7d"), 2),
        (self._ToMicros("14d"), 22),
        (self._ToMicros("30d"), 22),
        (self._ToMicros("60d"), 22)
    ]
    label_counts = [
        (self._ToMicros("1d"), 0),
        (self._ToMicros("2d"), 0),
        (self._ToMicros("3d"), 0),
        (self._ToMicros("7d"), 0),
        (self._ToMicros("14d"), 10),
        (self._ToMicros("30d"), 10),
        (self._ToMicros("60d"), 10)
    ]
    # pyformat: enable

    # All our clients appeared at the same time (and did not appear since).
    self._CheckAccessStats("All", expected=all_counts)

    # All our clients appeared at the same time but this label is only half.
    self._CheckAccessStats("Label1", expected=label_counts)

    # All our clients appeared at the same time but this label is only half.
    self._CheckAccessStats("Label2", expected=label_counts)

  def testPurgeClientStats(self):
    """Stats older than the retention period are purged; newer ones kept."""
    client_id = test_lib.TEST_CLIENT_ID
    max_age = db.CLIENT_STATS_RETENTION.ToInt(rdfvalue.SECONDS)

    # Write three stat entries at different (fake) times, encoding the
    # write time into RSS_size so entries can be told apart.
    for t in [1 * max_age, 1.5 * max_age, 2 * max_age]:
      with test_lib.FakeTime(t):
        st = rdf_client_stats.ClientStats(RSS_size=int(t))
        data_store.REL_DB.WriteClientStats(client_id, st)

    stat_entries = data_store.REL_DB.ReadClientStats(
        client_id=client_id,
        min_timestamp=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(0))
    self.assertCountEqual([1 * max_age, 1.5 * max_age, 2 * max_age],
                          [e.RSS_size for e in stat_entries])

    with test_lib.FakeTime(2.51 * max_age):
      self._RunPurgeClientStats()

    # Only the newest entry is still within the retention window.
    stat_entries = data_store.REL_DB.ReadClientStats(
        client_id=client_id,
        min_timestamp=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(0))
    self.assertLen(stat_entries, 1)
    self.assertNotIn(max_age, [e.RSS_size for e in stat_entries])

  def testGRRVersionBreakDown(self):
    """Check that all client stats cron jobs are run."""
    cron_run = rdf_cronjobs.CronJobRun()
    job_data = rdf_cronjobs.CronJob()
    cron = system.GRRVersionBreakDownCronJob(cron_run, job_data)
    cron.Run()

    self._CheckGRRVersionBreakDown()

  def testOSBreakdown(self):
    """Check that all client stats cron jobs are run."""
    run = rdf_cronjobs.CronJobRun()
    job = rdf_cronjobs.CronJob()
    system.OSBreakDownCronJob(run, job).Run()

    self._CheckOSBreakdown()

  def testLastAccessStats(self):
    """Check that all client stats cron jobs are run."""
    run = rdf_cronjobs.CronJobRun()
    job = rdf_cronjobs.CronJob()
    system.LastAccessStatsCronJob(run, job).Run()

    self._CheckLastAccessStats()

  def _RunPurgeClientStats(self):
    """Runs the PurgeClientStats cron job once."""
    run = rdf_cronjobs.CronJobRun()
    job = rdf_cronjobs.CronJob()
    system.PurgeClientStatsCronJob(run, job).Run()
def main(argv):
  """Entry point: delegates to the GRR test runner."""
  # Run the full test suite
  test_lib.main(argv)
if __name__ == "__main__":
  app.run(main)
| apache-2.0 |
vinhlh/bite-project | deps/gdata-python-client/src/gdata/webmastertools/data.py | 126 | 5504 | #!/usr/bin/python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the data classes of the Google Webmaster Tools Data API"""
__author__ = 'j.s@google.com (Jeff Scudder)'
import atom.core
import atom.data
import gdata.data
import gdata.opensearch.data
WT_TEMPLATE = '{http://schemas.google.com/webmaster/tools/2007/}%s'
# --- Crawl issue elements, entry and feed ---
class CrawlIssueCrawlType(atom.core.XmlElement):
  """Type of crawl of the crawl issue"""
  _qname = WT_TEMPLATE % 'crawl-type'
class CrawlIssueDateDetected(atom.core.XmlElement):
  """Detection date for the issue"""
  _qname = WT_TEMPLATE % 'date-detected'
class CrawlIssueDetail(atom.core.XmlElement):
  """Detail of the crawl issue"""
  _qname = WT_TEMPLATE % 'detail'
class CrawlIssueIssueType(atom.core.XmlElement):
  """Type of crawl issue"""
  _qname = WT_TEMPLATE % 'issue-type'
class CrawlIssueLinkedFromUrl(atom.core.XmlElement):
  """Source URL that links to the issue URL"""
  _qname = WT_TEMPLATE % 'linked-from'
class CrawlIssueUrl(atom.core.XmlElement):
  """URL affected by the crawl issue"""
  _qname = WT_TEMPLATE % 'url'
class CrawlIssueEntry(gdata.data.GDEntry):
  """Describes a crawl issue entry"""
  # NOTE: list-valued class attributes declare repeated child elements.
  date_detected = CrawlIssueDateDetected
  url = CrawlIssueUrl
  detail = CrawlIssueDetail
  issue_type = CrawlIssueIssueType
  crawl_type = CrawlIssueCrawlType
  linked_from = [CrawlIssueLinkedFromUrl]
class CrawlIssuesFeed(gdata.data.GDFeed):
  """Feed of crawl issues for a particular site"""
  entry = [CrawlIssueEntry]
# --- Keyword elements, entry and feed ---
class Indexed(atom.core.XmlElement):
  """Describes the indexing status of a site"""
  _qname = WT_TEMPLATE % 'indexed'
class Keyword(atom.core.XmlElement):
  """A keyword in a site or in a link to a site"""
  _qname = WT_TEMPLATE % 'keyword'
  # presumably maps to the 'source' XML attribute of the element -- confirm
  # against atom.core.XmlElement attribute conventions.
  source = 'source'
class KeywordEntry(gdata.data.GDEntry):
  """Describes a keyword entry"""
class KeywordsFeed(gdata.data.GDFeed):
  """Feed of keywords for a particular site"""
  entry = [KeywordEntry]
  keyword = [Keyword]
class LastCrawled(atom.core.XmlElement):
  """Describes the last crawled date of a site"""
  _qname = WT_TEMPLATE % 'last-crawled'
# --- Message elements, entry and feed ---
class MessageBody(atom.core.XmlElement):
  """Message body"""
  _qname = WT_TEMPLATE % 'body'
class MessageDate(atom.core.XmlElement):
  """Message date"""
  _qname = WT_TEMPLATE % 'date'
class MessageLanguage(atom.core.XmlElement):
  """Message language"""
  _qname = WT_TEMPLATE % 'language'
class MessageRead(atom.core.XmlElement):
  """Indicates if the message has already been read"""
  _qname = WT_TEMPLATE % 'read'
class MessageSubject(atom.core.XmlElement):
  """Message subject"""
  _qname = WT_TEMPLATE % 'subject'
class SiteId(atom.core.XmlElement):
  """Site URL"""
  _qname = WT_TEMPLATE % 'id'
class MessageEntry(gdata.data.GDEntry):
  """Describes a message entry"""
  # 'wt_id' avoids clashing with the inherited Atom 'id' element.
  wt_id = SiteId
  subject = MessageSubject
  date = MessageDate
  body = MessageBody
  language = MessageLanguage
  read = MessageRead
class MessagesFeed(gdata.data.GDFeed):
  """Describes a messages feed"""
  entry = [MessageEntry]
# --- Sitemap elements, entry and feed ---
class SitemapEntry(gdata.data.GDEntry):
  """Describes a sitemap entry"""
  indexed = Indexed
  wt_id = SiteId
class SitemapMobileMarkupLanguage(atom.core.XmlElement):
  """Describes a markup language for URLs in this sitemap"""
  _qname = WT_TEMPLATE % 'sitemap-mobile-markup-language'
class SitemapMobile(atom.core.XmlElement):
  """Lists acceptable mobile markup languages for URLs in this sitemap"""
  _qname = WT_TEMPLATE % 'sitemap-mobile'
  sitemap_mobile_markup_language = [SitemapMobileMarkupLanguage]
class SitemapNewsPublicationLabel(atom.core.XmlElement):
  """Specifies the publication label for this sitemap"""
  _qname = WT_TEMPLATE % 'sitemap-news-publication-label'
class SitemapNews(atom.core.XmlElement):
  """Lists publication labels for this sitemap"""
  _qname = WT_TEMPLATE % 'sitemap-news'
  sitemap_news_publication_label = [SitemapNewsPublicationLabel]
class SitemapType(atom.core.XmlElement):
  """Indicates the type of sitemap. Not used for News or Mobile Sitemaps"""
  _qname = WT_TEMPLATE % 'sitemap-type'
class SitemapUrlCount(atom.core.XmlElement):
  """Indicates the number of URLs contained in the sitemap"""
  _qname = WT_TEMPLATE % 'sitemap-url-count'
class SitemapsFeed(gdata.data.GDFeed):
  """Describes a sitemaps feed"""
  entry = [SitemapEntry]
# --- Site verification elements, entry and feed ---
class VerificationMethod(atom.core.XmlElement):
  """Describes a verification method that may be used for a site"""
  _qname = WT_TEMPLATE % 'verification-method'
  # presumably XML attribute names ('in-use', 'type') -- confirm against
  # atom.core.XmlElement attribute conventions.
  in_use = 'in-use'
  type = 'type'
class Verified(atom.core.XmlElement):
  """Describes the verification status of a site"""
  _qname = WT_TEMPLATE % 'verified'
class SiteEntry(gdata.data.GDEntry):
  """Describes a site entry"""
  indexed = Indexed
  wt_id = SiteId
  verified = Verified
  last_crawled = LastCrawled
  verification_method = [VerificationMethod]
class SitesFeed(gdata.data.GDFeed):
  """Describes a sites feed"""
  entry = [SiteEntry]
| apache-2.0 |
viruxel/ansible-modules-extras | windows/win_regmerge.py | 65 | 3759 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Jon Hawkesworth (@jhawkesworth) <figs@unity.demon.co.uk>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
# Module documentation rendered by ansible-doc (YAML inside a Python string).
# Fix: the option was documented as 'compare_key' while the examples, the
# notes ("compare_to functionality") and the RETURN facts
# ('compare_to_key_found') all use the module's real option name 'compare_to'.
DOCUMENTATION = '''
---
module: win_regmerge
version_added: "2.1"
short_description: Merges the contents of a registry file into the windows registry
description:
    - Wraps the reg.exe command to import the contents of a registry file.
    - Suitable for use with registry files created using M(win_template).
    - Windows registry files have a specific format and must be constructed correctly with carriage return and line feed line endings otherwise they will not be merged.
    - Exported registry files often start with a Byte Order Mark which must be removed if the file is to templated using M(win_template).
    - Registry file format is described at U(https://support.microsoft.com/en-us/kb/310516)
    - See also M(win_template), M(win_regedit)
options:
  path:
    description:
      - The full path including file name to the registry file on the remote machine to be merged
    required: true
    default: no default
  compare_to:
    description:
      - The parent key to use when comparing the contents of the registry to the contents of the file.  Needs to be in HKLM or HKCU part of registry. Use a PS-Drive style path for example HKLM:\SOFTWARE not HKEY_LOCAL_MACHINE\SOFTWARE
        If not supplied, or the registry key is not found, no comparison will be made, and the module will report changed.
    required: false
    default: no default
author: "Jon Hawkesworth (@jhawkesworth)"
notes:
   - Organise your registry files so that they contain a single root registry
     key if you want to use the compare_to functionality.
     This module does not force registry settings to be in the state
     described in the file.  If registry settings have been modified externally
     the module will merge the contents of the file but continue to report
     differences on subsequent runs.
     To force registry change, use M(win_regedit) with state=absent before
     using M(win_regmerge).
'''
EXAMPLES = '''
# Merge in a registry file without comparing to current registry
# Note that paths using / to separate are preferred as they require less special handling than \
win_regmerge:
path: C:/autodeploy/myCompany-settings.reg
# Compare and merge registry file
win_regmerge:
path: C:/autodeploy/myCompany-settings.reg
compare_to: HKLM:\SOFTWARE\myCompany
'''
RETURN = '''
compare_to_key_found:
description: whether the parent registry key has been found for comparison
returned: when comparison key not found in registry
type: boolean
sample: false
difference_count:
description: number of differences between the registry and the file
returned: changed
type: integer
sample: 1
compared:
description: whether a comparison has taken place between the registry and the file
returned: when a comparison key has been supplied and comparison has been attempted
type: boolean
sample: true
'''
| gpl-3.0 |
tomchristie/django | tests/utils_tests/test_numberformat.py | 30 | 4697 | from decimal import Decimal
from sys import float_info
from unittest import TestCase
from django.utils.numberformat import format as nformat
class TestNumberFormat(TestCase):
    """Tests for django.utils.numberformat.format (aliased as nformat).

    nformat(number, decimal_sep, decimal_pos=None, grouping=0,
    thousand_sep='', force_grouping=False) renders a number with explicit,
    locale-independent separators.
    """
    def test_format_number(self):
        self.assertEqual(nformat(1234, '.'), '1234')
        self.assertEqual(nformat(1234.2, '.'), '1234.2')
        self.assertEqual(nformat(1234, '.', decimal_pos=2), '1234.00')
        # grouping only takes effect when force_grouping is True
        self.assertEqual(nformat(1234, '.', grouping=2, thousand_sep=','), '1234')
        self.assertEqual(nformat(1234, '.', grouping=2, thousand_sep=',', force_grouping=True), '12,34')
        self.assertEqual(nformat(-1234.33, '.', decimal_pos=1), '-1234.3')
    def test_format_string(self):
        # string inputs are formatted like their numeric equivalents
        self.assertEqual(nformat('1234', '.'), '1234')
        self.assertEqual(nformat('1234.2', '.'), '1234.2')
        self.assertEqual(nformat('1234', '.', decimal_pos=2), '1234.00')
        self.assertEqual(nformat('1234', '.', grouping=2, thousand_sep=','), '1234')
        self.assertEqual(nformat('1234', '.', grouping=2, thousand_sep=',', force_grouping=True), '12,34')
        self.assertEqual(nformat('-1234.33', '.', decimal_pos=1), '-1234.3')
        # the thousand separator may be an arbitrary string
        self.assertEqual(nformat('10000', '.', grouping=3, thousand_sep='comma', force_grouping=True), '10comma000')
    def test_large_number(self):
        # integers around sys.float_info.max must not degrade to scientific
        # notation; expected digit strings are parameterized by sign/last digit
        most_max = (
            '{}179769313486231570814527423731704356798070567525844996'
            '598917476803157260780028538760589558632766878171540458953'
            '514382464234321326889464182768467546703537516986049910576'
            '551282076245490090389328944075868508455133942304583236903'
            '222948165808559332123348274797826204144723168738177180919'
            '29988125040402618412485836{}'
        )
        most_max2 = (
            '{}35953862697246314162905484746340871359614113505168999'
            '31978349536063145215600570775211791172655337563430809179'
            '07028764928468642653778928365536935093407075033972099821'
            '15310256415249098018077865788815173701691026788460916647'
            '38064458963316171186642466965495956524082894463374763543'
            '61838599762500808052368249716736'
        )
        int_max = int(float_info.max)
        self.assertEqual(nformat(int_max, '.'), most_max.format('', '8'))
        self.assertEqual(nformat(int_max + 1, '.'), most_max.format('', '9'))
        self.assertEqual(nformat(int_max * 2, '.'), most_max2.format(''))
        self.assertEqual(nformat(0 - int_max, '.'), most_max.format('-', '8'))
        self.assertEqual(nformat(-1 - int_max, '.'), most_max.format('-', '9'))
        self.assertEqual(nformat(-2 * int_max, '.'), most_max2.format('-'))
    def test_float_numbers(self):
        # A float without a fractional part (3.) results in a ".0" when no
        # decimal_pos is given. Contrast that with the Decimal('3.') case in
        # test_decimal_numbers which doesn't return a fractional part.
        self.assertEqual(nformat(3., '.'), '3.0')
    def test_decimal_numbers(self):
        self.assertEqual(nformat(Decimal('1234'), '.'), '1234')
        self.assertEqual(nformat(Decimal('1234.2'), '.'), '1234.2')
        self.assertEqual(nformat(Decimal('1234'), '.', decimal_pos=2), '1234.00')
        self.assertEqual(nformat(Decimal('1234'), '.', grouping=2, thousand_sep=','), '1234')
        self.assertEqual(nformat(Decimal('1234'), '.', grouping=2, thousand_sep=',', force_grouping=True), '12,34')
        self.assertEqual(nformat(Decimal('-1234.33'), '.', decimal_pos=1), '-1234.3')
        # very small decimals must not be rendered in scientific notation
        self.assertEqual(nformat(Decimal('0.00000001'), '.', decimal_pos=8), '0.00000001')
        self.assertEqual(nformat(Decimal('9e-19'), '.', decimal_pos=2), '0.00')
        self.assertEqual(nformat(Decimal('.00000000000099'), '.', decimal_pos=0), '0')
        # very large decimals must be fully expanded, not truncated
        self.assertEqual(
            nformat(Decimal('1e16'), '.', thousand_sep=',', grouping=3, force_grouping=True),
            '10,000,000,000,000,000'
        )
        self.assertEqual(
            nformat(Decimal('1e16'), '.', decimal_pos=2, thousand_sep=',', grouping=3, force_grouping=True),
            '10,000,000,000,000,000.00'
        )
        self.assertEqual(nformat(Decimal('3.'), '.'), '3')
        self.assertEqual(nformat(Decimal('3.0'), '.'), '3.0')
    def test_decimal_subclass(self):
        # Decimal subclasses overriding __format__ must be respected
        class EuroDecimal(Decimal):
            """
            Wrapper for Decimal which prefixes each amount with the € symbol.
            """
            def __format__(self, specifier, **kwargs):
                amount = super().__format__(specifier, **kwargs)
                return '€ {}'.format(amount)
        price = EuroDecimal('1.23')
        self.assertEqual(nformat(price, ','), '€ 1,23')
| bsd-3-clause |
arpitn30/open-event-orga-server | app/api/events.py | 1 | 15239 | from flask import g
from flask.ext.restplus import Namespace, reqparse, marshal
from app.api.attendees import TICKET
from app.api.microlocations import MICROLOCATION
from app.api.sessions import SESSION
from app.api.speakers import SPEAKER
from app.api.sponsors import SPONSOR
from app.api.tracks import TRACK
from app.helpers.data import save_to_db, record_activity
from app.models.call_for_papers import CallForPaper as EventCFS
from app.models.event import Event as EventModel
from app.models.event_copyright import EventCopyright
from app.models.role import Role
from app.models.social_link import SocialLink as SocialLinkModel
from app.models.user import ORGANIZER
from app.models.users_events_roles import UsersEventsRoles
from helpers.special_fields import EventTypeField, EventTopicField, \
EventPrivacyField, EventSubTopicField, EventStateField
from app.api.helpers import custom_fields as fields
from app.api.helpers.helpers import requires_auth, parse_args, \
can_access, fake_marshal_with, fake_marshal_list_with, erase_from_dict
from app.api.helpers.utils import PAGINATED_MODEL, PaginatedResourceBase, \
PAGE_PARAMS, POST_RESPONSES, PUT_RESPONSES, BaseDAO, ServiceDAO
from app.api.helpers.utils import Resource, ETAG_HEADER_DEFN
api = Namespace('events', description='Events')
# --- Marshalling models (serialization schemas) for the Events API ---
EVENT_CREATOR = api.model('EventCreator', {
    'id': fields.Integer(),
    'email': fields.Email()
})
EVENT_COPYRIGHT = api.model('EventCopyright', {
    'holder': fields.String(),
    'holder_url': fields.Uri(),
    'licence': fields.String(),
    'licence_url': fields.Uri(),
    'year': fields.Integer(),
    'logo': fields.String()
})
# CFS = Call For Speakers/papers
EVENT_CFS = api.model('EventCFS', {
    'announcement': fields.String(),
    'start_date': fields.DateTime(),
    'end_date': fields.DateTime(),
    'timezone': fields.String(),
    'privacy': EventPrivacyField()  # [public, private]
})
# Per-service version counters used for incremental sync by clients.
EVENT_VERSION = api.model('EventVersion', {
    'event_ver': fields.Integer(),
    'sessions_ver': fields.Integer(),
    'speakers_ver': fields.Integer(),
    'tracks_ver': fields.Integer(),
    'sponsors_ver': fields.Integer(),
    'microlocations_ver': fields.Integer()
})
SOCIAL_LINK = api.model('SocialLink', {
    'id': fields.Integer(),
    'name': fields.String(required=True),
    'link': fields.String(required=True)
})
# POST variant: the server assigns 'id', so it is not accepted as input.
SOCIAL_LINK_POST = api.clone('SocialLinkPost', SOCIAL_LINK)
del SOCIAL_LINK_POST['id']
EVENT = api.model('Event', {
    'id': fields.Integer(required=True),
    'identifier': fields.String(),
    'name': fields.String(required=True),
    'event_url': fields.Uri(),
    'email': fields.Email(),
    'logo': fields.Upload(),
    'start_time': fields.DateTime(required=True),
    'end_time': fields.DateTime(required=True),
    'timezone': fields.String(),
    'latitude': fields.Float(),
    'longitude': fields.Float(),
    'background_image': fields.Upload(attribute='background_url'),
    'description': fields.String(),
    'location_name': fields.String(),
    'searchable_location_name': fields.String(),
    'organizer_name': fields.String(),
    'organizer_description': fields.String(),
    'state': EventStateField(default='Draft'),
    'type': EventTypeField(),
    'topic': EventTopicField(),
    'sub_topic': EventSubTopicField(),
    'privacy': EventPrivacyField(),
    'ticket_url': fields.Uri(),
    'creator': fields.Nested(EVENT_CREATOR, allow_null=True),
    'copyright': fields.Nested(EVENT_COPYRIGHT, allow_null=True),
    'schedule_published_on': fields.DateTime(),
    'code_of_conduct': fields.String(),
    'social_links': fields.List(fields.Nested(SOCIAL_LINK), attribute='social_link'),
    'call_for_papers': fields.Nested(EVENT_CFS, allow_null=True),
    'version': fields.Nested(EVENT_VERSION),
    'has_session_speakers': fields.Boolean(default=False),
    'thumbnail': fields.Uri(),
    'large': fields.Uri()
})
# Event plus all embedded relations (used for documentation of responses).
EVENT_COMPLETE = api.clone('EventComplete', EVENT, {
    'sessions': fields.List(fields.Nested(SESSION), attribute='session'),
    'microlocations': fields.List(fields.Nested(MICROLOCATION), attribute='microlocation'),
    'tracks': fields.List(fields.Nested(TRACK), attribute='track'),
    'sponsors': fields.List(fields.Nested(SPONSOR), attribute='sponsor'),
    'speakers': fields.List(fields.Nested(SPEAKER), attribute='speaker'),
    'tickets': fields.List(fields.Nested(TICKET), attribute='tickets'),
})
EVENT_PAGINATED = api.clone('EventPaginated', PAGINATED_MODEL, {
    'results': fields.List(fields.Nested(EVENT))
})
# POST variant: server-managed fields are stripped from the input schema.
EVENT_POST = api.clone('EventPost', EVENT)
del EVENT_POST['id']
del EVENT_POST['creator']
del EVENT_POST['social_links']
del EVENT_POST['version']
# ###################
# Data Access Objects
# ###################
class SocialLinkDAO(ServiceDAO):
    """
    Social Link DAO: CRUD for an event's social links.
    """
    # version counter bumped on writes -- presumably consumed by the version
    # tracking in ServiceDAO; confirm against ServiceDAO implementation.
    version_key = 'event_ver'
class EventDAO(BaseDAO):
    """
    Event DAO: CRUD for events, including their nested copyright and
    call-for-papers sub-objects.
    """
    version_key = 'event_ver'
    def fix_payload(self, data):
        """
        Fixes the payload data.
        Here converts string time from datetime obj
        """
        datetime_fields = ['start_time', 'end_time', 'schedule_published_on']
        for f in datetime_fields:
            if f in data:
                # delegate string -> datetime parsing to the field definition
                data[f] = EVENT_POST[f].from_str(data.get(f))
        # cfs datetimes
        if data.get('call_for_papers'):
            for _ in ['start_date', 'end_date']:
                if _ in data['call_for_papers']:
                    data['call_for_papers'][_] = EVENT_CFS[_].from_str(
                        data['call_for_papers'].get(_))
        return data
    def create(self, data, url):
        """Create an event plus nested objects; returns (event, 201, headers)."""
        data = self.validate(data)
        payload = self.fix_payload(data)
        # save copyright info
        payload['copyright'] = CopyrightDAO.create(payload.get('copyright', {}), validate=False)
        # save cfs info
        if payload.get('call_for_papers'):  # don't create if call_for_papers==null
            payload['call_for_papers'] = CFSDAO.create(payload['call_for_papers'], validate=False)
        # save event
        new_event = self.model(**payload)
        # the authenticated user (g.user) becomes creator and organizer
        new_event.creator = g.user
        save_to_db(new_event, "Event saved")
        # set organizer
        role = Role.query.filter_by(name=ORGANIZER).first()
        uer = UsersEventsRoles(g.user, new_event, role)
        save_to_db(uer, 'UER saved')
        # Return created resource with a 201 status code and its Location
        # (url) in the header.
        resource_location = url + '/' + str(new_event.id)
        return self.get(new_event.id), 201, {'Location': resource_location}
    def update(self, event_id, data):
        """Update an event; nested copyright/CFS are created/updated/deleted
        depending on whether the keys are present and non-null."""
        data = self.validate_put(data)
        payload = self.fix_payload(data)
        # get event
        event = self.get(event_id)
        # update copyright if key exists
        if 'copyright' in payload:
            CopyrightDAO.update(event.copyright.id, payload['copyright']
                                if payload['copyright'] else {})
            payload.pop('copyright')
        # update cfs
        if 'call_for_papers' in payload:
            cfs_data = payload.get('call_for_papers')
            if event.call_for_papers:
                if cfs_data:  # update existing
                    CFSDAO.update(event.call_for_papers.id, cfs_data)
                else:  # delete if null
                    CFSDAO.delete(event.call_for_papers.id)
            elif cfs_data:  # create new (only if data exists)
                CFSDAO.create(cfs_data, validate=False)
            payload.pop('call_for_papers')
        # master update
        return BaseDAO.update(self, event_id, payload, validate=False)
# --- Module-level DAO singletons used by the resources below ---
LinkDAO = SocialLinkDAO(SocialLinkModel, SOCIAL_LINK_POST)
DAO = EventDAO(EventModel, EVENT_POST)
CopyrightDAO = BaseDAO(EventCopyright, EVENT_COPYRIGHT)
CFSDAO = BaseDAO(EventCFS, EVENT_CFS)  # CFS = Call For Speakers
# DEFINE PARAMS
# Swagger query-parameter documentation for the list endpoints; keys with an
# empty dict get a default description.
EVENT_PARAMS = {
    'location': {},
    'contains': {
        'description': 'Contains the string in name and description'
    },
    'state': {},
    'privacy': {},
    'type': {},
    'topic': {},
    'sub_topic': {},
    'start_time_gt': {},
    'start_time_lt': {},
    'end_time_gt': {},
    'end_time_lt': {},
    'time_period': {},
    'include': {
        'description': 'Comma separated list of additional fields to load. '
                       'Supported: sessions,tracks,microlocations,speakers,sponsors)'
    },
}
SINGLE_EVENT_PARAMS = {
    'include': {
        'description': 'Comma separated list of additional fields to load. '
                       'Supported: sessions,tracks,microlocations,speakers,sponsors,tickets)'
    },
}
def get_extended_event_model(includes=None):
    """Build an Event model extended with the requested embedded relations.

    :param includes: iterable of relation names to embed (e.g. 'sessions');
        unknown names are silently ignored.
    :return: a new marshalling model based on EVENT.
    """
    if includes is None:
        includes = []
    # relation name -> (marshalling model, attribute on the Event object),
    # checked in this fixed order so the resulting model is stable.
    relations = [
        ('sessions', SESSION, 'session'),
        ('tracks', TRACK, 'track'),
        ('microlocations', MICROLOCATION, 'microlocation'),
        ('sponsors', SPONSOR, 'sponsor'),
        ('speakers', SPEAKER, 'speaker'),
        ('tickets', TICKET, 'tickets'),
    ]
    included_fields = {}
    for name, model, attribute in relations:
        if name in includes:
            included_fields[name] = fields.List(
                fields.Nested(model), attribute=attribute)
    return EVENT.extend('ExtendedEvent', included_fields)
# DEFINE RESOURCES
class EventResource():
    """
    Event Resource Base class
    """
    # 'dest' names prefixed with '__event_' are interpreted as special
    # filters by the DAO list() layer (search location, ranges, etc.).
    event_parser = reqparse.RequestParser()
    event_parser.add_argument('location', type=unicode, dest='__event_search_location')
    event_parser.add_argument('contains', type=unicode, dest='__event_contains')
    event_parser.add_argument('state', type=str)
    event_parser.add_argument('privacy', type=str)
    event_parser.add_argument('type', type=str)
    event_parser.add_argument('topic', type=str)
    event_parser.add_argument('sub_topic', type=str)
    event_parser.add_argument('start_time_gt', dest='__event_start_time_gt')
    event_parser.add_argument('start_time_lt', dest='__event_start_time_lt')
    event_parser.add_argument('end_time_gt', dest='__event_end_time_gt')
    event_parser.add_argument('end_time_lt', dest='__event_end_time_lt')
    event_parser.add_argument('time_period', type=str, dest='__event_time_period')
    event_parser.add_argument('include', type=str)
class SingleEventResource():
    """Parser mixin for single-event endpoints (only 'include' is accepted)."""
    event_parser = reqparse.RequestParser()
    event_parser.add_argument('include', type=str)
@api.route('/<int:event_id>')
@api.param('event_id')
@api.response(404, 'Event not found')
class Event(Resource, SingleEventResource):
    """GET/PUT/DELETE for a single event."""
    @api.doc('get_event', params=SINGLE_EVENT_PARAMS)
    @api.header(*ETAG_HEADER_DEFN)
    @fake_marshal_with(EVENT_COMPLETE)  # Fake marshal decorator to add response model to swagger doc
    def get(self, event_id):
        """Fetch an event given its id"""
        includes = parse_args(self.event_parser).get('include', '').split(',')
        return marshal(DAO.get(event_id), get_extended_event_model(includes))
    @requires_auth
    @can_access
    @api.doc('delete_event')
    @api.marshal_with(EVENT)
    def delete(self, event_id):
        """Delete an event given its id"""
        event = DAO.delete(event_id)
        record_activity('delete_event', event_id=event_id)
        return event
    @requires_auth
    @can_access
    @api.doc('update_event', responses=PUT_RESPONSES)
    @api.marshal_with(EVENT)
    @api.expect(EVENT_POST)
    def put(self, event_id):
        """Update an event given its id"""
        event = DAO.update(event_id, self.api.payload)
        record_activity('update_event', event_id=event_id)
        return event
@api.route('/<int:event_id>/event')
@api.param('event_id')
@api.response(404, 'Event not found')
class EventWebapp(Resource, SingleEventResource):
    """Alternate read-only endpoint used by the webapp."""
    @api.doc('get_event_for_webapp')
    @api.header(*ETAG_HEADER_DEFN)
    @fake_marshal_with(EVENT_COMPLETE)  # Fake marshal decorator to add response model to swagger doc
    def get(self, event_id):
        """Fetch an event given its id.
        Alternate endpoint for fetching an event.
        """
        includes = parse_args(self.event_parser).get('include', '').split(',')
        return marshal(DAO.get(event_id), get_extended_event_model(includes))
@api.route('')
class EventList(Resource, EventResource):
    """List and create events."""
    @api.doc('list_events', params=EVENT_PARAMS)
    @api.header(*ETAG_HEADER_DEFN)
    @fake_marshal_list_with(EVENT_COMPLETE)
    def get(self):
        """List all events"""
        parsed_args = parse_args(self.event_parser)
        includes = parsed_args.get('include', '').split(',')
        # 'include' only selects embedded relations; it is not a DAO filter
        erase_from_dict(parsed_args, 'include')
        return marshal(DAO.list(**parsed_args), get_extended_event_model(includes))
    @requires_auth
    @api.doc('create_event', responses=POST_RESPONSES)
    @api.marshal_with(EVENT)
    @api.expect(EVENT_POST)
    def post(self):
        """Create an event"""
        # DAO.create returns (event, 201, {'Location': ...})
        item = DAO.create(self.api.payload, self.api.url_for(self))
        record_activity('create_event', event_id=item[0].id)
        return item
@api.route('/page')
class EventListPaginated(Resource, PaginatedResourceBase, EventResource):
    """Paginated event listing."""
    @api.doc('list_events_paginated', params=PAGE_PARAMS)
    @api.doc(params=EVENT_PARAMS)
    @api.header(*ETAG_HEADER_DEFN)
    @api.marshal_with(EVENT_PAGINATED)
    def get(self):
        """List events in a paginated manner"""
        args = self.parser.parse_args()
        return DAO.paginated_list(args=args, **parse_args(self.event_parser))
@api.route('/<int:event_id>/links')
@api.param('event_id')
class SocialLinkList(Resource):
    """List and create social links of an event."""
    @api.doc('list_social_links')
    @api.header(*ETAG_HEADER_DEFN)
    @api.marshal_list_with(SOCIAL_LINK)
    def get(self, event_id):
        """List all social links"""
        return LinkDAO.list(event_id)
    @requires_auth
    @can_access
    @api.doc('create_social_link', responses=POST_RESPONSES)
    @api.marshal_with(SOCIAL_LINK)
    @api.expect(SOCIAL_LINK_POST)
    def post(self, event_id):
        """Create a social link"""
        return LinkDAO.create(
            event_id,
            self.api.payload,
            self.api.url_for(self, event_id=event_id)
        )
@api.route('/<int:event_id>/links/<int:link_id>')
class SocialLink(Resource):
    """Read/update/delete a single social link of an event."""
    @requires_auth
    @can_access
    @api.doc('delete_social_link')
    @api.marshal_with(SOCIAL_LINK)
    def delete(self, event_id, link_id):
        """Delete a social link given its id"""
        return LinkDAO.delete(event_id, link_id)
    @requires_auth
    @can_access
    @api.doc('update_social_link', responses=PUT_RESPONSES)
    @api.marshal_with(SOCIAL_LINK)
    @api.expect(SOCIAL_LINK_POST)
    def put(self, event_id, link_id):
        """Update a social link given its id"""
        return LinkDAO.update(event_id, link_id, self.api.payload)
    @api.hide
    @api.header(*ETAG_HEADER_DEFN)
    @api.marshal_with(SOCIAL_LINK)
    def get(self, event_id, link_id):
        """Fetch a social link given its id"""
        return LinkDAO.get(event_id, link_id)
| gpl-3.0 |
zpenoyre/illustris | illustris_python/groupcat.py | 1 | 5294 | """ Illustris Simulation: Public Data Release.
groupcat.py: File I/O related to the FoF and Subfind group catalogs. """
from os.path import isfile
import numpy as np
import h5py
def gcPath(basePath, snapNum, chunkNum=0):
    """ Return absolute path to a group catalog HDF5 file (modify as needed).

    Two historical naming schemes exist; the older 'groups_*' name is
    preferred when such a file is present on disk, otherwise the newer
    'fof_subhalo_tab_*' name is returned.
    """
    snapDir = basePath + '/groups_%03d/' % snapNum
    oldStyle = snapDir + 'groups_%03d.%d.hdf5' % (snapNum, chunkNum)
    newStyle = snapDir + 'fof_subhalo_tab_%03d.%d.hdf5' % (snapNum, chunkNum)
    return oldStyle if isfile(oldStyle) else newStyle
def offsetPath(basePath, snapNum):
    """ Return absolute path to a separate offset file (modify as needed).

    NOTE: basePath is expected to end with a path separator -- the offsets
    directory is resolved relative to its parent.
    """
    return basePath + '../postprocessing/offsets/offsets_%03d.hdf5' % snapNum
def loadObjects(basePath, snapNum, gName, nName, fields):
    """ Load either halo or subhalo information from the group catalog.

    gName is the HDF5 group name ('Group' or 'Subhalo'); nName is the header
    count prefix ('groups' or 'subgroups'); fields is an optional field name
    or list of field names. Returns a dict of field arrays plus 'count', or a
    bare array if exactly one field was requested.

    Fix: per-chunk files are now opened with a context manager -- the
    original leaked the file handle when an empty chunk hit 'continue'
    (and on any exception raised mid-read).
    """
    result = {}

    # make sure fields is not a single element
    if isinstance(fields, basestring):
        fields = [fields]

    # load header from first chunk
    with h5py.File(gcPath(basePath, snapNum), 'r') as f:

        header = dict(f['Header'].attrs.items())

        result['count'] = f['Header'].attrs['N' + nName + '_Total']

        if not result['count']:
            print('warning: zero groups, empty return (snap=' + str(snapNum) + ').')
            return result

        # if fields not specified, load everything
        if not fields:
            fields = f[gName].keys()

        for field in fields:
            # verify existence
            if not field in f[gName].keys():
                raise Exception("Group catalog does not have requested field [" + field + "]!")

            # replace local length with global
            shape = list(f[gName][field].shape)
            shape[0] = result['count']

            # allocate within return dict
            result[field] = np.zeros(shape, dtype=f[gName][field].dtype)

    # loop over chunks
    wOffset = 0

    for i in range(header['NumFiles']):
        # context manager guarantees the chunk file is closed on 'continue'
        # and on exceptions (the original called f.close() manually and
        # skipped it for empty chunks)
        with h5py.File(gcPath(basePath, snapNum, i), 'r') as f:
            if not f['Header'].attrs['N' + nName + '_ThisFile']:
                continue  # empty file chunk

            # loop over each requested field
            for field in fields:
                # shape and type
                shape = f[gName][field].shape

                # read data local to the current file
                if len(shape) == 1:
                    result[field][wOffset:wOffset + shape[0]] = f[gName][field][0:shape[0]]
                else:
                    result[field][wOffset:wOffset + shape[0], :] = f[gName][field][0:shape[0], :]

            # all fields of one chunk share the same leading length
            wOffset += shape[0]

    # only a single field? then return the array instead of a single item dict
    if len(fields) == 1:
        return result[fields[0]]

    return result
def loadSubhalos(basePath,snapNum,fields=None):
    """ Load all subhalo information from the entire group catalog for one snapshot
       (optionally restrict to a subset given by fields). """

    # thin wrapper: 'Subhalo' HDF5 group, 'subgroups' header count prefix
    return loadObjects(basePath,snapNum,"Subhalo","subgroups",fields)

def loadHalos(basePath,snapNum,fields=None):
    """ Load all halo information from the entire group catalog for one snapshot
       (optionally restrict to a subset given by fields). """

    # thin wrapper: 'Group' HDF5 group, 'groups' header count prefix
    return loadObjects(basePath,snapNum,"Group","groups",fields)

def loadHeader(basePath,snapNum):
    """ Load the group catalog header (as a plain dict of HDF5 attributes). """
    with h5py.File(gcPath(basePath,snapNum),'r') as f:
        header = dict( f['Header'].attrs.items() )

    return header
def load(basePath, snapNum):
    """ Load complete group catalog all at once. """
    # assemble subhalos, halos and header into a single dict
    return {
        'subhalos': loadSubhalos(basePath, snapNum),
        'halos': loadHalos(basePath, snapNum),
        'header': loadHeader(basePath, snapNum),
    }
def loadSingle(basePath,snapNum,haloID=-1,subhaloID=-1):
    """ Return complete group catalog information for one halo or subhalo.

    Exactly one of haloID/subhaloID must be >= 0; returns a dict of all
    catalog fields for that single object. """
    if (haloID < 0 and subhaloID < 0) or (haloID >= 0 and subhaloID >= 0):
        raise Exception("Must specify either haloID or subhaloID (and not both).")

    gName = "Subhalo" if subhaloID >= 0 else "Group"
    searchID = subhaloID if subhaloID >= 0 else haloID

    # old or new format
    if 'fof_subhalo' in gcPath(basePath,snapNum):
        # use separate 'offsets_nnn.hdf5' files
        with h5py.File(offsetPath(basePath,snapNum),'r') as f:
            offsets = f['FileOffsets/'+gName][()]
    else:
        # use header of group catalog
        with h5py.File(gcPath(basePath,snapNum),'r') as f:
            offsets = f['Header'].attrs['FileOffsets_'+gName]

    # offsets[i] is the first global index stored in chunk i; after the
    # subtraction, the last non-negative entry is the chunk holding searchID
    # and its value is the object's index within that chunk.
    offsets = searchID - offsets
    fileNum = np.max( np.where(offsets >= 0) )
    groupOffset = offsets[fileNum]

    # load halo/subhalo fields into a dict
    result = {}

    with h5py.File(gcPath(basePath,snapNum,fileNum),'r') as f:
        for haloProp in f[gName].keys():
            result[haloProp] = f[gName][haloProp][groupOffset]

    return result
| mit |
pim89/youtube-dl | youtube_dl/jsinterp.py | 16 | 8904 | from __future__ import unicode_literals
import json
import operator
import re
from .utils import (
ExtractorError,
)
# Binary operators mapped to their Python equivalents; presumably tried in
# list order when splitting expressions -- confirm against
# interpret_expression.
_OPERATORS = [
    ('|', operator.or_),
    ('^', operator.xor),
    ('&', operator.and_),
    ('>>', operator.rshift),
    ('<<', operator.lshift),
    ('-', operator.sub),
    ('+', operator.add),
    ('%', operator.mod),
    ('/', operator.truediv),
    ('*', operator.mul),
]
# Compound assignment operators ('|=', '+=', ...) derived from _OPERATORS,
# plus plain '=' (which just replaces the current value).
_ASSIGN_OPERATORS = [(op + '=', opfunc) for op, opfunc in _OPERATORS]
_ASSIGN_OPERATORS.append(('=', lambda cur, right: right))
# A valid JavaScript identifier (ASCII subset, '$' allowed).
_NAME_RE = r'[a-zA-Z_$][a-zA-Z_$0-9]*'
class JSInterpreter(object):
    """Minimal interpreter for the subset of JavaScript used by YouTube's
    signature-scrambling code.  Expressions are recognized with regular
    expressions rather than a real parser, so only simple statements,
    member accesses, and calls are supported."""
    def __init__(self, code, objects=None):
        # `objects` caches extracted JS object definitions by name; the
        # default is created here to avoid a shared mutable default arg.
        if objects is None:
            objects = {}
        self.code = code
        self._functions = {}  # name -> compiled Python callable
        self._objects = objects
    def interpret_statement(self, stmt, local_vars, allow_recursion=100):
        """Execute one JS statement; return (value, should_abort).
        `should_abort` is True when the statement was a `return`, telling
        the enclosing function body to stop executing."""
        if allow_recursion < 0:
            raise ExtractorError('Recursion limit reached')
        should_abort = False
        stmt = stmt.lstrip()
        stmt_m = re.match(r'var\s', stmt)
        if stmt_m:
            # `var x = ...`: strip the keyword and evaluate the rest as an
            # assignment expression
            expr = stmt[len(stmt_m.group(0)):]
        else:
            return_m = re.match(r'return(?:\s+|$)', stmt)
            if return_m:
                expr = stmt[len(return_m.group(0)):]
                should_abort = True
            else:
                # Try interpreting it as an expression
                expr = stmt
        v = self.interpret_expression(expr, local_vars, allow_recursion)
        return v, should_abort
    def interpret_expression(self, expr, local_vars, allow_recursion):
        """Evaluate a JS expression string and return its Python value.
        The possible expression forms are tried in a fixed order below."""
        expr = expr.strip()
        if expr == '':  # Empty expression
            return None
        if expr.startswith('('):
            # Evaluate the leading parenthesized group, then splice its
            # JSON-encoded result back into the remaining expression text.
            parens_count = 0
            for m in re.finditer(r'[()]', expr):
                if m.group(0) == '(':
                    parens_count += 1
                else:
                    parens_count -= 1
                    if parens_count == 0:
                        sub_expr = expr[1:m.start()]
                        sub_result = self.interpret_expression(
                            sub_expr, local_vars, allow_recursion)
                        remaining_expr = expr[m.end():].strip()
                        if not remaining_expr:
                            return sub_result
                        else:
                            expr = json.dumps(sub_result) + remaining_expr
                        break
            else:
                raise ExtractorError('Premature end of parens in %r' % expr)
        # Assignment: `name op= expr` or `name[idx] op= expr`
        for op, opfunc in _ASSIGN_OPERATORS:
            m = re.match(r'''(?x)
                (?P<out>%s)(?:\[(?P<index>[^\]]+?)\])?
                \s*%s
                (?P<expr>.*)$''' % (_NAME_RE, re.escape(op)), expr)
            if not m:
                continue
            right_val = self.interpret_expression(
                m.group('expr'), local_vars, allow_recursion - 1)
            if m.groupdict().get('index'):
                # Indexed assignment mutates the container in place
                lvar = local_vars[m.group('out')]
                idx = self.interpret_expression(
                    m.group('index'), local_vars, allow_recursion)
                assert isinstance(idx, int)
                cur = lvar[idx]
                val = opfunc(cur, right_val)
                lvar[idx] = val
                return val
            else:
                cur = local_vars.get(m.group('out'))
                val = opfunc(cur, right_val)
                local_vars[m.group('out')] = val
                return val
        # Integer literal
        if expr.isdigit():
            return int(expr)
        # Bare variable reference (keywords excluded via lookahead)
        var_m = re.match(
            r'(?!if|return|true|false)(?P<name>%s)$' % _NAME_RE,
            expr)
        if var_m:
            return local_vars[var_m.group('name')]
        # JSON literal (strings, arrays, ...)
        try:
            return json.loads(expr)
        except ValueError:
            pass
        # Member access or method call: `obj.member` / `obj.member(args)`
        m = re.match(
            r'(?P<var>%s)\.(?P<member>[^(]+)(?:\(+(?P<args>[^()]*)\))?$' % _NAME_RE,
            expr)
        if m:
            variable = m.group('var')
            member = m.group('member')
            arg_str = m.group('args')
            if variable in local_vars:
                obj = local_vars[variable]
            else:
                # Lazily extract (and cache) the object from self.code
                if variable not in self._objects:
                    self._objects[variable] = self.extract_object(variable)
                obj = self._objects[variable]
            if arg_str is None:
                # Member access
                if member == 'length':
                    return len(obj)
                return obj[member]
            assert expr.endswith(')')
            # Function call
            if arg_str == '':
                argvals = tuple()
            else:
                argvals = tuple([
                    self.interpret_expression(v, local_vars, allow_recursion)
                    for v in arg_str.split(',')])
            # Emulated built-in string/array methods
            if member == 'split':
                assert argvals == ('',)
                return list(obj)
            if member == 'join':
                assert len(argvals) == 1
                return argvals[0].join(obj)
            if member == 'reverse':
                assert len(argvals) == 0
                obj.reverse()
                return obj
            if member == 'slice':
                assert len(argvals) == 1
                return obj[argvals[0]:]
            if member == 'splice':
                assert isinstance(obj, list)
                index, howMany = argvals
                res = []
                for i in range(index, min(index + howMany, len(obj))):
                    res.append(obj.pop(index))
                return res
            # Otherwise call a method previously built from the JS source
            return obj[member](argvals)
        # Indexing: `name[expr]`
        m = re.match(
            r'(?P<in>%s)\[(?P<idx>.+)\]$' % _NAME_RE, expr)
        if m:
            val = local_vars[m.group('in')]
            idx = self.interpret_expression(
                m.group('idx'), local_vars, allow_recursion - 1)
            return val[idx]
        # Binary operators, tried in _OPERATORS order
        for op, opfunc in _OPERATORS:
            m = re.match(r'(?P<x>.+?)%s(?P<y>.+)' % re.escape(op), expr)
            if not m:
                continue
            x, abort = self.interpret_statement(
                m.group('x'), local_vars, allow_recursion - 1)
            if abort:
                raise ExtractorError(
                    'Premature left-side return of %s in %r' % (op, expr))
            y, abort = self.interpret_statement(
                m.group('y'), local_vars, allow_recursion - 1)
            if abort:
                raise ExtractorError(
                    'Premature right-side return of %s in %r' % (op, expr))
            return opfunc(x, y)
        # Plain function call: `f(a,b,3)` with simple (name/int) arguments
        m = re.match(
            r'^(?P<func>%s)\((?P<args>[a-zA-Z0-9_$,]+)\)$' % _NAME_RE, expr)
        if m:
            fname = m.group('func')
            argvals = tuple([
                int(v) if v.isdigit() else local_vars[v]
                for v in m.group('args').split(',')])
            if fname not in self._functions:
                self._functions[fname] = self.extract_function(fname)
            return self._functions[fname](argvals)
        raise ExtractorError('Unsupported JS expression %r' % expr)
    def extract_object(self, objname):
        """Locate `var objname = {...};` in the source and return a dict
        mapping member names to callables."""
        obj = {}
        obj_m = re.search(
            (r'(?:var\s+)?%s\s*=\s*\{' % re.escape(objname)) +
            r'\s*(?P<fields>([a-zA-Z$0-9]+\s*:\s*function\(.*?\)\s*\{.*?\}(?:,\s*)?)*)' +
            r'\}\s*;',
            self.code)
        fields = obj_m.group('fields')
        # Currently, it only supports function definitions
        fields_m = re.finditer(
            r'(?P<key>[a-zA-Z$0-9]+)\s*:\s*function'
            r'\((?P<args>[a-z,]+)\){(?P<code>[^}]+)}',
            fields)
        for f in fields_m:
            argnames = f.group('args').split(',')
            obj[f.group('key')] = self.build_function(argnames, f.group('code'))
        return obj
    def extract_function(self, funcname):
        """Find the definition of `funcname` in the source (any of the
        common JS declaration forms) and compile it into a callable."""
        func_m = re.search(
            r'''(?x)
                (?:function\s+%s|[{;,]\s*%s\s*=\s*function|var\s+%s\s*=\s*function)\s*
                \((?P<args>[^)]*)\)\s*
                \{(?P<code>[^}]+)\}''' % (
                re.escape(funcname), re.escape(funcname), re.escape(funcname)),
            self.code)
        if func_m is None:
            raise ExtractorError('Could not find JS function %r' % funcname)
        argnames = func_m.group('args').split(',')
        return self.build_function(argnames, func_m.group('code'))
    def call_function(self, funcname, *args):
        """Convenience wrapper: extract `funcname` and call it with *args."""
        f = self.extract_function(funcname)
        return f(args)
    def build_function(self, argnames, code):
        """Wrap a JS function body in a Python callable that takes an
        argument tuple; statements are split on ';' and run in order until
        a `return` aborts the loop."""
        def resf(args):
            local_vars = dict(zip(argnames, args))
            for stmt in code.split(';'):
                res, abort = self.interpret_statement(stmt, local_vars)
                if abort:
                    break
            return res
        return resf
| unlicense |
HoracioAlvarado/fwd | venv/Lib/site-packages/setuptools/command/easy_install.py | 18 | 86096 | #!/usr/bin/env python
"""
Easy Install
------------
A tool for doing automatic download/extract/build of distutils-based Python
packages. For detailed documentation, see the accompanying EasyInstall.txt
file, or visit the `EasyInstall home page`__.
__ https://pythonhosted.org/setuptools/easy_install.html
"""
from glob import glob
from distutils.util import get_platform
from distutils.util import convert_path, subst_vars
from distutils.errors import DistutilsArgError, DistutilsOptionError, \
DistutilsError, DistutilsPlatformError
from distutils.command.install import INSTALL_SCHEMES, SCHEME_KEYS
from distutils import log, dir_util
from distutils.command.build_scripts import first_line_re
from distutils.spawn import find_executable
import sys
import os
import zipimport
import shutil
import tempfile
import zipfile
import re
import stat
import random
import platform
import textwrap
import warnings
import site
import struct
import contextlib
import subprocess
import shlex
import io
from setuptools.extern import six
from setuptools.extern.six.moves import configparser, map
from setuptools import Command
from setuptools.sandbox import run_setup
from setuptools.py31compat import get_path, get_config_vars
from setuptools.command import setopt
from setuptools.archive_util import unpack_archive
from setuptools.package_index import PackageIndex
from setuptools.package_index import URL_SCHEME
from setuptools.command import bdist_egg, egg_info
from pkg_resources import (
yield_lines, normalize_path, resource_string, ensure_directory,
get_distribution, find_distributions, Environment, Requirement,
Distribution, PathMetadata, EggMetadata, WorkingSet, DistributionNotFound,
VersionConflict, DEVELOP_DIST,
)
import pkg_resources
# Turn on PEP440Warnings
warnings.filterwarnings("default", category=pkg_resources.PEP440Warning)
# Names exported by `from ... import *`; also documents the public API.
__all__ = [
    'samefile', 'easy_install', 'PthDistributions', 'extract_wininst_cfg',
    'main', 'get_exe_prefixes',
]
def is_64bit():
    """Return True when running on a 64-bit Python (8-byte pointers)."""
    pointer_bits = struct.calcsize("P") * 8
    return pointer_bits == 64
def samefile(p1, p2):
    """Return True if p1 and p2 name the same file.
    When both paths exist and the platform provides os.path.samefile, use
    it (it resolves links); otherwise fall back to comparing normalized
    path text.
    """
    can_compare_inodes = (
        hasattr(os.path, 'samefile')
        and os.path.exists(p1)
        and os.path.exists(p2)
    )
    if can_compare_inodes:
        return os.path.samefile(p1, p2)
    def _norm(p):
        return os.path.normpath(os.path.normcase(p))
    return _norm(p1) == _norm(p2)
if six.PY2:
    def _to_ascii(s):
        """On Python 2, bytes pass through unchanged."""
        return s
    def isascii(s):
        """Return True when `s` decodes cleanly as ASCII."""
        try:
            six.text_type(s, 'ascii')
        except UnicodeError:
            return False
        return True
else:
    def _to_ascii(s):
        """On Python 3, encode text to ASCII bytes."""
        return s.encode('ascii')
    def isascii(s):
        """Return True when `s` encodes cleanly as ASCII."""
        try:
            s.encode('ascii')
        except UnicodeError:
            return False
        return True
class easy_install(Command):
    """Manage a download/build/install process"""
    description = "Find/get/install Python packages"
    # distutils protocol flag: leftover command-line args become self.args
    command_consumes_arguments = True
    # distutils option table: (long name, short flag, help text)
    user_options = [
        ('prefix=', None, "installation prefix"),
        ("zip-ok", "z", "install package as a zipfile"),
        ("multi-version", "m", "make apps have to require() a version"),
        ("upgrade", "U", "force upgrade (searches PyPI for latest versions)"),
        ("install-dir=", "d", "install package to DIR"),
        ("script-dir=", "s", "install scripts to DIR"),
        ("exclude-scripts", "x", "Don't install scripts"),
        ("always-copy", "a", "Copy all needed packages to install dir"),
        ("index-url=", "i", "base URL of Python Package Index"),
        ("find-links=", "f", "additional URL(s) to search for packages"),
        ("build-directory=", "b",
         "download/extract/build in DIR; keep the results"),
        ('optimize=', 'O',
         "also compile with optimization: -O1 for \"python -O\", "
         "-O2 for \"python -OO\", and -O0 to disable [default: -O0]"),
        ('record=', None,
         "filename in which to record list of installed files"),
        ('always-unzip', 'Z', "don't install as a zipfile, no matter what"),
        ('site-dirs=', 'S', "list of directories where .pth files work"),
        ('editable', 'e', "Install specified packages in editable form"),
        ('no-deps', 'N', "don't install dependencies"),
        ('allow-hosts=', 'H', "pattern(s) that hostnames must match"),
        ('local-snapshots-ok', 'l',
         "allow building eggs from local checkouts"),
        ('version', None, "print version information and exit"),
        ('no-find-links', None,
         "Don't load find-links defined in packages being installed")
    ]
    # Options that take no argument (presence = True)
    boolean_options = [
        'zip-ok', 'multi-version', 'exclude-scripts', 'upgrade', 'always-copy',
        'editable',
        'no-deps', 'local-snapshots-ok', 'version'
    ]
    # --user is only offered when the interpreter enables per-user site dirs
    if site.ENABLE_USER_SITE:
        help_msg = "install in user site-package '%s'" % site.USER_SITE
        user_options.append(('user', None, help_msg))
        boolean_options.append('user')
    # --always-unzip is the negation of --zip-ok
    negative_opt = {'always-unzip': 'zip-ok'}
    # factory for the package index; subclasses may override
    create_index = PackageIndex
    def initialize_options(self):
        """Set every option to its pre-parse default, as required by the
        distutils command protocol (finalize_options resolves them)."""
        # the --user option seems to be an opt-in one,
        # so the default should be False.
        self.user = 0
        self.zip_ok = self.local_snapshots_ok = None
        self.install_dir = self.script_dir = self.exclude_scripts = None
        self.index_url = None
        self.find_links = None
        self.build_directory = None
        self.args = None
        self.optimize = self.record = None
        self.upgrade = self.always_copy = self.multi_version = None
        self.editable = self.no_deps = self.allow_hosts = None
        self.root = self.prefix = self.no_report = None
        self.version = None
        self.install_purelib = None  # for pure module distributions
        self.install_platlib = None  # non-pure (dists w/ extensions)
        self.install_headers = None  # for C/C++ headers
        self.install_lib = None  # set to either purelib or platlib
        self.install_scripts = None
        self.install_data = None
        self.install_base = None
        self.install_platbase = None
        if site.ENABLE_USER_SITE:
            self.install_userbase = site.USER_BASE
            self.install_usersite = site.USER_SITE
        else:
            self.install_userbase = None
            self.install_usersite = None
        self.no_find_links = None
        # Options not specifiable via command line
        self.package_index = None
        self.pth_file = self.always_copy_from = None
        self.site_dirs = None
        self.installed_projects = {}
        self.sitepy_installed = False
        # Always read easy_install options, even if we are subclassed, or have
        # an independent instance created. This ensures that defaults will
        # always come from the standard configuration file(s)' "easy_install"
        # section, even if this is a "develop" or "install" command, or some
        # other embedding.
        self._dry_run = None
        self.verbose = self.distribution.verbose
        self.distribution._set_command_options(
            self, self.distribution.get_option_dict('easy_install')
        )
def delete_blockers(self, blockers):
extant_blockers = (
filename for filename in blockers
if os.path.exists(filename) or os.path.islink(filename)
)
list(map(self._delete_path, extant_blockers))
def _delete_path(self, path):
log.info("Deleting %s", path)
if self.dry_run:
return
is_tree = os.path.isdir(path) and not os.path.islink(path)
remover = rmtree if is_tree else os.unlink
remover(path)
@staticmethod
def _render_version():
"""
Render the Setuptools version and installation details, then exit.
"""
ver = sys.version[:3]
dist = get_distribution('setuptools')
tmpl = 'setuptools {dist.version} from {dist.location} (Python {ver})'
print(tmpl.format(**locals()))
raise SystemExit()
    def finalize_options(self):
        """Resolve all options: compute config vars, expand directories,
        validate site dirs, and construct the package index / find-links."""
        # --version short-circuits everything else
        self.version and self._render_version()
        py_version = sys.version.split()[0]
        prefix, exec_prefix = get_config_vars('prefix', 'exec_prefix')
        # substitution variables available in option values like $py_version
        self.config_vars = {
            'dist_name': self.distribution.get_name(),
            'dist_version': self.distribution.get_version(),
            'dist_fullname': self.distribution.get_fullname(),
            'py_version': py_version,
            'py_version_short': py_version[0:3],
            'py_version_nodot': py_version[0] + py_version[2],
            'sys_prefix': prefix,
            'prefix': prefix,
            'sys_exec_prefix': exec_prefix,
            'exec_prefix': exec_prefix,
            # Only python 3.2+ has abiflags
            'abiflags': getattr(sys, 'abiflags', ''),
        }
        if site.ENABLE_USER_SITE:
            self.config_vars['userbase'] = self.install_userbase
            self.config_vars['usersite'] = self.install_usersite
        self._fix_install_dir_for_user_site()
        self.expand_basedirs()
        self.expand_dirs()
        self._expand('install_dir', 'script_dir', 'build_directory',
                     'site_dirs')
        # If a non-default installation directory was specified, default the
        # script directory to match it.
        if self.script_dir is None:
            self.script_dir = self.install_dir
        if self.no_find_links is None:
            self.no_find_links = False
        # Let install_dir get set by install_lib command, which in turn
        # gets its info from the install command, and takes into account
        # --prefix and --home and all that other crud.
        self.set_undefined_options(
            'install_lib', ('install_dir', 'install_dir')
        )
        # Likewise, set default script_dir from 'install_scripts.install_dir'
        self.set_undefined_options(
            'install_scripts', ('install_dir', 'script_dir')
        )
        if self.user and self.install_purelib:
            self.install_dir = self.install_purelib
            self.script_dir = self.install_scripts
        # default --record from the install command
        self.set_undefined_options('install', ('record', 'record'))
        # Should this be moved to the if statement below? It's not used
        # elsewhere
        normpath = map(normalize_path, sys.path)
        self.all_site_dirs = get_site_dirs()
        if self.site_dirs is not None:
            site_dirs = [
                os.path.expanduser(s.strip()) for s in
                self.site_dirs.split(',')
            ]
            for d in site_dirs:
                if not os.path.isdir(d):
                    log.warn("%s (in --site-dirs) does not exist", d)
                elif normalize_path(d) not in normpath:
                    raise DistutilsOptionError(
                        d + " (in --site-dirs) is not on sys.path"
                    )
                else:
                    self.all_site_dirs.append(normalize_path(d))
        if not self.editable:
            self.check_site_dir()
        self.index_url = self.index_url or "https://pypi.python.org/simple"
        # shadow_path: site dirs plus install/script dirs, searched first
        self.shadow_path = self.all_site_dirs[:]
        for path_item in self.install_dir, normalize_path(self.script_dir):
            if path_item not in self.shadow_path:
                self.shadow_path.insert(0, path_item)
        if self.allow_hosts is not None:
            hosts = [s.strip() for s in self.allow_hosts.split(',')]
        else:
            hosts = ['*']
        if self.package_index is None:
            self.package_index = self.create_index(
                self.index_url, search_path=self.shadow_path, hosts=hosts,
            )
        self.local_index = Environment(self.shadow_path + sys.path)
        if self.find_links is not None:
            if isinstance(self.find_links, six.string_types):
                self.find_links = self.find_links.split()
        else:
            self.find_links = []
        if self.local_snapshots_ok:
            self.package_index.scan_egg_links(self.shadow_path + sys.path)
        if not self.no_find_links:
            self.package_index.add_find_links(self.find_links)
        self.set_undefined_options('install_lib', ('optimize', 'optimize'))
        if not isinstance(self.optimize, int):
            try:
                self.optimize = int(self.optimize)
                if not (0 <= self.optimize <= 2):
                    raise ValueError
            except ValueError:
                raise DistutilsOptionError("--optimize must be 0, 1, or 2")
        if self.editable and not self.build_directory:
            raise DistutilsArgError(
                "Must specify a build directory (-b) when using --editable"
            )
        if not self.args:
            raise DistutilsArgError(
                "No urls, filenames, or requirements specified (see --help)")
        self.outputs = []
def _fix_install_dir_for_user_site(self):
"""
Fix the install_dir if "--user" was used.
"""
if not self.user or not site.ENABLE_USER_SITE:
return
self.create_home_path()
if self.install_userbase is None:
msg = "User base directory is not specified"
raise DistutilsPlatformError(msg)
self.install_base = self.install_platbase = self.install_userbase
scheme_name = os.name.replace('posix', 'unix') + '_user'
self.select_scheme(scheme_name)
def _expand_attrs(self, attrs):
for attr in attrs:
val = getattr(self, attr)
if val is not None:
if os.name == 'posix' or os.name == 'nt':
val = os.path.expanduser(val)
val = subst_vars(val, self.config_vars)
setattr(self, attr, val)
def expand_basedirs(self):
"""Calls `os.path.expanduser` on install_base, install_platbase and
root."""
self._expand_attrs(['install_base', 'install_platbase', 'root'])
def expand_dirs(self):
"""Calls `os.path.expanduser` on install dirs."""
self._expand_attrs(['install_purelib', 'install_platlib',
'install_lib', 'install_headers',
'install_scripts', 'install_data', ])
    def run(self):
        """Install each requested spec, then optionally write the --record
        file listing every installed path."""
        if self.verbose != self.distribution.verbose:
            log.set_verbosity(self.verbose)
        try:
            for spec in self.args:
                self.easy_install(spec, not self.no_deps)
            if self.record:
                outputs = self.outputs
                if self.root:  # strip any package prefix
                    root_len = len(self.root)
                    for counter in range(len(outputs)):
                        outputs[counter] = outputs[counter][root_len:]
                from distutils import file_util
                self.execute(
                    file_util.write_file, (self.record, outputs),
                    "writing list of installed files to '%s'" %
                    self.record
                )
            self.warn_deprecated_options()
        finally:
            # always restore the distribution's verbosity level
            log.set_verbosity(self.distribution.verbose)
def pseudo_tempname(self):
"""Return a pseudo-tempname base in the install directory.
This code is intentionally naive; if a malicious party can write to
the target directory you're already in deep doodoo.
"""
try:
pid = os.getpid()
except:
pid = random.randint(0, sys.maxsize)
return os.path.join(self.install_dir, "test-easy-install-%s" % pid)
    def warn_deprecated_options(self):
        # Extension hook called from run(); nothing is deprecated at the
        # moment, so this is intentionally a no-op.
        pass
    def check_site_dir(self):
        """Verify that self.install_dir is .pth-capable dir, if needed"""
        instdir = normalize_path(self.install_dir)
        pth_file = os.path.join(instdir, 'easy-install.pth')
        # Is it a configured, PYTHONPATH, implicit, or explicit site dir?
        is_site_dir = instdir in self.all_site_dirs
        if not is_site_dir and not self.multi_version:
            # No? Then directly test whether it does .pth file processing
            is_site_dir = self.check_pth_processing()
        else:
            # make sure we can write to target dir
            testfile = self.pseudo_tempname() + '.write-test'
            test_exists = os.path.exists(testfile)
            try:
                if test_exists:
                    os.unlink(testfile)
                open(testfile, 'w').close()
                os.unlink(testfile)
            except (OSError, IOError):
                self.cant_write_to_target()
        if not is_site_dir and not self.multi_version:
            # Can't install non-multi to non-site dir
            raise DistutilsError(self.no_default_version_msg())
        if is_site_dir:
            if self.pth_file is None:
                self.pth_file = PthDistributions(pth_file, self.all_site_dirs)
        else:
            self.pth_file = None
        PYTHONPATH = os.environ.get('PYTHONPATH', '').split(os.pathsep)
        if instdir not in map(normalize_path, filter(None, PYTHONPATH)):
            # only PYTHONPATH dirs need a site.py, so pretend it's there
            self.sitepy_installed = True
        elif self.multi_version and not os.path.exists(pth_file):
            self.sitepy_installed = True  # don't need site.py in this case
            self.pth_file = None  # and don't create a .pth file
        self.install_dir = instdir
__cant_write_msg = textwrap.dedent("""
can't create or remove files in install directory
The following error occurred while trying to add or remove files in the
installation directory:
%s
The installation directory you specified (via --install-dir, --prefix, or
the distutils default setting) was:
%s
""").lstrip()
__not_exists_id = textwrap.dedent("""
This directory does not currently exist. Please create it and try again, or
choose a different installation directory (using the -d or --install-dir
option).
""").lstrip()
__access_msg = textwrap.dedent("""
Perhaps your account does not have write access to this directory? If the
installation directory is a system-owned directory, you may need to sign in
as the administrator or "root" account. If you do not have administrative
access to this machine, you may wish to choose a different installation
directory, preferably one that is listed in your PYTHONPATH environment
variable.
For information on other options, you may wish to consult the
documentation at:
https://pythonhosted.org/setuptools/easy_install.html
Please make the appropriate changes for your system and try again.
""").lstrip()
def cant_write_to_target(self):
msg = self.__cant_write_msg % (sys.exc_info()[1], self.install_dir,)
if not os.path.exists(self.install_dir):
msg += '\n' + self.__not_exists_id
else:
msg += '\n' + self.__access_msg
raise DistutilsError(msg)
    def check_pth_processing(self):
        """Empirically verify whether .pth files are supported in inst. dir"""
        instdir = self.install_dir
        log.info("Checking .pth file support in %s", instdir)
        pth_file = self.pseudo_tempname() + ".pth"
        ok_file = pth_file + '.ok'
        ok_exists = os.path.exists(ok_file)
        try:
            if ok_exists:
                os.unlink(ok_file)
            dirname = os.path.dirname(ok_file)
            if not os.path.exists(dirname):
                os.makedirs(dirname)
            f = open(pth_file, 'w')
        except (OSError, IOError):
            self.cant_write_to_target()
        else:
            try:
                # Write a .pth that creates a marker file when processed,
                # then spawn a child interpreter and look for the marker.
                f.write("import os; f = open(%r, 'w'); f.write('OK'); "
                        "f.close()\n" % (ok_file,))
                f.close()
                f = None
                executable = sys.executable
                if os.name == 'nt':
                    dirname, basename = os.path.split(executable)
                    alt = os.path.join(dirname, 'pythonw.exe')
                    if (basename.lower() == 'python.exe' and
                            os.path.exists(alt)):
                        # use pythonw.exe to avoid opening a console window
                        executable = alt
                from distutils.spawn import spawn
                spawn([executable, '-E', '-c', 'pass'], 0)
                if os.path.exists(ok_file):
                    log.info(
                        "TEST PASSED: %s appears to support .pth files",
                        instdir
                    )
                    return True
            finally:
                # always remove the probe files, pass or fail
                if f:
                    f.close()
                if os.path.exists(ok_file):
                    os.unlink(ok_file)
                if os.path.exists(pth_file):
                    os.unlink(pth_file)
        if not self.multi_version:
            log.warn("TEST FAILED: %s does NOT support .pth files", instdir)
        return False
def install_egg_scripts(self, dist):
"""Write all the scripts for `dist`, unless scripts are excluded"""
if not self.exclude_scripts and dist.metadata_isdir('scripts'):
for script_name in dist.metadata_listdir('scripts'):
if dist.metadata_isdir('scripts/' + script_name):
# The "script" is a directory, likely a Python 3
# __pycache__ directory, so skip it.
continue
self.install_script(
dist, script_name,
dist.get_metadata('scripts/' + script_name)
)
self.install_wrapper_scripts(dist)
def add_output(self, path):
if os.path.isdir(path):
for base, dirs, files in os.walk(path):
for filename in files:
self.outputs.append(os.path.join(base, filename))
else:
self.outputs.append(path)
def not_editable(self, spec):
if self.editable:
raise DistutilsArgError(
"Invalid argument %r: you can't use filenames or URLs "
"with --editable (except via the --find-links option)."
% (spec,)
)
def check_editable(self, spec):
if not self.editable:
return
if os.path.exists(os.path.join(self.build_directory, spec.key)):
raise DistutilsArgError(
"%r already exists in %s; can't do a checkout there" %
(spec.key, self.build_directory)
)
    def easy_install(self, spec, deps=False):
        """Install `spec` (a Requirement, URL, or local path); return the
        installed Distribution, or None. Work happens in a temp dir that
        is always cleaned up."""
        tmpdir = tempfile.mkdtemp(prefix="easy_install-")
        download = None
        if not self.editable:
            self.install_site_py()
        try:
            if not isinstance(spec, Requirement):
                if URL_SCHEME(spec):
                    # It's a url, download it to tmpdir and process
                    self.not_editable(spec)
                    download = self.package_index.download(spec, tmpdir)
                    return self.install_item(None, download, tmpdir, deps,
                                             True)
                elif os.path.exists(spec):
                    # Existing file or directory, just process it directly
                    self.not_editable(spec)
                    return self.install_item(None, spec, tmpdir, deps, True)
                else:
                    spec = parse_requirement_arg(spec)
            self.check_editable(spec)
            dist = self.package_index.fetch_distribution(
                spec, tmpdir, self.upgrade, self.editable,
                not self.always_copy, self.local_index
            )
            if dist is None:
                msg = "Could not find suitable distribution for %r" % spec
                if self.always_copy:
                    msg += " (--always-copy skips system and development eggs)"
                raise DistutilsError(msg)
            elif dist.precedence == DEVELOP_DIST:
                # .egg-info dists don't need installing, just process deps
                self.process_distribution(spec, dist, deps, "Using")
                return dist
            else:
                return self.install_item(spec, dist.location, tmpdir, deps)
        finally:
            if os.path.exists(tmpdir):
                rmtree(tmpdir)
    def install_item(self, spec, download, tmpdir, deps, install_needed=False):
        """Install the artifact at `download` (egg, archive, or source
        tree), skipping the copy when an identical local egg is already
        indexed. Returns the matching Distribution when `spec` is given."""
        # Installation is also needed if file in tmpdir or is not an egg
        install_needed = install_needed or self.always_copy
        install_needed = install_needed or os.path.dirname(download) == tmpdir
        install_needed = install_needed or not download.endswith('.egg')
        install_needed = install_needed or (
            self.always_copy_from is not None and
            os.path.dirname(normalize_path(download)) ==
            normalize_path(self.always_copy_from)
        )
        if spec and not install_needed:
            # at this point, we know it's a local .egg, we just don't know if
            # it's already installed.
            for dist in self.local_index[spec.project_name]:
                if dist.location == download:
                    break
            else:
                install_needed = True  # it's not in the local index
        log.info("Processing %s", os.path.basename(download))
        if install_needed:
            dists = self.install_eggs(spec, download, tmpdir)
            for dist in dists:
                self.process_distribution(spec, dist, deps)
        else:
            dists = [self.egg_distribution(download)]
            self.process_distribution(spec, dists[0], deps, "Using")
        if spec is not None:
            for dist in dists:
                if dist in spec:
                    return dist
def select_scheme(self, name):
"""Sets the install directories by applying the install schemes."""
# it's the caller's problem if they supply a bad name!
scheme = INSTALL_SCHEMES[name]
for key in SCHEME_KEYS:
attrname = 'install_' + key
if getattr(self, attrname) is None:
setattr(self, attrname, scheme[key])
    def process_distribution(self, requirement, dist, deps=True, *info):
        """Register installed `dist` (.pth entries, indexes, scripts) and
        then resolve/install its dependencies unless `deps` is false."""
        self.update_pth(dist)
        self.package_index.add(dist)
        if dist in self.local_index[dist.key]:
            self.local_index.remove(dist)
        self.local_index.add(dist)
        self.install_egg_scripts(dist)
        self.installed_projects[dist.key] = dist
        log.info(self.installation_report(requirement, dist, *info))
        if (dist.has_metadata('dependency_links.txt') and
                not self.no_find_links):
            self.package_index.add_find_links(
                dist.get_metadata_lines('dependency_links.txt')
            )
        if not deps and not self.always_copy:
            return
        elif requirement is not None and dist.key != requirement.key:
            log.warn("Skipping dependencies for %s", dist)
            return  # XXX this is not the distribution we were looking for
        elif requirement is None or dist not in requirement:
            # if we wound up with a different version, resolve what we've got
            distreq = dist.as_requirement()
            requirement = requirement or distreq
            requirement = Requirement(
                distreq.project_name, distreq.specs, requirement.extras
            )
        log.info("Processing dependencies for %s", requirement)
        try:
            distros = WorkingSet([]).resolve(
                [requirement], self.local_index, self.easy_install
            )
        except DistributionNotFound as e:
            raise DistutilsError(str(e))
        except VersionConflict as e:
            raise DistutilsError(e.report())
        if self.always_copy or self.always_copy_from:
            # Force all the relevant distros to be copied or activated
            for dist in distros:
                if dist.key not in self.installed_projects:
                    self.easy_install(dist.as_requirement())
        log.info("Finished processing dependencies for %s", requirement)
def should_unzip(self, dist):
if self.zip_ok is not None:
return not self.zip_ok
if dist.has_metadata('not-zip-safe'):
return True
if not dist.has_metadata('zip-safe'):
return True
return False
    def maybe_move(self, spec, dist_filename, setup_base):
        """Move a downloaded source tree into --build-directory; return the
        directory to use as the setup base afterwards."""
        dst = os.path.join(self.build_directory, spec.key)
        if os.path.exists(dst):
            msg = ("%r already exists in %s; build directory %s will not be "
                   "kept")
            log.warn(msg, spec.key, self.build_directory, setup_base)
            return setup_base
        if os.path.isdir(dist_filename):
            setup_base = dist_filename
        else:
            if os.path.dirname(dist_filename) == setup_base:
                os.unlink(dist_filename)  # get it out of the tmp dir
            contents = os.listdir(setup_base)
            if len(contents) == 1:
                dist_filename = os.path.join(setup_base, contents[0])
                if os.path.isdir(dist_filename):
                    # if the only thing there is a directory, move it instead
                    setup_base = dist_filename
        ensure_directory(dst)
        shutil.move(setup_base, dst)
        return dst
def install_wrapper_scripts(self, dist):
if self.exclude_scripts:
return
for args in ScriptWriter.best().get_args(dist):
self.write_script(*args)
    def install_script(self, dist, script_name, script_text, dev_path=None):
        """Generate a legacy script wrapper and install it"""
        spec = str(dist.as_requirement())
        is_script = is_python_script(script_text, script_name)
        if is_script:
            # NOTE: the template is filled via `% locals()`, so local names
            # (e.g. `spec`, `dev_path`) are referenced by the template text
            # -- do not rename them.
            body = self._load_template(dev_path) % locals()
            script_text = ScriptWriter.get_header(script_text) + body
        self.write_script(script_name, _to_ascii(script_text), 'b')
@staticmethod
def _load_template(dev_path):
"""
There are a couple of template scripts in the package. This
function loads one of them and prepares it for use.
"""
# See https://bitbucket.org/pypa/setuptools/issue/134 for info
# on script file naming and downstream issues with SVR4
name = 'script.tmpl'
if dev_path:
name = name.replace('.tmpl', ' (dev).tmpl')
raw_bytes = resource_string('setuptools', name)
return raw_bytes.decode('utf-8')
def write_script(self, script_name, contents, mode="t", blockers=()):
"""Write an executable file to the scripts directory"""
self.delete_blockers( # clean up old .py/.pyw w/o a script
[os.path.join(self.script_dir, x) for x in blockers]
)
log.info("Installing %s script to %s", script_name, self.script_dir)
target = os.path.join(self.script_dir, script_name)
self.add_output(target)
mask = current_umask()
if not self.dry_run:
ensure_directory(target)
if os.path.exists(target):
os.unlink(target)
with open(target, "w" + mode) as f:
f.write(contents)
chmod(target, 0o777 - mask)
    def install_eggs(self, spec, dist_filename, tmpdir):
        """Build and install from `dist_filename` -- an .egg, a wininst
        .exe, an archive, or a source directory -- returning the list of
        installed Distributions."""
        # .egg dirs or files are already built, so just return them
        if dist_filename.lower().endswith('.egg'):
            return [self.install_egg(dist_filename, tmpdir)]
        elif dist_filename.lower().endswith('.exe'):
            return [self.install_exe(dist_filename, tmpdir)]
        # Anything else, try to extract and build
        setup_base = tmpdir
        if os.path.isfile(dist_filename) and not dist_filename.endswith('.py'):
            unpack_archive(dist_filename, tmpdir, self.unpack_progress)
        elif os.path.isdir(dist_filename):
            setup_base = os.path.abspath(dist_filename)
        if (setup_base.startswith(tmpdir)  # something we downloaded
                and self.build_directory and spec is not None):
            setup_base = self.maybe_move(spec, dist_filename, setup_base)
        # Find the setup.py file
        setup_script = os.path.join(setup_base, 'setup.py')
        if not os.path.exists(setup_script):
            setups = glob(os.path.join(setup_base, '*', 'setup.py'))
            if not setups:
                raise DistutilsError(
                    "Couldn't find a setup script in %s" %
                    os.path.abspath(dist_filename)
                )
            if len(setups) > 1:
                raise DistutilsError(
                    "Multiple setup scripts in %s" %
                    os.path.abspath(dist_filename)
                )
            setup_script = setups[0]
        # Now run it, and return the result
        if self.editable:
            log.info(self.report_editable(spec, setup_script))
            return []
        else:
            return self.build_and_install(setup_script, setup_base)
def egg_distribution(self, egg_path):
if os.path.isdir(egg_path):
metadata = PathMetadata(egg_path, os.path.join(egg_path,
'EGG-INFO'))
else:
metadata = EggMetadata(zipimport.zipimporter(egg_path))
return Distribution.from_filename(egg_path, metadata=metadata)
    def install_egg(self, egg_path, tmpdir):
        """Transfer the egg at *egg_path* into the install dir and return
        the installed Distribution.

        The transfer strategy (move/copy/copytree/extract) depends on
        whether the egg is a directory, should be unzipped, or stays
        zipped; zipimport caches are updated to match the outcome.
        """
        destination = os.path.join(self.install_dir,
                                   os.path.basename(egg_path))
        destination = os.path.abspath(destination)
        if not self.dry_run:
            ensure_directory(destination)
        dist = self.egg_distribution(egg_path)
        if not samefile(egg_path, destination):
            # clear whatever currently occupies the destination
            if os.path.isdir(destination) and not os.path.islink(destination):
                dir_util.remove_tree(destination, dry_run=self.dry_run)
            elif os.path.exists(destination):
                self.execute(os.unlink, (destination,), "Removing " +
                             destination)
            try:
                new_dist_is_zipped = False
                if os.path.isdir(egg_path):
                    # unpacked egg: move if it's our temp copy, else copy
                    if egg_path.startswith(tmpdir):
                        f, m = shutil.move, "Moving"
                    else:
                        f, m = shutil.copytree, "Copying"
                elif self.should_unzip(dist):
                    self.mkpath(destination)
                    f, m = self.unpack_and_compile, "Extracting"
                else:
                    new_dist_is_zipped = True
                    if egg_path.startswith(tmpdir):
                        f, m = shutil.move, "Moving"
                    else:
                        f, m = shutil.copy2, "Copying"
                self.execute(f, (egg_path, destination),
                             (m + " %s to %s") %
                             (os.path.basename(egg_path),
                              os.path.dirname(destination)))
                update_dist_caches(destination,
                                   fix_zipimporter_caches=new_dist_is_zipped)
            except:
                # transfer failed midway: at least drop now-stale zipimport
                # caches for the destination before propagating the error
                update_dist_caches(destination, fix_zipimporter_caches=False)
                raise
        self.add_output(destination)
        return self.egg_distribution(destination)
    def install_exe(self, dist_filename, tmpdir):
        """Convert a bdist_wininst ``.exe`` into an egg and install it.

        Raises DistutilsError if the file lacks valid wininst metadata.
        """
        # See if it's valid, get data
        cfg = extract_wininst_cfg(dist_filename)
        if cfg is None:
            raise DistutilsError(
                "%s is not a valid distutils Windows .exe" % dist_filename
            )
        # Create a dummy distribution object until we build the real distro
        dist = Distribution(
            None,
            project_name=cfg.get('metadata', 'name'),
            version=cfg.get('metadata', 'version'), platform=get_platform(),
        )
        # Convert the .exe to an unpacked egg
        egg_path = dist.location = os.path.join(tmpdir, dist.egg_name() +
                                                '.egg')
        egg_tmp = egg_path + '.tmp'
        _egg_info = os.path.join(egg_tmp, 'EGG-INFO')
        pkg_inf = os.path.join(_egg_info, 'PKG-INFO')
        ensure_directory(pkg_inf)  # make sure EGG-INFO dir exists
        dist._provider = PathMetadata(egg_tmp, _egg_info)  # XXX
        self.exe_to_egg(dist_filename, egg_tmp)
        # Write EGG-INFO/PKG-INFO (only if the .exe didn't ship one)
        if not os.path.exists(pkg_inf):
            f = open(pkg_inf, 'w')
            f.write('Metadata-Version: 1.0\n')
            for k, v in cfg.items('metadata'):
                if k != 'target_version':
                    f.write('%s: %s\n' % (k.replace('_', '-').title(), v))
            f.close()
        script_dir = os.path.join(_egg_info, 'scripts')
        # delete entry-point scripts to avoid duping
        self.delete_blockers(
            [os.path.join(script_dir, args[0]) for args in
             ScriptWriter.get_args(dist)]
        )
        # Build .egg file from tmpdir
        bdist_egg.make_zipfile(
            egg_path, egg_tmp, verbose=self.verbose, dry_run=self.dry_run
        )
        # install the .egg
        return self.install_egg(egg_path, tmpdir)
    def exe_to_egg(self, dist_filename, egg_tmp):
        """Extract a bdist_wininst to the directories an egg would use"""
        # Check for .pth file and set up prefix translations
        prefixes = get_exe_prefixes(dist_filename)
        to_compile = []
        native_libs = []
        top_level = {}
        def process(src, dst):
            # unpack filter: remap archive paths into egg layout, recording
            # .py files to byte-compile and native extensions to stub out
            s = src.lower()
            for old, new in prefixes:
                if s.startswith(old):
                    src = new + src[len(old):]
                    parts = src.split('/')
                    dst = os.path.join(egg_tmp, *parts)
                    dl = dst.lower()
                    if dl.endswith('.pyd') or dl.endswith('.dll'):
                        parts[-1] = bdist_egg.strip_module(parts[-1])
                        top_level[os.path.splitext(parts[0])[0]] = 1
                        native_libs.append(src)
                    elif dl.endswith('.py') and old != 'SCRIPTS/':
                        top_level[os.path.splitext(parts[0])[0]] = 1
                        to_compile.append(dst)
                    return dst
            if not src.endswith('.pth'):
                log.warn("WARNING: can't process %s", src)
            return None
        # extract, tracking .pyd/.dll->native_libs and .py -> to_compile
        unpack_archive(dist_filename, egg_tmp, process)
        stubs = []
        for res in native_libs:
            if res.lower().endswith('.pyd'):  # create stubs for .pyd's
                parts = res.split('/')
                resource = parts[-1]
                parts[-1] = bdist_egg.strip_module(parts[-1]) + '.py'
                pyfile = os.path.join(egg_tmp, *parts)
                to_compile.append(pyfile)
                stubs.append(pyfile)
                bdist_egg.write_stub(resource, pyfile)
        self.byte_compile(to_compile)  # compile .py's
        bdist_egg.write_safety_flag(
            os.path.join(egg_tmp, 'EGG-INFO'),
            bdist_egg.analyze_egg(egg_tmp, stubs))  # write zip-safety flag
        # write top_level.txt / native_libs.txt metadata when non-empty;
        # locals()[name] resolves to the like-named collection above
        for name in 'top_level', 'native_libs':
            if locals()[name]:
                txt = os.path.join(egg_tmp, 'EGG-INFO', name + '.txt')
                if not os.path.exists(txt):
                    f = open(txt, 'w')
                    f.write('\n'.join(locals()[name]) + '\n')
                    f.close()
__mv_warning = textwrap.dedent("""
Because this distribution was installed --multi-version, before you can
import modules from this package in an application, you will need to
'import pkg_resources' and then use a 'require()' call similar to one of
these examples, in order to select the desired version:
pkg_resources.require("%(name)s") # latest installed version
pkg_resources.require("%(name)s==%(version)s") # this exact version
pkg_resources.require("%(name)s>=%(version)s") # this version or higher
""").lstrip()
__id_warning = textwrap.dedent("""
Note also that the installation directory must be on sys.path at runtime for
this to work. (e.g. by being the application's script directory, by being on
PYTHONPATH, or by being added to sys.path by your code.)
""")
def installation_report(self, req, dist, what="Installed"):
"""Helpful installation message for display to package users"""
msg = "\n%(what)s %(eggloc)s%(extras)s"
if self.multi_version and not self.no_report:
msg += '\n' + self.__mv_warning
if self.install_dir not in map(normalize_path, sys.path):
msg += '\n' + self.__id_warning
eggloc = dist.location
name = dist.project_name
version = dist.version
extras = '' # TODO: self.report_extras(req, dist)
return msg % locals()
__editable_msg = textwrap.dedent("""
Extracted editable version of %(spec)s to %(dirname)s
If it uses setuptools in its setup script, you can activate it in
"development" mode by going to that directory and running::
%(python)s setup.py develop
See the setuptools documentation for the "develop" command for more info.
""").lstrip()
def report_editable(self, spec, setup_script):
dirname = os.path.dirname(setup_script)
python = sys.executable
return '\n' + self.__editable_msg % locals()
    def run_setup(self, setup_script, setup_base, args):
        """Run a package's setup script in the sandbox with our options."""
        # make sure the sandboxed script sees our patched command modules
        sys.modules.setdefault('distutils.command.bdist_egg', bdist_egg)
        sys.modules.setdefault('distutils.command.egg_info', egg_info)
        args = list(args)
        # translate our verbosity level into distutils -v/-q flags
        if self.verbose > 2:
            v = 'v' * (self.verbose - 1)
            args.insert(0, '-' + v)
        elif self.verbose < 2:
            args.insert(0, '-q')
        if self.dry_run:
            args.insert(0, '-n')
        log.info(
            "Running %s %s", setup_script[len(setup_base) + 1:], ' '.join(args)
        )
        try:
            # module-level sandbox run_setup, not this method (name shadowing)
            run_setup(setup_script, args)
        except SystemExit as v:
            raise DistutilsError("Setup script exited with %s" % (v.args[0],))
    def build_and_install(self, setup_script, setup_base):
        """Run bdist_egg for *setup_script* and install the produced eggs.

        Returns the list of installed Distributions (possibly empty).
        """
        args = ['bdist_egg', '--dist-dir']
        # build into a fresh temp dir next to the setup script
        dist_dir = tempfile.mkdtemp(
            prefix='egg-dist-tmp-', dir=os.path.dirname(setup_script)
        )
        try:
            # propagate our fetch options to the sandboxed setup run
            self._set_fetcher_options(os.path.dirname(setup_script))
            args.append(dist_dir)
            self.run_setup(setup_script, setup_base, args)
            all_eggs = Environment([dist_dir])
            eggs = []
            for key in all_eggs:
                for dist in all_eggs[key]:
                    eggs.append(self.install_egg(dist.location, setup_base))
            if not eggs and not self.dry_run:
                log.warn("No eggs found in %s (setup script problem?)",
                         dist_dir)
            return eggs
        finally:
            rmtree(dist_dir)
            log.set_verbosity(self.verbose)  # restore our log verbosity
    def _set_fetcher_options(self, base):
        """
        When easy_install is about to run bdist_egg on a source dist, that
        source dist might have 'setup_requires' directives, requiring
        additional fetching. Ensure the fetcher options given to easy_install
        are available to that command as well.
        """
        # find the fetch options from easy_install and write them out
        # to the setup.cfg file.
        ei_opts = self.distribution.get_option_dict('easy_install').copy()
        # note: 'site_dirs' is listed twice; harmless, used for membership only
        fetch_directives = (
            'find_links', 'site_dirs', 'index_url', 'optimize',
            'site_dirs', 'allow_hosts',
        )
        fetch_options = {}
        for key, val in ei_opts.items():
            if key not in fetch_directives:
                continue
            # val is a distutils (source, value) pair; keep just the value
            fetch_options[key.replace('_', '-')] = val[1]
        # create a settings dictionary suitable for `edit_config`
        settings = dict(easy_install=fetch_options)
        cfg_filename = os.path.join(base, 'setup.cfg')
        setopt.edit_config(cfg_filename, settings)
    def update_pth(self, dist):
        """Record *dist* in easy-install.pth, dropping superseded entries."""
        if self.pth_file is None:
            return
        for d in self.pth_file[dist.key]:  # drop old entries
            if self.multi_version or d.location != dist.location:
                log.info("Removing %s from easy-install.pth file", d)
                self.pth_file.remove(d)
                if d.location in self.shadow_path:
                    self.shadow_path.remove(d.location)
        if not self.multi_version:
            if dist.location in self.pth_file.paths:
                log.info(
                    "%s is already the active version in easy-install.pth",
                    dist
                )
            else:
                log.info("Adding %s to easy-install.pth file", dist)
                self.pth_file.add(dist)  # add new entry
                if dist.location not in self.shadow_path:
                    self.shadow_path.append(dist.location)
        if not self.dry_run:
            self.pth_file.save()
            if dist.key == 'setuptools':
                # Ensure that setuptools itself never becomes unavailable!
                # XXX should this check for latest version?
                filename = os.path.join(self.install_dir, 'setuptools.pth')
                if os.path.islink(filename):
                    os.unlink(filename)
                f = open(filename, 'wt')
                f.write(self.pth_file.make_relative(dist.location) + '\n')
                f.close()
    def unpack_progress(self, src, dst):
        """unpack_archive() progress callback: log and accept every file."""
        # Progress filter for unpacking
        log.debug("Unpacking %s to %s", src, dst)
        return dst  # only unpack-and-compile skips files for dry run
def unpack_and_compile(self, egg_path, destination):
to_compile = []
to_chmod = []
def pf(src, dst):
if dst.endswith('.py') and not src.startswith('EGG-INFO/'):
to_compile.append(dst)
elif dst.endswith('.dll') or dst.endswith('.so'):
to_chmod.append(dst)
self.unpack_progress(src, dst)
return not self.dry_run and dst or None
unpack_archive(egg_path, destination, pf)
self.byte_compile(to_compile)
if not self.dry_run:
for f in to_chmod:
mode = ((os.stat(f)[stat.ST_MODE]) | 0o555) & 0o7755
chmod(f, mode)
def byte_compile(self, to_compile):
if sys.dont_write_bytecode:
self.warn('byte-compiling is disabled, skipping.')
return
from distutils.util import byte_compile
try:
# try to make the byte compile messages quieter
log.set_verbosity(self.verbose - 1)
byte_compile(to_compile, optimize=0, force=1, dry_run=self.dry_run)
if self.optimize:
byte_compile(
to_compile, optimize=self.optimize, force=1,
dry_run=self.dry_run
)
finally:
log.set_verbosity(self.verbose) # restore original verbosity
__no_default_msg = textwrap.dedent("""
bad install directory or PYTHONPATH
You are attempting to install a package to a directory that is not
on PYTHONPATH and which Python does not read ".pth" files from. The
installation directory you specified (via --install-dir, --prefix, or
the distutils default setting) was:
%s
and your PYTHONPATH environment variable currently contains:
%r
Here are some of your options for correcting the problem:
* You can choose a different installation directory, i.e., one that is
on PYTHONPATH or supports .pth files
* You can add the installation directory to the PYTHONPATH environment
variable. (It must then also be on PYTHONPATH whenever you run
Python and want to use the package(s) you are installing.)
* You can set up the installation directory to support ".pth" files by
using one of the approaches described here:
https://pythonhosted.org/setuptools/easy_install.html#custom-installation-locations
Please make the appropriate changes for your system and try again.""").lstrip()
def no_default_version_msg(self):
template = self.__no_default_msg
return template % (self.install_dir, os.environ.get('PYTHONPATH', ''))
    def install_site_py(self):
        """Make sure there's a site.py in the target dir, if needed"""
        if self.sitepy_installed:
            return  # already did it, or don't need to
        sitepy = os.path.join(self.install_dir, "site.py")
        source = resource_string("setuptools", "site-patch.py")
        current = ""
        if os.path.exists(sitepy):
            log.debug("Checking existing site.py in %s", self.install_dir)
            f = open(sitepy, 'rb')
            current = f.read()
            # we want str, not bytes
            if six.PY3:
                current = current.decode()
            f.close()
            # refuse to clobber a site.py we didn't generate
            if not current.startswith('def __boot():'):
                raise DistutilsError(
                    "%s is not a setuptools-generated site.py; please"
                    " remove it." % sitepy
                )
        # NOTE(review): on Python 3, `current` is decoded to str above while
        # `source` stays bytes (resource_string), so this comparison looks
        # always-unequal there and the file would be rewritten every run —
        # confirm whether that is intended.
        if current != source:
            log.info("Creating %s", sitepy)
            if not self.dry_run:
                ensure_directory(sitepy)
                f = open(sitepy, 'wb')
                f.write(source)
                f.close()
            self.byte_compile([sitepy])
        self.sitepy_installed = True
def create_home_path(self):
"""Create directories under ~."""
if not self.user:
return
home = convert_path(os.path.expanduser("~"))
for name, path in six.iteritems(self.config_vars):
if path.startswith(home) and not os.path.isdir(path):
self.debug_print("os.makedirs('%s', 0o700)" % path)
os.makedirs(path, 0o700)
INSTALL_SCHEMES = dict(
posix=dict(
install_dir='$base/lib/python$py_version_short/site-packages',
script_dir='$base/bin',
),
)
DEFAULT_SCHEME = dict(
install_dir='$base/Lib/site-packages',
script_dir='$base/Scripts',
)
    def _expand(self, *attrs):
        """Expand $-variables in the named attributes, defaulting unset
        attributes from the platform install scheme (and --prefix)."""
        config_vars = self.get_finalized_command('install').config_vars
        if self.prefix:
            # Set default install_dir/scripts from --prefix
            config_vars = config_vars.copy()
            config_vars['base'] = self.prefix
        scheme = self.INSTALL_SCHEMES.get(os.name, self.DEFAULT_SCHEME)
        # fill in scheme defaults only where the user didn't supply a value
        for attr, val in scheme.items():
            if getattr(self, attr, None) is None:
                setattr(self, attr, val)
        from distutils.util import subst_vars
        for attr in attrs:
            val = getattr(self, attr)
            if val is not None:
                val = subst_vars(val, config_vars)
                if os.name == 'posix':
                    val = os.path.expanduser(val)
                setattr(self, attr, val)
def get_site_dirs():
    """Return a list of 'site' dirs: candidate site-packages locations.

    Combines PYTHONPATH entries, per-prefix site-packages directories,
    sysconfig's purelib/platlib paths, and (when enabled) the user site
    directory; all results are normalized.
    """
    sitedirs = [_f for _f in os.environ.get('PYTHONPATH',
                                            '').split(os.pathsep) if _f]
    prefixes = [sys.prefix]
    if sys.exec_prefix != sys.prefix:
        prefixes.append(sys.exec_prefix)
    # Build 'X.Y' from version_info: slicing sys.version[:3] breaks on
    # two-digit minor versions (e.g. Python 3.10 -> '3.1').
    py_version_short = '%d.%d' % sys.version_info[:2]
    for prefix in prefixes:
        if prefix:
            if sys.platform in ('os2emx', 'riscos'):
                sitedirs.append(os.path.join(prefix, "Lib", "site-packages"))
            elif os.sep == '/':
                sitedirs.extend([os.path.join(prefix,
                                              "lib",
                                              "python" + py_version_short,
                                              "site-packages"),
                                 os.path.join(prefix, "lib", "site-python")])
            else:
                sitedirs.extend(
                    [prefix, os.path.join(prefix, "lib", "site-packages")]
                )
            if sys.platform == 'darwin':
                # for framework builds *only* we add the standard Apple
                # locations. Currently only per-user, but /Library and
                # /Network/Library could be added too
                if 'Python.framework' in prefix:
                    home = os.environ.get('HOME')
                    if home:
                        sitedirs.append(
                            os.path.join(home,
                                         'Library',
                                         'Python',
                                         py_version_short,
                                         'site-packages'))
    lib_paths = get_path('purelib'), get_path('platlib')
    for site_lib in lib_paths:
        if site_lib not in sitedirs:
            sitedirs.append(site_lib)
    if site.ENABLE_USER_SITE:
        sitedirs.append(site.USER_SITE)
    sitedirs = list(map(normalize_path, sitedirs))
    return sitedirs
def expand_paths(inputs):
    """Yield (dirname, filenames) pairs for sys.path directories that might
    contain "old-style" packages.

    For each input directory, the directory itself is yielded first, then
    every existing, non-duplicate directory named by a non-import line of
    its .pth files (excluding the .pth files easy_install itself manages).
    """
    seen = {}
    for dirname in inputs:
        dirname = normalize_path(dirname)
        if dirname in seen:
            continue
        seen[dirname] = 1
        if not os.path.isdir(dirname):
            continue
        files = os.listdir(dirname)
        yield dirname, files
        for name in files:
            if not name.endswith('.pth'):
                # We only care about the .pth files
                continue
            if name in ('easy-install.pth', 'setuptools.pth'):
                # Ignore .pth files that we control
                continue
            # Read the .pth file; 'with' guarantees the handle is closed
            # even if yield_lines raises (the original could leak it).
            with open(os.path.join(dirname, name)) as f:
                lines = list(yield_lines(f))
            # Yield existing non-dupe, non-import directory lines from it
            for line in lines:
                if not line.startswith("import"):
                    line = normalize_path(line.rstrip())
                    if line not in seen:
                        seen[line] = 1
                        if not os.path.isdir(line):
                            continue
                        yield line, os.listdir(line)
def extract_wininst_cfg(dist_filename):
    """Extract configuration data from a bdist_wininst .exe

    Returns a configparser.RawConfigParser, or None if *dist_filename*
    does not look like a valid bdist_wininst installer.
    """
    # The config block is prepended to an ordinary zip archive; locate it
    # relative to the zip's end-of-central-directory record. 'with'
    # replaces the original try/finally and closes on every return path.
    with open(dist_filename, 'rb') as f:
        endrec = zipfile._EndRecData(f)
        if endrec is None:
            return None
        prepended = (endrec[9] - endrec[5]) - endrec[6]
        if prepended < 12:  # no wininst data here
            return None
        f.seek(prepended - 12)
        tag, cfglen, bmlen = struct.unpack("<iii", f.read(12))
        if tag not in (0x1234567A, 0x1234567B):
            return None  # not a valid tag
        f.seek(prepended - (12 + cfglen))
        cfg = configparser.RawConfigParser(
            {'version': '', 'target_version': ''})
        try:
            part = f.read(cfglen)
            # Read up to the first null byte.
            config = part.split(b'\0', 1)[0]
            # Now the config is in bytes, but for RawConfigParser, it should
            # be text, so decode it.
            config = config.decode(sys.getfilesystemencoding())
            cfg.readfp(six.StringIO(config))
        except configparser.Error:
            return None
        if not cfg.has_section('metadata') or not cfg.has_section('Setup'):
            return None
        return cfg
def get_exe_prefixes(exe_filename):
    """Get exe->egg path translations for a given .exe file

    Returns (archive-prefix, egg-prefix) pairs, lowercased and ordered
    longest-first so the most specific prefix matches first.
    """
    prefixes = [
        ('PURELIB/', ''), ('PLATLIB/pywin32_system32', ''),
        ('PLATLIB/', ''),
        ('SCRIPTS/', 'EGG-INFO/scripts/'),
        ('DATA/lib/site-packages', ''),
    ]
    archive = zipfile.ZipFile(exe_filename)
    try:
        for member in archive.infolist():
            filename = member.filename
            pieces = filename.split('/')
            # a PKG-INFO two levels deep marks an .egg-info directory whose
            # parent should map onto EGG-INFO/
            if len(pieces) == 3 and pieces[2] == 'PKG-INFO':
                if pieces[1].endswith('.egg-info'):
                    prefixes.insert(0, ('/'.join(pieces[:2]), 'EGG-INFO/'))
                    break
            if len(pieces) != 2 or not filename.endswith('.pth'):
                continue
            if filename.endswith('-nspkg.pth'):
                continue
            if pieces[0].upper() in ('PURELIB', 'PLATLIB'):
                contents = archive.read(filename)
                if six.PY3:
                    contents = contents.decode()
                # every non-import line of the .pth adds another prefix
                for pth in yield_lines(contents):
                    pth = pth.strip().replace('\\', '/')
                    if not pth.startswith('import'):
                        prefixes.append((('%s/%s/' % (pieces[0], pth)), ''))
    finally:
        archive.close()
    prefixes = [(x.lower(), y) for x, y in prefixes]
    prefixes.sort(reverse=True)
    return prefixes
def parse_requirement_arg(spec):
    """Parse a command-line requirement *spec*, wrapping parse failures
    in a DistutilsError with a user-oriented message."""
    try:
        req = Requirement.parse(spec)
    except ValueError:
        tmpl = "Not a URL, existing file, or requirement spec: %r"
        raise DistutilsError(tmpl % (spec,))
    return req
class PthDistributions(Environment):
    """A .pth file with Distribution paths in it"""
    # True when in-memory state differs from what is on disk
    dirty = False
    def __init__(self, filename, sitedirs=()):
        self.filename = filename
        self.sitedirs = list(map(normalize_path, sitedirs))
        self.basedir = normalize_path(os.path.dirname(self.filename))
        self._load()
        Environment.__init__(self, [], None, None)
        # register a Distribution for everything the .pth currently names
        for path in yield_lines(self.paths):
            list(map(self.add, find_distributions(path, True)))
    def _load(self):
        # Parse the .pth file into self.paths, pruning dead or duplicate
        # entries in place (which marks us dirty so the pruned file is saved).
        self.paths = []
        saw_import = False
        seen = dict.fromkeys(self.sitedirs)
        if os.path.isfile(self.filename):
            f = open(self.filename, 'rt')
            for line in f:
                if line.startswith('import'):
                    saw_import = True
                    continue
                path = line.rstrip()
                self.paths.append(path)
                if not path.strip() or path.strip().startswith('#'):
                    continue
                # skip non-existent paths, in case somebody deleted a package
                # manually, and duplicate paths as well
                path = self.paths[-1] = normalize_path(
                    os.path.join(self.basedir, path)
                )
                if not os.path.exists(path) or path in seen:
                    self.paths.pop()  # skip it
                    self.dirty = True  # we cleaned up, so we're dirty now :)
                    continue
                seen[path] = 1
            f.close()
        if self.paths and not saw_import:
            self.dirty = True  # ensure anything we touch has import wrappers
        # drop trailing blank entries
        while self.paths and not self.paths[-1].strip():
            self.paths.pop()
    def save(self):
        """Write changed .pth file back to disk"""
        if not self.dirty:
            return
        rel_paths = list(map(self.make_relative, self.paths))
        if rel_paths:
            log.debug("Saving %s", self.filename)
            lines = self._wrap_lines(rel_paths)
            data = '\n'.join(lines) + '\n'
            # replace symlinks with a real file so we don't write through them
            if os.path.islink(self.filename):
                os.unlink(self.filename)
            with open(self.filename, 'wt') as f:
                f.write(data)
        elif os.path.exists(self.filename):
            log.debug("Deleting empty %s", self.filename)
            os.unlink(self.filename)
        self.dirty = False
    @staticmethod
    def _wrap_lines(lines):
        # hook point: subclasses may decorate the saved lines
        return lines
    def add(self, dist):
        """Add `dist` to the distribution map"""
        new_path = (
            dist.location not in self.paths and (
                dist.location not in self.sitedirs or
                # account for '.' being in PYTHONPATH
                dist.location == os.getcwd()
            )
        )
        if new_path:
            self.paths.append(dist.location)
            self.dirty = True
        Environment.add(self, dist)
    def remove(self, dist):
        """Remove `dist` from the distribution map"""
        while dist.location in self.paths:
            self.paths.remove(dist.location)
            self.dirty = True
        Environment.remove(self, dist)
    def make_relative(self, path):
        # Express *path* relative to the .pth file's directory when it lies
        # underneath it; otherwise return the path unchanged.
        npath, last = os.path.split(normalize_path(path))
        baselen = len(self.basedir)
        parts = [last]
        sep = os.altsep == '/' and '/' or os.sep
        while len(npath) >= baselen:
            if npath == self.basedir:
                parts.append(os.curdir)
                parts.reverse()
                return sep.join(parts)
            npath, last = os.path.split(npath)
            parts.append(last)
        else:
            # walked above basedir without matching: not relative to it
            return path
class RewritePthDistributions(PthDistributions):
    # Wraps saved paths with a prelude/postlude that repositions the newly
    # added sys.path entries at sys.__egginsert, ahead of system paths.
    @classmethod
    def _wrap_lines(cls, lines):
        yield cls.prelude
        for line in lines:
            yield line
        yield cls.postlude
    # collapse a multi-line snippet onto one ';'-joined .pth 'import' line
    _inline = lambda text: textwrap.dedent(text).strip().replace('\n', '; ')
    prelude = _inline("""
        import sys
        sys.__plen = len(sys.path)
        """)
    postlude = _inline("""
        import sys
        new = sys.path[sys.__plen:]
        del sys.path[sys.__plen:]
        p = getattr(sys, '__egginsert', 0)
        sys.path[p:p] = new
        sys.__egginsert = p + len(new)
        """)
# The path-rewriting technique is the default; setting the environment
# variable SETUPTOOLS_SYS_PATH_TECHNIQUE to anything else (e.g. 'raw')
# keeps plain PthDistributions behavior.
if os.environ.get('SETUPTOOLS_SYS_PATH_TECHNIQUE', 'rewrite') == 'rewrite':
    PthDistributions = RewritePthDistributions
def _first_line_re():
    """
    Return a regular expression based on first_line_re suitable for matching
    strings.
    """
    pattern = first_line_re.pattern
    if isinstance(pattern, str):
        return first_line_re
    # first_line_re in Python >=3.1.4 and >=3.2.1 is a bytes pattern;
    # recompile it as a text pattern
    return re.compile(pattern.decode())
def auto_chmod(func, arg, exc):
    """rmtree-style error handler: on Windows, make the file writable and
    retry a failed os.remove; otherwise re-raise with the failing call
    appended to the exception message."""
    if func is os.remove and os.name == 'nt':
        chmod(arg, stat.S_IWRITE)
        return func(arg)
    et, ev, _ = sys.exc_info()
    # NOTE(review): ev[0]/ev[1] indexing assumes a Python 2 style tuple-like
    # exception value; on Python 3 exception objects aren't subscriptable,
    # so this path would raise TypeError — confirm where it is reachable.
    six.reraise(et, (ev[0], ev[1] + (" %s %s" % (func, arg))))
def update_dist_caches(dist_path, fix_zipimporter_caches):
    """
    Fix globally cached data related to `dist_path`, the path of a newly
    installed egg distribution (zipped or unzipped).

    Stale sys.path_importer_cache finders for the old distribution are
    always dropped: the replacement may be packaged differently (zipped
    vs. unzipped) and a cached finder would then use the wrong loader.

    zipimport keeps per-archive directory data in
    zipimport._zip_directory_cache, shared by all zipimporter instances
    for the same archive. If stale, loads through old zipimporters can
    fail outright (e.g. 'bad local file header') or silently return
    invalid data. When `fix_zipimporter_caches` is true (and the Python
    implementation allows it), that shared data is refreshed in place so
    existing zipimporter instances keep working against the replacement
    archive — this assumes the replacement is a zipped egg. When false,
    stale entries are cleared and then removed, so neither existing nor
    newly constructed zipimporters pick up outdated directory data.

    Other known holders of stale zipimporter state are deliberately left
    alone: the global pkg_resources working set, loaders referenced from
    local variables up the call stack, and modules' __loader__
    attributes (intended mostly for introspection per the Python 3.4
    docs). Remaining caches should be updated by whoever maintains them.
    """
    cleaned_path = normalize_path(dist_path)
    _uncache(cleaned_path, sys.path_importer_cache)
    if fix_zipimporter_caches:
        _replace_zip_directory_cache_data(cleaned_path)
    else:
        # Clear each stale entry before removing it, so both existing stale
        # zipimporters (phase 1) and newly constructed ones (phase 2) are
        # prevented from using outdated directory data. This removal step
        # may not be strictly necessary, but predates the in-place
        # replacement above and there are no unit tests proving it safe to
        # drop.
        _remove_and_clear_zip_directory_cache_data(cleaned_path)
def _collect_zipimporter_cache_entries(normalized_path, cache):
    """
    Return zipimporter cache entry keys related to a given normalized path.

    Alternative spellings of the same path (different case, different
    separators) are matched, as are sub-paths — i.e. zip archives embedded
    inside other zip archives.
    """
    entries = []
    prefix_len = len(normalized_path)
    for key in cache:
        candidate = normalize_path(key)
        if not candidate.startswith(normalized_path):
            continue
        # require an exact match or a path-separator boundary right after
        if candidate[prefix_len:prefix_len + 1] not in (os.sep, ''):
            continue
        entries.append(key)
    return entries
def _update_zipimporter_cache(normalized_path, cache, updater=None):
    """
    Update zipimporter cache data for a given normalized path, including
    entries for zip archives embedded in other zip archives.

    *updater*, if given, is called with (key, old_entry) after the entry
    has already been removed from the cache; a non-None return value is
    inserted back under the same key. With no updater, entries are simply
    removed.
    """
    for key in _collect_zipimporter_cache_entries(normalized_path, cache):
        # N.B. pypy's custom zipimport._zip_directory_cache is not a full
        # dict: it rejects item assignment for removal-only use and lacks
        # dict.pop(), so we must use the get/del pattern here. See:
        # https://bitbucket.org/pypa/setuptools/issue/202/more-robust-zipimporter-cache-invalidation#comment-10495960
        # https://bitbucket.org/pypy/pypy/src/dd07756a34a41f674c0cacfbc8ae1d4cc9ea2ae4/pypy/module/zipimport/interp_zipimport.py#cl-99
        old_entry = cache[key]
        del cache[key]
        if updater is None:
            continue
        replacement = updater(key, old_entry)
        if replacement is not None:
            cache[key] = replacement
def _uncache(normalized_path, cache):
    # Drop every cache entry for the path (and embedded sub-archives).
    _update_zipimporter_cache(normalized_path, cache)
def _remove_and_clear_zip_directory_cache_data(normalized_path):
    """Clear, then remove, cached zip directory data for *normalized_path*
    (including embedded sub-archives)."""
    def _clear_then_drop(path, old_entry):
        # wipe the shared directory data so stale zipimporters fail fast;
        # returning None lets the caller drop the cache entry entirely
        old_entry.clear()
    _update_zipimporter_cache(
        normalized_path, zipimport._zip_directory_cache,
        updater=_clear_then_drop)
# PyPy Python implementation does not allow directly writing to the
# zipimport._zip_directory_cache and so prevents us from attempting to correct
# its content. The best we can do there is clear the problematic cache content
# and have PyPy repopulate it as needed. The downside is that if there are any
# stale zipimport.zipimporter instances laying around, attempting to use them
# will fail due to not having its zip archive directory information available
# instead of being automatically corrected to use the new correct zip archive
# directory information.
if '__pypy__' in sys.builtin_module_names:
    _replace_zip_directory_cache_data = \
        _remove_and_clear_zip_directory_cache_data
else:
    def _replace_zip_directory_cache_data(normalized_path):
        # CPython: refresh the shared zip directory data in place so
        # existing zipimporter instances see the replacement archive.
        def replace_cached_zip_archive_directory_data(path, old_entry):
            # N.B. In theory, we could load the zip directory information just
            # once for all updated path spellings, and then copy it locally and
            # update its contained path strings to contain the correct
            # spelling, but that seems like a way too invasive move (this cache
            # structure is not officially documented anywhere and could in
            # theory change with new Python releases) for no significant
            # benefit.
            old_entry.clear()
            # constructing a zipimporter repopulates the shared cache entry
            zipimport.zipimporter(path)
            old_entry.update(zipimport._zip_directory_cache[path])
            return old_entry
        _update_zipimporter_cache(
            normalized_path, zipimport._zip_directory_cache,
            updater=replace_cached_zip_archive_directory_data)
def is_python(text, filename='<string>'):
    "Is this string a valid Python script?"
    try:
        compile(text, filename, 'exec')
        return True
    except (SyntaxError, TypeError):
        return False
def is_sh(executable):
    """Determine if the specified executable is a .sh (contains a #! line)"""
    try:
        with io.open(executable, encoding='latin-1') as fp:
            first_two = fp.read(2)
    except (OSError, IOError):
        # historical quirk: an unreadable file yields the path itself
        # (truthy), not a boolean
        return executable
    return first_two == '#!'
def nt_quote_arg(arg):
    """Quote a command line argument according to Windows parsing rules"""
    # delegate to the stdlib, which implements the MS C runtime rules
    quoted = subprocess.list2cmdline([arg])
    return quoted
def is_python_script(script_text, filename):
    """Is this text, as a whole, a Python script? (as opposed to shell/bat/etc.
    """
    if filename.endswith(('.py', '.pyw')):
        return True  # extension says it's Python
    if is_python(script_text, filename):
        return True  # it's syntactically valid Python
    if script_text.startswith('#!'):
        # It begins with a '#!' line, so check if 'python' is in it somewhere
        shebang = script_text.splitlines()[0].lower()
        return 'python' in shebang
    return False  # Not any Python I can recognize
try:
    from os import chmod as _chmod
except ImportError:
    # Jython compatibility
    def _chmod(*args):
        # os.chmod unavailable on this platform; make chmod a no-op
        pass
def chmod(path, mode):
    """Best-effort chmod: log the change, and log (rather than raise)
    any failure."""
    log.debug("changing mode of %s to %o", path, mode)
    try:
        _chmod(path, mode)
    except os.error as e:
        # non-fatal, e.g. filesystems without permission bits
        log.debug("chmod failed: %s", e)
class CommandSpec(list):
    """
    A command spec for a #! header, specified as a list of arguments akin to
    those passed to Popen.
    """
    # extra interpreter options appended after the command in as_header()
    options = []
    # keyword args for shlex.split() in from_string(); overridden on Windows
    split_args = dict()
    @classmethod
    def best(cls):
        """
        Choose the best CommandSpec class based on environmental conditions.
        """
        return cls
    @classmethod
    def _sys_executable(cls):
        # prefer the venv launcher path when the env var is present
        _default = os.path.normpath(sys.executable)
        return os.environ.get('__PYVENV_LAUNCHER__', _default)
    @classmethod
    def from_param(cls, param):
        """
        Construct a CommandSpec from a parameter to build_scripts, which may
        be None.
        """
        if isinstance(param, cls):
            return param
        if isinstance(param, list):
            return cls(param)
        if param is None:
            return cls.from_environment()
        # otherwise, assume it's a string.
        return cls.from_string(param)
    @classmethod
    def from_environment(cls):
        # spec for the currently-running interpreter
        return cls([cls._sys_executable()])
    @classmethod
    def from_string(cls, string):
        """
        Construct a command spec from a simple string representing a command
        line parseable by shlex.split.
        """
        items = shlex.split(string, **cls.split_args)
        return cls(items)
    def install_options(self, script_text):
        # capture options from the script's existing #! line; prepend -x
        # when the command line itself isn't pure ASCII
        self.options = shlex.split(self._extract_options(script_text))
        cmdline = subprocess.list2cmdline(self)
        if not isascii(cmdline):
            self.options[:0] = ['-x']
    @staticmethod
    def _extract_options(orig_script):
        """
        Extract any options from the first line of the script.
        """
        first = (orig_script + '\n').splitlines()[0]
        match = _first_line_re().match(first)
        options = match.group(1) or '' if match else ''
        return options.strip()
    def as_header(self):
        # full '#!<interpreter> <options>' header line
        return self._render(self + list(self.options))
    @staticmethod
    def _render(items):
        cmdline = subprocess.list2cmdline(items)
        return '#!' + cmdline + '\n'
# For pbr compat; will be removed in a future version.
# (Kept because external code has historically imported this module-level
# value directly -- TODO confirm nothing still does before removing.)
sys_executable = CommandSpec._sys_executable()
class WindowsCommandSpec(CommandSpec):
    # Windows command lines are not POSIX-shell syntax, so tell shlex.split
    # (used by CommandSpec.from_string) not to apply posix rules.
    split_args = dict(posix=False)
class ScriptWriter(object):
    """
    Encapsulates behavior around writing entry point scripts for console and
    gui apps.
    """

    # Stub script source.  NOTE: get_args() fills this via
    # ``cls.template % locals()``, so the local variable names used there
    # ('spec', 'group', 'name') are part of this template's contract.
    template = textwrap.dedent("""
    # EASY-INSTALL-ENTRY-SCRIPT: %(spec)r,%(group)r,%(name)r
    __requires__ = %(spec)r
    import sys
    from pkg_resources import load_entry_point
    if __name__ == '__main__':
        sys.exit(
            load_entry_point(%(spec)r, %(group)r, %(name)r)()
        )
    """).lstrip()

    # Subclasses (e.g. WindowsScriptWriter) override with a platform spec.
    command_spec_class = CommandSpec

    @classmethod
    def get_script_args(cls, dist, executable=None, wininst=False):
        # for backward compatibility
        warnings.warn("Use get_args", DeprecationWarning)
        writer = (WindowsScriptWriter if wininst else ScriptWriter).best()
        header = cls.get_script_header("", executable, wininst)
        return writer.get_args(dist, header)

    @classmethod
    def get_script_header(cls, script_text, executable=None, wininst=False):
        # for backward compatibility
        warnings.warn("Use get_header", DeprecationWarning)
        if wininst:
            executable = "python.exe"
        cmd = cls.command_spec_class.best().from_param(executable)
        cmd.install_options(script_text)
        return cmd.as_header()

    @classmethod
    def get_args(cls, dist, header=None):
        """
        Yield write_script() argument tuples for a distribution's
        console_scripts and gui_scripts entry points.
        """
        if header is None:
            header = cls.get_header()
        spec = str(dist.as_requirement())
        for type_ in 'console', 'gui':
            group = type_ + '_scripts'
            for name, ep in dist.get_entry_map(group).items():
                cls._ensure_safe_name(name)
                # locals() supplies 'spec', 'group' and 'name' to template.
                script_text = cls.template % locals()
                args = cls._get_script_args(type_, name, header, script_text)
                for res in args:
                    yield res

    @staticmethod
    def _ensure_safe_name(name):
        """
        Prevent paths in *_scripts entry point names.
        """
        has_path_sep = re.search(r'[\\/]', name)
        if has_path_sep:
            raise ValueError("Path separators not allowed in script names")

    @classmethod
    def get_writer(cls, force_windows):
        # for backward compatibility
        warnings.warn("Use best", DeprecationWarning)
        return WindowsScriptWriter.best() if force_windows else cls.best()

    @classmethod
    def best(cls):
        """
        Select the best ScriptWriter for this environment.
        """
        # os._name is a Jython-specific attribute identifying the host OS.
        if sys.platform == 'win32' or (os.name == 'java' and os._name == 'nt'):
            return WindowsScriptWriter.best()
        else:
            return cls

    @classmethod
    def _get_script_args(cls, type_, name, header, script_text):
        # Simply write the stub with no extension.
        yield (name, header + script_text)

    @classmethod
    def get_header(cls, script_text="", executable=None):
        """Create a #! line, getting options (if any) from script_text"""
        cmd = cls.command_spec_class.best().from_param(executable)
        cmd.install_options(script_text)
        return cmd.as_header()
class WindowsScriptWriter(ScriptWriter):
    """ScriptWriter variant that emits Windows-friendly script files."""

    command_spec_class = WindowsCommandSpec

    @classmethod
    def get_writer(cls):
        # for backward compatibility
        warnings.warn("Use best", DeprecationWarning)
        return cls.best()

    @classmethod
    def best(cls):
        """
        Select the best ScriptWriter suitable for Windows
        """
        writer_lookup = dict(
            executable=WindowsExecutableLauncherWriter,
            natural=cls,
        )
        # for compatibility, use the executable launcher by default
        launcher = os.environ.get('SETUPTOOLS_LAUNCHER', 'executable')
        return writer_lookup[launcher]

    @classmethod
    def _get_script_args(cls, type_, name, header, script_text):
        "For Windows, add a .py extension"
        ext = dict(console='.pya', gui='.pyw')[type_]
        # NOTE(review): assumes PATHEXT is present in the environment (true
        # on Windows); elsewhere this raises KeyError -- confirm intended.
        if ext not in os.environ['PATHEXT'].lower().split(';'):
            warnings.warn("%s not listed in PATHEXT; scripts will not be "
                          "recognized as executables." % ext, UserWarning)
        # 'blockers' lists filenames under other naming schemes that must
        # not coexist with the new script; presumably removed by the caller
        # of get_args -- TODO confirm against write_script.
        old = ['.pya', '.py', '-script.py', '.pyc', '.pyo', '.pyw', '.exe']
        old.remove(ext)
        header = cls._adjust_header(type_, header)
        blockers = [name + x for x in old]
        yield name + ext, header + script_text, 't', blockers

    @classmethod
    def _adjust_header(cls, type_, orig_header):
        """
        Make sure 'pythonw' is used for gui and 'python' is used for
        console (regardless of what sys.executable is).
        """
        pattern = 'pythonw.exe'
        repl = 'python.exe'
        if type_ == 'gui':
            # gui scripts need the windowed interpreter, so swap direction.
            pattern, repl = repl, pattern
        pattern_ob = re.compile(re.escape(pattern), re.IGNORECASE)
        new_header = pattern_ob.sub(string=orig_header, repl=repl)
        return new_header if cls._use_header(new_header) else orig_header

    @staticmethod
    def _use_header(new_header):
        """
        Should _adjust_header use the replaced header?

        On non-windows systems, always use. On
        Windows systems, only use the replaced header if it resolves
        to an executable on the system.
        """
        # Strip the leading '#!' and trailing newline, plus any quotes.
        clean_header = new_header[2:-1].strip('"')
        return sys.platform != 'win32' or find_executable(clean_header)
class WindowsExecutableLauncherWriter(WindowsScriptWriter):
    """Writer that pairs each '-script.py(w)' stub with an .exe launcher."""

    @classmethod
    def _get_script_args(cls, type_, name, header, script_text):
        """
        For Windows, add a .py extension and an .exe launcher

        Yields (filename, contents, mode[, blockers]) tuples: the script
        stub, the launcher binary, and (on 32-bit) a launcher manifest.
        """
        if type_ == 'gui':
            launcher_type = 'gui'
            ext = '-script.pyw'
            old = ['.pyw']
        else:
            launcher_type = 'cli'
            ext = '-script.py'
            old = ['.py', '.pyc', '.pyo']
        hdr = cls._adjust_header(type_, header)
        blockers = [name + x for x in old]
        yield (name + ext, hdr + script_text, 't', blockers)
        yield (
            name + '.exe', get_win_launcher(launcher_type),
            'b'  # write in binary mode
        )
        if not is_64bit():
            # install a manifest for the launcher to prevent Windows
            # from detecting it as an installer (which it will for
            # launchers like easy_install.exe). Consider only
            # adding a manifest for launchers detected as installers.
            # See Distribute #143 for details.
            m_name = name + '.exe.manifest'
            yield (m_name, load_launcher_manifest(name), 't')
# for backward-compatibility
# Module-level aliases retained for code that imported these as plain
# functions before they became ScriptWriter classmethods.
get_script_args = ScriptWriter.get_script_args
get_script_header = ScriptWriter.get_script_header
def get_win_launcher(type):
    """
    Load the Windows launcher (executable) suitable for launching a script.

    `type` should be either 'cli' or 'gui'

    Returns the executable as a byte string.
    """
    # Resource names follow '<type>[-arm](-64|-32).exe'.  The original code
    # built this through successive str.replace calls on '.', which is
    # equivalent because the name holds exactly one dot at each step.
    suffix = ''
    if platform.machine().lower() == 'arm':
        suffix += '-arm'
    suffix += '-64' if is_64bit() else '-32'
    launcher_fn = '%s%s.exe' % (type, suffix)
    return resource_string('setuptools', launcher_fn)
def load_launcher_manifest(name):
    """Return the launcher's .exe.manifest XML with %(name)s interpolated.

    vars() exposes the local 'name' argument to the %-substitution.
    """
    manifest = pkg_resources.resource_string(__name__, 'launcher manifest.xml')
    if six.PY2:
        return manifest % vars()
    else:
        # resource_string returns bytes on Python 3; decode before formatting.
        return manifest.decode('utf-8') % vars()
def rmtree(path, ignore_errors=False, onerror=auto_chmod):
    """Recursively delete a directory tree.

    This code is taken from the Python 2.4 version of 'shutil', because
    the 2.3 version doesn't really work right.

    ``onerror(func, path, exc_info)`` is invoked for each failing OS call;
    ``ignore_errors`` silences everything, ``onerror=None`` re-raises.
    """
    if ignore_errors:
        def onerror(*args):
            pass
    elif onerror is None:
        def onerror(*args):
            raise

    try:
        entries = os.listdir(path)
    except os.error:
        entries = []
        onerror(os.listdir, path, sys.exc_info())
    for entry in entries:
        fullname = os.path.join(path, entry)
        try:
            mode = os.lstat(fullname).st_mode
        except os.error:
            mode = 0
        if stat.S_ISDIR(mode):
            rmtree(fullname, ignore_errors, onerror)
        else:
            try:
                os.remove(fullname)
            except os.error:
                onerror(os.remove, fullname, sys.exc_info())
    try:
        os.rmdir(path)
    except os.error:
        onerror(os.rmdir, path, sys.exc_info())
def current_umask():
    """Return the process umask without permanently changing it.

    os.umask can only be read by writing it, so set a throwaway value and
    immediately restore the original.
    """
    original = os.umask(0o022)
    os.umask(original)
    return original
def bootstrap():
    # This function is called when setuptools*.egg is run using /bin/sh
    import setuptools

    # Rewrite sys.argv so main() behaves like
    # "easy_install <path-to-this-egg>": argv[0] becomes the egg path (the
    # directory containing the setuptools package) and the same path is
    # appended as the requirement to install.
    argv0 = os.path.dirname(setuptools.__path__[0])
    sys.argv[0] = argv0
    sys.argv.append(argv0)
    main()
def main(argv=None, **kw):
    """Run easy_install as a stand-alone command.

    Wraps distutils' setup() with a simplified usage message (see
    _patch_usage) and a Distribution subclass that suppresses the generic
    distutils help preamble.
    """
    from setuptools import setup
    from setuptools.dist import Distribution

    class DistributionWithoutHelpCommands(Distribution):
        # Empty common_usage removes the boilerplate block from --help.
        common_usage = ""

        def _show_help(self, *args, **kw):
            with _patch_usage():
                Distribution._show_help(self, *args, **kw)

    if argv is None:
        argv = sys.argv[1:]

    with _patch_usage():
        setup(
            script_args=['-q', 'easy_install', '-v'] + argv,
            script_name=sys.argv[0] or 'easy_install',
            distclass=DistributionWithoutHelpCommands, **kw
        )
@contextlib.contextmanager
def _patch_usage():
    """Temporarily replace distutils' usage text with easy_install's own.

    distutils.core.gen_usage is monkey-patched for the duration of the
    with-block and restored afterwards, even on error.
    """
    import distutils.core
    USAGE = textwrap.dedent("""
    usage: %(script)s [options] requirement_or_url ...
    or: %(script)s --help
    """).lstrip()

    def gen_usage(script_name):
        return USAGE % dict(
            script=os.path.basename(script_name),
        )

    saved = distutils.core.gen_usage
    distutils.core.gen_usage = gen_usage
    try:
        yield
    finally:
        distutils.core.gen_usage = saved
| mit |
RAtechntukan/Sick-Beard | sickbeard/clients/requests/exceptions.py | 276 | 1246 | # -*- coding: utf-8 -*-
"""
requests.exceptions
~~~~~~~~~~~~~~~~~~~
This module contains the set of Requests' exceptions.
"""
class RequestException(RuntimeError):
    """There was an ambiguous exception that occurred while handling your
    request.

    Root of the exception hierarchy defined in this module; catching it
    traps every error type declared below.
    """
class HTTPError(RequestException):
    """An HTTP error occurred."""

    def __init__(self, *args, **kwargs):
        """Initialize HTTPError, accepting an optional ``response`` keyword.

        The keyword is removed before delegating so the RuntimeError chain
        never sees an unexpected argument.
        """
        response = kwargs.pop('response', None)
        super(HTTPError, self).__init__(*args, **kwargs)
        self.response = response
# The remaining exception types carry no extra state; their class and
# docstring are the whole contract.  The schema/URL errors also subclass
# ValueError so callers validating inputs can catch them generically.
class ConnectionError(RequestException):
    """A Connection error occurred."""
class SSLError(ConnectionError):
    """An SSL error occurred."""
class Timeout(RequestException):
    """The request timed out."""
class URLRequired(RequestException):
    """A valid URL is required to make a request."""
class TooManyRedirects(RequestException):
    """Too many redirects."""
class MissingSchema(RequestException, ValueError):
    """The URL schema (e.g. http or https) is missing."""
class InvalidSchema(RequestException, ValueError):
    """See defaults.py for valid schemas."""
class InvalidURL(RequestException, ValueError):
    """ The URL provided was somehow invalid. """
| gpl-3.0 |
trystack/python-django-horizon-facebook | openstack_dashboard/dashboards/settings/apipassword/tests.py | 1 | 1220 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Centrin Data Systems Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django import http
from django.core.urlresolvers import reverse
from mox import IsA
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
INDEX_URL = reverse('horizon:settings:apipassword:index')
class ApiPasswordTests(test.TestCase):
    """Smoke test for the API-password settings form."""

    @test.create_stubs({api.keystone: ('user_update_own_password', )})
    def test_api_password(self):
        # NOTE(review): no expectations are recorded before ReplayAll, so
        # the stubbed keystone call is apparently expected to go unused by
        # this POST -- confirm against the form implementation.
        self.mox.ReplayAll()
        formData = {'method': 'ApiPasswordForm',}
        res = self.client.post(INDEX_URL, formData)
        # The form should accept the (empty) submission without errors.
        self.assertNoFormErrors(res)
| apache-2.0 |
thinkle/gourmet | gourmet/prefsGui.py | 1 | 11126 | from gi.repository import Gtk
import os.path
from . import gglobals
from .gtk_extras import optionTable
from . import plugin_loader, plugin
class PreferencesGui (plugin_loader.Pluggable):
    """The glue between our preferences dialog UI and our prefs modules.
    Instead of "connecting", as would be normal with pygtk objects, we set up handlers in the
    apply_prefs_dic which contains preference-handlers for each preference we wish.
    {'preference_name':lambda foo (name,val): bar(name,val)}
    """

    # Notebook page indices accepted by show_dialog(page=...).
    INDEX_PAGE = 0
    CARD_PAGE = 1
    SHOP_PAGE = 2

    def __init__ (
        self,
        prefs,
        uifile=os.path.join(gglobals.uibase,
                            'preferenceDialog.ui'),
        # NOTE(review): these dict defaults are mutable default arguments;
        # they appear never to be mutated in place here, but verify before
        # relying on that.
        radio_options={'shop_handle_optional':{'optional_ask':0,
                                               'optional_add':1,
                                               'optional_dont_add':-1
                                               }
                       },
        toggle_options={'remember_optionals_by_default':'remember_optionals_by_default',
                        'readableUnits':'toggle_readable_units',
                        'useFractions':'useFractions',
                        'showToolbar':'show_toolbar',
                        #'email_include_body':'email_body_checkbutton',
                        #'email_include_html':'email_html_checkbutton',
                        #'emailer_dont_ask':'remember_email_checkbutton',
                        },
        number_options = {'recipes_per_page':'recipesPerPageSpinButton'},
        buttons = {}
        #buttons = {'clear_remembered_optional_button':
        ):
        """Set up our PreferencesGui
        uifile points us to our UI file
        radio_options is a dictionary of preferences controlled by radio buttons.
        {preference_name: {radio_widget: value,
                           radio_widget: value, ...}
         }
        toggle_options is a dictionary of preferences controlled by toggle buttons.
        {preference_name: toggle_widget_name}
        buttons = {button_name : callback}
        """
        self.prefs = prefs
        self.ui = Gtk.Builder()
        self.ui.add_from_file(uifile)
        self.notebook = self.ui.get_object('notebook')
        # pref name: {'buttonName':VALUE,...}
        self.radio_options = radio_options
        self.connect_radio_buttons()
        self.toggle_options = toggle_options
        # Optional per-preference "apply" callbacks, keyed by pref name.
        self.apply_prefs_dic = {}
        self.widget_sensitivity_dic = {
            # pref : {'value':('widget':True,'widget':False,'widget':True...)}
            'shop_handle_optional':{0:{'remember_optionals_by_default':True,
                                       'clear_remembered_optional_button':True},
                                    1:{'remember_optionals_by_default':False,
                                       'clear_remembered_optional_button':False},
                                    -1:{'remember_optionals_by_default':False,
                                        'clear_remembered_optional_button':False}
                                    }
            }
        self.connect_toggle_buttons()
        self.buttons=buttons
        self.connect_buttons()
        self.number_options = number_options
        self.connect_number_options()
        self.build_pref_dictionary()
        # Push current pref values into the widgets, then register ourselves
        # so later pref changes update the widgets too.
        self.set_widgets_from_prefs()
        self.prefs.set_hooks.append(self.update_pref)
        # OptionTable -> callback mapping, filled by add_pref_table().
        self.pref_tables={}
        self.ui.get_object('close_button').connect('clicked',lambda *args: self.hide_dialog())
        plugin_loader.Pluggable.__init__(self,[plugin.PrefsPlugin])

    def build_pref_dictionary (self):
        """Build our preferences dictionary pref_dic
        preference: BOOLEAN_WIDGET|{VALUE:RADIO_WIDGET,VALUE:RADIO_WIDGET...}
                    METHOD_TO_BE_HANDED_PREF_VALUE
        pref_dic will be used to e.g. set default values and watch
        changing preferences.

        Also grabs the dialog widget and wires its delete-event so closing
        hides rather than destroys it.
        """
        self.pref_dic = {}
        for pref,widget in list(self.toggle_options.items()):
            self.pref_dic[pref]=('TOGGLE',widget)
        for pref,widgdic in list(self.radio_options.items()):
            self.pref_dic[pref]={}
            # create a dictionary by value (reversed dictionary)...
            for widg,val in list(widgdic.items()): self.pref_dic[pref][val]=widg
        self.d=self.ui.get_object('dialog')
        self.d.connect('delete-event',self.hide_dialog)

    def set_widgets_from_prefs (self):
        # Initialize every managed widget from the stored preference value.
        for k in list(self.pref_dic.keys()):
            # print("---", k, self.prefs, self.prefs[k])
            if k in self.prefs:
                self.update_pref(k,self.prefs[k])

    def update_pref (self, pref, value):
        """Update GUI to reflect value 'value' of preference 'pref'."""
        if pref in self.pref_dic:
            action=self.pref_dic[pref]
            if isinstance(action, dict):
                # we fail if action is no
                # Radio-group pref: value -> radio widget name to activate.
                widg=action[value]
                act,act_args=('set_active',True)
            elif action[0]=='TOGGLE':
                act,act_args=('set_active',value)
                widg=action[1]
            # in the future, we can handle Entries, etc...
            if isinstance(widg, str):
                widg=self.ui.get_object(widg)
            getattr(widg,act)(act_args)
            self.update_sensitivity_for_pref(pref,value)

    def show_dialog (self, page=None):
        """present our dialog."""
        self.d.present()
        if page:
            self.notebook.set_current_page(page)

    def hide_dialog (self,*args):
        # Return True so GTK does not destroy the dialog on delete-event.
        self.d.hide()
        return True

    def connect_buttons (self):
        # Wire each button name from self.buttons to its callback.
        for b,cb in list(self.buttons.items()):
            self.ui.get_object(b).connect('clicked',cb)

    def connect_toggle_buttons (self):
        """Connect signals for toggle buttons in self.toggle_options."""
        for pref,widget in list(self.toggle_options.items()):
            self.ui.get_object(widget).connect('toggled',self.toggle_callback,pref)

    def toggle_callback (self, button, pref_name):
        """Set preference 'pref_name' in response to toggle event on button."""
        self.set_pref(pref_name, button.get_active())

    def connect_radio_buttons (self):
        """Connect radio button signals to properly set preferences on toggle."""
        for pref_name,pref_dic in list(self.radio_options.items()):
            for button,val in list(pref_dic.items()):
                self.ui.get_object(button).connect(
                    'toggled',
                    self.radio_callback,
                    pref_name,
                    val)

    def radio_callback (self, button, pref_name, true_val=True):
        """Call back for radio button: if we are on, we set the pref to true_val."""
        if button.get_active():
            self.set_pref(pref_name,true_val)

    def connect_number_options (self):
        # Hook up numeric widgets; push any stored value into the widget
        # first, then listen for changes.
        for pref_name,widgetname in list(self.number_options.items()):
            widget = self.ui.get_object(widgetname)
            if hasattr(widget,'get_value'):
                get_method='get_value'
            elif hasattr(widget,'get_text'):
                # NOTE(review): when this lambda branch is taken,
                # number_callback does getattr(widget, get_method), which
                # would fail on a non-string -- only the SpinButton /
                # 'get_value' path appears exercised in practice; confirm.
                get_method=lambda *args: float(widget.get_text())
            else:
                print('widget',widgetname,widget,'is not very numberlike!')
                return
            curval = self.prefs.get(pref_name, None)
            if curval:
                try:
                    widget.set_value(curval)
                except AttributeError:
                    widget.set_text(str(curval))
            if isinstance(widget,Gtk.SpinButton):
                widget.get_adjustment().connect('value-changed',
                                                self.number_callback,
                                                pref_name,
                                                get_method)
            else:
                widget.connect('changed',
                               self.number_callback,
                               pref_name,
                               get_method)

    def number_callback (self, widget, pref_name, get_method='get_value'):
        # get_method names the accessor used to read the numeric value.
        self.set_pref(pref_name,getattr(widget,get_method)())

    def set_pref (self, name, value):
        """Set preference 'name' to value 'value'
        Possibly apply the preference using callback looked up in
        apply_prefs_dic (callback must take name and value of pref as
        arguments.
        """
        self.prefs[name]=value
        if name in self.apply_prefs_dic:
            self.apply_prefs_dic[name](name,value)

    def update_sensitivity_for_pref (self, name, value):
        # Grey out / enable dependent widgets per widget_sensitivity_dic;
        # prefs with no entry there are silently ignored.
        try:
            for k,v in list(self.widget_sensitivity_dic[name][value].items()):
                self.ui.get_object(k).set_sensitive(v)
        except KeyError: pass

    def add_widget (self, target_widget, child_widget):
        """Add child_widget to target_widget"""
        if isinstance(target_widget, str):
            target_widget = self.ui.get_object(target_widget)
        if isinstance(child_widget, str):
            child_widget = self.ui.get_object(child_widget)
        target_widget.add(child_widget)
        target_widget.show_all()

    def add_pref_table (self, options, target_widget, callback=None):
        """Add a preference table based on an option list 'options' to the
        target_widget 'target_widget' (either a widget or a glade-reference)

        The options need to be appropriate for an OptionTable.
        The callback will be handed the options (as returned by
        OptionTable) each time OptionTable is changed.
        """
        table=optionTable.OptionTable(options=options, changedcb=self.preftable_callback)
        self.pref_tables[table]=callback
        self.add_widget(target_widget,table)

    def preftable_callback (self, widget):
        # Find which registered OptionTable owns the changed widget, apply
        # its values, and forward them to the table's callback (if any).
        for table,cb in list(self.pref_tables.items()):
            if widget in table.get_children():
                # then we know who's preferences we care about...
                table.apply()
                if cb: cb(table.options)
                return
        print("Oops: we couldn't handle widget %s"%widget)
if __name__ == '__main__':
    # Manual smoke test: drive the dialog with a dict-based stand-in for
    # the real prefs object that prints every change and runs set_hooks.
    class FauxPrefs (dict):
        def __init__ (self,*args,**kwargs):
            self.set_hooks = []
            dict.__init__(self,*args,**kwargs)
        def __setitem__ (self,k,v):
            print('k:',k)
            print('v:',v)
            dict.__setitem__(self,k,v)
            for h in self.set_hooks:
                print('runnnig hook')
                h(k,v)
    # Hard-coded developer path to the UI file used for this manual test.
    gf='/home/tom/Projects/grm-db-experiments/glade/preferenceDialog.ui'
    import sys
    p=PreferencesGui(FauxPrefs(),gf)
    def printstuff (*args): print(args)
    p.add_pref_table([["Toggle Option",True],
                      ["String Option","Hello"],
                      ["Integer Option",1],
                      ["Float Option",float(3)]],
                     'cardViewVBox',
                     printstuff
                     )
    p.show_dialog()
    Gtk.main()
| gpl-2.0 |
ddayguerrero/blogme | flask/lib/python3.4/site-packages/sqlalchemy/ext/serializer.py | 81 | 5586 | # ext/serializer.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Serializer/Deserializer objects for usage with SQLAlchemy query structures,
allowing "contextual" deserialization.
Any SQLAlchemy query structure, either based on sqlalchemy.sql.*
or sqlalchemy.orm.* can be used. The mappers, Tables, Columns, Session
etc. which are referenced by the structure are not persisted in serialized
form, but are instead re-associated with the query structure
when it is deserialized.
Usage is nearly the same as that of the standard Python pickle module::
from sqlalchemy.ext.serializer import loads, dumps
metadata = MetaData(bind=some_engine)
Session = scoped_session(sessionmaker())
# ... define mappers
query = Session.query(MyClass).
filter(MyClass.somedata=='foo').order_by(MyClass.sortkey)
# pickle the query
serialized = dumps(query)
# unpickle. Pass in metadata + scoped_session
query2 = loads(serialized, metadata, Session)
print query2.all()
Similar restrictions as when using raw pickle apply; mapped classes must be
themselves be pickleable, meaning they are importable from a module-level
namespace.
The serializer module is only appropriate for query structures. It is not
needed for:
* instances of user-defined classes. These contain no references to engines,
sessions or expression constructs in the typical case and can be serialized
directly.
* Table metadata that is to be loaded entirely from the serialized structure
(i.e. is not already declared in the application). Regular
pickle.loads()/dumps() can be used to fully dump any ``MetaData`` object,
typically one which was reflected from an existing database at some previous
point in time. The serializer module is specifically for the opposite case,
where the Table metadata is already present in memory.
"""
from ..orm import class_mapper
from ..orm.session import Session
from ..orm.mapper import Mapper
from ..orm.interfaces import MapperProperty
from ..orm.attributes import QueryableAttribute
from .. import Table, Column
from ..engine import Engine
from ..util import pickle, byte_buffer, b64encode, b64decode, text_type
import re
__all__ = ['Serializer', 'Deserializer', 'dumps', 'loads']
def Serializer(*args, **kw):
    """Return a Pickler whose ``persistent_id`` hook externalizes
    SQLAlchemy constructs.

    Mapped attributes, mappers, mapper properties, tables, columns,
    sessions and engines are emitted as string tokens instead of being
    pickled; the matching :func:`Deserializer` resolves them back.
    """
    pickler = pickle.Pickler(*args, **kw)

    def persistent_id(obj):
        # print "serializing:", repr(obj)
        # Branch order matters: more specific types are tested first.
        if isinstance(obj, QueryableAttribute):
            cls = obj.impl.class_
            key = obj.impl.key
            id = "attribute:" + key + ":" + b64encode(pickle.dumps(cls))
        elif isinstance(obj, Mapper) and not obj.non_primary:
            id = "mapper:" + b64encode(pickle.dumps(obj.class_))
        elif isinstance(obj, MapperProperty) and not obj.parent.non_primary:
            id = "mapperprop:" + b64encode(pickle.dumps(obj.parent.class_)) + \
                ":" + obj.key
        elif isinstance(obj, Table):
            id = "table:" + text_type(obj.key)
        elif isinstance(obj, Column) and isinstance(obj.table, Table):
            id = "column:" + \
                text_type(obj.table.key) + ":" + text_type(obj.key)
        elif isinstance(obj, Session):
            id = "session:"
        elif isinstance(obj, Engine):
            id = "engine:"
        else:
            # Not one of ours: let pickle serialize the object normally.
            return None
        return id

    pickler.persistent_id = persistent_id
    return pickler
# Recognizes the persistent-id tokens emitted by Serializer above, in the
# form "<kind>:<payload>".
our_ids = re.compile(
    r'(mapperprop|mapper|table|column|session|attribute|engine):(.*)')
def Deserializer(file, metadata=None, scoped_session=None, engine=None):
    """Return an Unpickler that resolves Serializer's persistent ids.

    ``metadata``, ``scoped_session`` and ``engine`` supply the live objects
    that table/column/session/engine tokens are re-bound to.
    """
    unpickler = pickle.Unpickler(file)

    def get_engine():
        # Preference order: explicit engine, the session's bind, then the
        # metadata's bind; None when nothing is available.
        if engine:
            return engine
        elif scoped_session and scoped_session().bind:
            return scoped_session().bind
        elif metadata and metadata.bind:
            return metadata.bind
        else:
            return None

    def persistent_load(id):
        m = our_ids.match(text_type(id))
        if not m:
            return None
        else:
            type_, args = m.group(1, 2)
            if type_ == 'attribute':
                key, clsarg = args.split(":")
                cls = pickle.loads(b64decode(clsarg))
                return getattr(cls, key)
            elif type_ == "mapper":
                cls = pickle.loads(b64decode(args))
                return class_mapper(cls)
            elif type_ == "mapperprop":
                mapper, keyname = args.split(':')
                cls = pickle.loads(b64decode(mapper))
                return class_mapper(cls).attrs[keyname]
            elif type_ == "table":
                return metadata.tables[args]
            elif type_ == "column":
                table, colname = args.split(':')
                return metadata.tables[table].c[colname]
            elif type_ == "session":
                return scoped_session()
            elif type_ == "engine":
                return get_engine()
            else:
                raise Exception("Unknown token: %s" % type_)

    unpickler.persistent_load = persistent_load
    return unpickler
def dumps(obj, protocol=0):
    """Serialize *obj* to bytes using the context-aware Serializer."""
    stream = byte_buffer()
    Serializer(stream, protocol).dump(obj)
    return stream.getvalue()
def loads(data, metadata=None, scoped_session=None, engine=None):
    """Inverse of dumps(): rebuild a structure, re-associating it with the
    given metadata, session and engine."""
    stream = byte_buffer(data)
    return Deserializer(stream, metadata, scoped_session, engine).load()
| mit |
vj-ug/gcloud-python | gcloud/storage/batch.py | 8 | 11360 | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Batch updates / deletes of storage buckets / blobs.
See: https://cloud.google.com/storage/docs/json_api/v1/how-tos/batch
"""
from email.encoders import encode_noop
from email.generator import Generator
from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
from email.parser import Parser
import httplib2
import io
import json
import six
from gcloud.exceptions import make_exception
from gcloud.storage.connection import Connection
class MIMEApplicationHTTP(MIMEApplication):
    """MIME type for ``application/http``.

    Constructs payload from headers and body

    :type method: string
    :param method: HTTP method

    :type uri: string
    :param uri: URI for HTTP request

    :type headers: dict
    :param headers: HTTP headers

    :type body: text or None
    :param body: HTTP payload

    """
    def __init__(self, method, uri, headers, body):
        # NOTE(review): a dict body mutates the caller's ``headers`` dict
        # in place, and Content-Length is stored as an int -- confirm both
        # are intended.
        if isinstance(body, dict):
            body = json.dumps(body)
            headers['Content-Type'] = 'application/json'
            headers['Content-Length'] = len(body)
        if body is None:
            body = ''
        # Render a raw HTTP/1.1 request: request line, sorted headers,
        # blank line, body.
        lines = ['%s %s HTTP/1.1' % (method, uri)]
        lines.extend(['%s: %s' % (key, value)
                      for key, value in sorted(headers.items())])
        lines.append('')
        lines.append(body)
        payload = '\r\n'.join(lines)
        if six.PY2:  # pragma: NO COVER Python2
            # Sigh. email.message.Message is an old-style class, so we
            # cannot use 'super()'.
            MIMEApplication.__init__(self, payload, 'http', encode_noop)
        else:  # pragma: NO COVER Python3
            super_init = super(MIMEApplicationHTTP, self).__init__
            super_init(payload, 'http', encode_noop)
class NoContent(object):
    """Emulate an HTTP '204 No Content' response."""
    # Only 'status' is consulted by this module (Batch._do_request returns
    # a NoContent() in place of a real response object).
    status = 204
class _FutureDict(object):
    """Class to hold a future value for a deferred request.

    Used by for requests that get sent in a :class:`Batch`.

    Every mapping-style operation raises :class:`KeyError`, so any code
    that touches a batched response before the batch has executed fails
    immediately and loudly.
    """

    @staticmethod
    def get(key, default=None):
        """Stand-in for dict.get; always fails.

        :type key: object
        :param key: Hashable dictionary key.

        :type default: object
        :param default: Fallback value to dict.get.

        :raises: :class:`KeyError` always since the future is intended to
                 fail as a dictionary.
        """
        message = 'Cannot get(%r, default=%r) on a future' % (key, default)
        raise KeyError(message)

    def __getitem__(self, key):
        """Stand-in for dict[key]; always fails.

        :raises: :class:`KeyError` always since the future is intended to
                 fail as a dictionary.
        """
        message = 'Cannot get item %r from a future' % (key,)
        raise KeyError(message)

    def __setitem__(self, key, value):
        """Stand-in for dict[key] = value; always fails.

        :raises: :class:`KeyError` always since the future is intended to
                 fail as a dictionary.
        """
        message = 'Cannot set %r -> %r on a future' % (key, value)
        raise KeyError(message)
class Batch(Connection):
"""Proxy an underlying connection, batching up change operations.
:type client: :class:`gcloud.storage.client.Client`
:param client: The client to use for making connections.
"""
_MAX_BATCH_SIZE = 1000
def __init__(self, client):
super(Batch, self).__init__()
self._client = client
self._requests = []
self._target_objects = []
def _do_request(self, method, url, headers, data, target_object):
"""Override Connection: defer actual HTTP request.
Only allow up to ``_MAX_BATCH_SIZE`` requests to be deferred.
:type method: string
:param method: The HTTP method to use in the request.
:type url: string
:param url: The URL to send the request to.
:type headers: dict
:param headers: A dictionary of HTTP headers to send with the request.
:type data: string
:param data: The data to send as the body of the request.
:type target_object: object or :class:`NoneType`
:param target_object: This allows us to enable custom behavior in our
batch connection. Here we defer an HTTP request
and complete initialization of the object at a
later time.
:rtype: tuple of ``response`` (a dictionary of sorts)
and ``content`` (a string).
:returns: The HTTP response object and the content of the response.
"""
if len(self._requests) >= self._MAX_BATCH_SIZE:
raise ValueError("Too many deferred requests (max %d)" %
self._MAX_BATCH_SIZE)
self._requests.append((method, url, headers, data))
result = _FutureDict()
self._target_objects.append(target_object)
if target_object is not None:
target_object._properties = result
return NoContent(), result
def _prepare_batch_request(self):
"""Prepares headers and body for a batch request.
:rtype: tuple (dict, string)
:returns: The pair of headers and body of the batch request to be sent.
:raises: :class:`ValueError` if no requests have been deferred.
"""
if len(self._requests) == 0:
raise ValueError("No deferred requests")
multi = MIMEMultipart()
for method, uri, headers, body in self._requests:
subrequest = MIMEApplicationHTTP(method, uri, headers, body)
multi.attach(subrequest)
# The `email` package expects to deal with "native" strings
if six.PY3: # pragma: NO COVER Python3
buf = io.StringIO()
else: # pragma: NO COVER Python2
buf = io.BytesIO()
generator = Generator(buf, False, 0)
generator.flatten(multi)
payload = buf.getvalue()
# Strip off redundant header text
_, body = payload.split('\n\n', 1)
return dict(multi._headers), body
def _finish_futures(self, responses):
"""Apply all the batch responses to the futures created.
:type responses: list of (headers, payload) tuples.
:param responses: List of headers and payloads from each response in
the batch.
:raises: :class:`ValueError` if no requests have been deferred.
"""
# If a bad status occurs, we track it, but don't raise an exception
# until all futures have been populated.
exception_args = None
if len(self._target_objects) != len(responses):
raise ValueError('Expected a response for every request.')
for target_object, sub_response in zip(self._target_objects,
responses):
resp_headers, sub_payload = sub_response
if not 200 <= resp_headers.status < 300:
exception_args = exception_args or (resp_headers,
sub_payload)
elif target_object is not None:
target_object._properties = sub_payload
if exception_args is not None:
raise make_exception(*exception_args)
def finish(self):
"""Submit a single `multipart/mixed` request w/ deferred requests.
:rtype: list of tuples
:returns: one ``(headers, payload)`` tuple per deferred request.
"""
headers, body = self._prepare_batch_request()
url = '%s/batch' % self.API_BASE_URL
# Use the private ``_connection`` rather than the public
# ``.connection``, since the public connection may be this
# current batch.
response, content = self._client._connection._make_request(
'POST', url, data=body, headers=headers)
responses = list(_unpack_batch_response(response, content))
self._finish_futures(responses)
return responses
def current(self):
"""Return the topmost batch, or None."""
return self._client.current_batch
def __enter__(self):
self._client._push_batch(self)
return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        # Only submit the deferred requests when the ``with`` block exited
        # cleanly; on error we just unwind.  The batch is always popped
        # (even if ``finish()`` raises) so the client's stack stays balanced.
        try:
            if exc_type is None:
                self.finish()
        finally:
            self._client._pop_batch()
def _generate_faux_mime_message(parser, response, content):
    """Convert response, content -> (multipart) email.message.

    Helper for _unpack_batch_response.
    """
    # We coerce to bytes to get consistent concatenation across
    # Py2 and Py3.  Percent formatting is insufficient since
    # it includes the b'' prefix in Py3.
    if not isinstance(content, six.binary_type):
        content = content.encode('utf-8')

    content_type = response['content-type']
    if not isinstance(content_type, six.binary_type):
        content_type = content_type.encode('utf-8')

    # Fabricate a minimal MIME document: the original Content-Type header
    # (which carries the multipart boundary) followed by the raw body.
    faux_message = b''.join([
        b'Content-Type: ',
        content_type,
        b'\nMIME-Version: 1.0\n\n',
        content,
    ])

    if six.PY2:
        return parser.parsestr(faux_message)
    else:  # pragma: NO COVER  Python3
        # The Python 3 email parser only accepts text, so decode first.
        return parser.parsestr(faux_message.decode('utf-8'))
def _unpack_batch_response(response, content):
    """Convert response, content -> [(headers, payload)].

    Creates a generator of tuples of emulating the responses to
    :meth:`httplib2.Http.request` (a pair of headers and payload).

    :type response: :class:`httplib2.Response`
    :param response: HTTP response / headers from a request.

    :type content: string
    :param content: Response payload with a batch response.

    :rtype: generator
    :returns: A generator of header, payload pairs.
    """
    parser = Parser()
    message = _generate_faux_mime_message(parser, response, content)

    # NOTE: this reaches into the private ``_payload`` of email.message —
    # for a multipart message it is a list of sub-messages.
    if not isinstance(message._payload, list):
        raise ValueError('Bad response: not multi-part')

    for subrequest in message._payload:
        # Each part's body begins with an HTTP status line such as
        # "HTTP/1.1 200 OK"; peel it off and parse the remainder as a
        # standalone MIME message.
        status_line, rest = subrequest._payload.split('\n', 1)
        _, status, _ = status_line.split(' ', 2)
        sub_message = parser.parsestr(rest)
        payload = sub_message._payload
        ctype = sub_message['Content-Type']
        # Re-shape the sub-message into an httplib2-style response object.
        msg_headers = dict(sub_message._headers)
        msg_headers['status'] = status
        headers = httplib2.Response(msg_headers)
        if ctype and ctype.startswith('application/json'):
            payload = json.loads(payload)
        yield headers, payload
| apache-2.0 |
jezdez/kuma | vendor/packages/translate/convert/factory.py | 25 | 7868 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2010 Zuza Software Foundation
#
# This file is part of the Translate Toolkit.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""Factory methods to convert supported input files to supported translatable files."""
import os
#from translate.convert import prop2po, po2prop, odf2xliff, xliff2odf
__all__ = ['converters', 'UnknownExtensionError', 'UnsupportedConversionError']
# Turn into property to support lazy loading of things?
converters = {}
#for module in (prop2po, po2prop, odf2xliff, xliff2odf):
# if not hasattr(module, 'formats'):
# continue
# for extension in module.formats:
# if extension not in converters:
# converters[extension] = []
# converters[extension].append(module.formats[extension])
class UnknownExtensionError(Exception):
    """Raised when a file's extension (format) cannot be determined."""

    def __init__(self, afile):
        self.file = afile

    def __str__(self):
        return 'Unable to find extension for file: %s' % (self.file,)

    def __unicode__(self):
        # Python 2 only; ``unicode`` does not exist on Python 3.
        return unicode(str(self))
class UnsupportedConversionError(Exception):
    """Raised when no converter exists for the requested extension combo."""

    def __init__(self, in_ext=None, out_ext=None, templ_ext=None):
        self.in_ext = in_ext
        self.out_ext = out_ext
        self.templ_ext = templ_ext

    def __str__(self):
        parts = ["Unsupported conversion from %s to %s" %
                 (self.in_ext, self.out_ext)]
        if self.templ_ext:
            parts.append('with template %s' % (self.templ_ext,))
        return ' '.join(parts)

    def __unicode__(self):
        # Python 2 only; ``unicode`` does not exist on Python 3.
        return unicode(str(self))
def get_extension(filename):
    """Return the last extension of *filename*, or ``None`` if it has none."""
    fname = os.path.split(filename)[1]
    ext = fname.split(os.extsep)[-1]
    # If splitting on the extension separator changed nothing, there is
    # no extension at all.
    return None if ext == fname else ext
def get_converter(in_ext, out_ext=None, templ_ext=None):
    """Return the registered conversion function for the given input,
    output and (optional) template extensions.

    :raises UnsupportedConversionError: if no converter is registered
        for the combination.
    """
    convert_candidates = None
    if templ_ext:
        # Template-based converters are keyed on (input_ext, template_ext).
        if (in_ext, templ_ext) in converters:
            convert_candidates = converters[(in_ext, templ_ext)]
        else:
            raise UnsupportedConversionError(in_ext, out_ext, templ_ext)
    else:
        # Template-less converters may be keyed on the plain extension
        # or on a 1-tuple containing it.
        if in_ext in converters:
            convert_candidates = converters[in_ext]
        elif (in_ext,) in converters:
            convert_candidates = converters[(in_ext,)]
        else:
            raise UnsupportedConversionError(in_ext, out_ext)
    convert_fn = None
    if not out_ext:
        # No output format requested: default to the first candidate.
        out_ext, convert_fn = convert_candidates[0]
    else:
        # Otherwise find the candidate producing the requested extension.
        for ext, func in convert_candidates:
            if ext == out_ext:
                convert_fn = func
                break
    if not convert_fn:
        raise UnsupportedConversionError(in_ext, out_ext, templ_ext)
    return convert_fn
def get_output_extensions(ext):
    """Compiles a list of possible output extensions for the given input extension."""
    out_exts = []
    for key, candidates in converters.items():
        # Converter keys are either a plain extension or a tuple whose
        # first element is the input extension.
        in_ext = key[0] if isinstance(key, tuple) else key
        if in_ext != ext:
            continue
        for out_ext, convert_fn in candidates:
            out_exts.append(out_ext)
    return out_exts
def convert(inputfile, template=None, options=None, convert_options=None):
    """Convert the given input file to an appropriate output format, optionally
    using the given template file and further options.

    If the output extension (format) cannot be inferred the first converter
    that can handle the input file (and the format/extension it gives as
    output) is used.

    :type inputfile: file
    :param inputfile: The input file to be converted
    :type template: file
    :param template: Template file to use during conversion
    :type options: dict (default: None)
    :param options: Valid options are:
        - in_ext: The extension (format) of the input file.
        - out_ext: The extension (format) to use for the output file.
        - templ_ext: The extension (format) of the template file.
        - in_fname: File name of the input file; used only to determine
          the input file extension (format).
        - templ_fname: File name of the template file; used only to
          determine the template file extension (format).
    :raises UnknownExtensionError: if an input/template extension can not
        be determined.
    :raises UnsupportedConversionError: if no converter supports the
        requested combination of extensions.
    :returns: a 2-tuple: The new output file (in a temporary directory) and
              the extension (format) of the output file. The caller is
              responsible for deleting the (temporary) output file."""
    in_ext, out_ext, templ_ext = None, None, None

    # Get extensions from options
    if options is None:
        options = {}
    else:
        if 'in_ext' in options:
            in_ext = options['in_ext']
        if 'out_ext' in options:
            out_ext = options['out_ext']
        if template and 'templ_ext' in options:
            templ_ext = options['templ_ext']

        # If we still do not have extensions, try and get them from the
        # *_fname options
        if not in_ext and 'in_fname' in options:
            in_ext = get_extension(options['in_fname'])
        if template and not templ_ext and 'templ_fname' in options:
            # BUG FIX: this used to assign to ``templ_fname`` (a local that
            # was never read), so the 'templ_fname' option had no effect.
            templ_ext = get_extension(options['templ_fname'])

    # If we still do not have extensions, get it from the file names
    if not in_ext and hasattr(inputfile, 'name'):
        in_ext = get_extension(inputfile.name)
    if template and not templ_ext and hasattr(template, 'name'):
        templ_ext = get_extension(template.name)

    if not in_ext:
        raise UnknownExtensionError(inputfile)
    if template and not templ_ext:
        raise UnknownExtensionError(template)

    out_ext_candidates = get_output_extensions(in_ext)
    if not out_ext_candidates:
        # No converters registered for the in_ext we have
        raise UnsupportedConversionError(in_ext=in_ext, templ_ext=templ_ext)
    if out_ext and out_ext not in out_ext_candidates:
        # If out_ext has a value at this point, it was given in options, so
        # we just take a second to make sure that the conversion is supported.
        raise UnsupportedConversionError(in_ext, out_ext, templ_ext)

    if not out_ext:
        # BUG FIX: an explicitly requested (and already validated) out_ext
        # used to be unconditionally overwritten by the fallback below.
        if templ_ext in out_ext_candidates:
            # If we're using a template, chances are (pretty damn) good that
            # the output file will be of the same type
            out_ext = templ_ext
        else:
            # As a last resort, we'll just use the first possible output
            # format
            out_ext = out_ext_candidates[0]

    # XXX: We are abusing tempfile.mkstemp() below: we are only using it to
    #      obtain a temporary file name to use the normal open() with. This is
    #      done because a tempfile.NamedTemporaryFile simply gave too many
    #      issues when being closed (and deleted) by the rest of the toolkit
    #      (eg. TranslationStore.savefile()). Therefore none of mkstemp()'s
    #      security features are being utilised.
    import tempfile
    tempfd, tempfname = tempfile.mkstemp(prefix='ttk_convert', suffix=os.extsep + out_ext)
    os.close(tempfd)
    outputfile = open(tempfname, 'w')

    if convert_options is None:
        convert_options = {}
    get_converter(in_ext, out_ext, templ_ext)(inputfile, outputfile, template, **convert_options)
    if hasattr(outputfile, 'closed') and hasattr(outputfile, 'close') and not outputfile.closed:
        outputfile.close()
    return outputfile, out_ext
| mpl-2.0 |
keyurpatel076/MissionPlannerGit | Lib/py_compile.py | 111 | 5930 | """Routine to "compile" a .py file to a .pyc (or .pyo) file.
This module has intimate knowledge of the format of .pyc files.
"""
import __builtin__
import imp
import marshal
import os
import sys
import traceback
MAGIC = imp.get_magic()
__all__ = ["compile", "main", "PyCompileError"]
class PyCompileError(Exception):
    """Raised when a source file cannot be byte-compiled.

    Use ``raise PyCompileError(exc_type, exc_value, file[, msg])``.

    The formatted error text is available as ``msg`` (also returned by
    ``str()``); the originating exception's type name, its value and the
    offending file name are kept as ``exc_type_name``, ``exc_value`` and
    ``file``.  For a SyntaxError the message is the standard traceback
    text with the real file name substituted for "<string>"; for any
    other exception it is "Sorry: <type>: <value>".  An explicit ``msg``
    overrides the generated text.
    """

    def __init__(self, exc_type, exc_value, file, msg=''):
        exc_type_name = exc_type.__name__
        if exc_type is SyntaxError:
            # Re-use the standard one-line traceback formatting, but point
            # at the real file instead of the compile() pseudo-filename.
            formatted = ''.join(
                traceback.format_exception_only(exc_type, exc_value))
            errmsg = formatted.replace('File "<string>"', 'File "%s"' % file)
        else:
            errmsg = "Sorry: %s: %s" % (exc_type_name, exc_value)

        Exception.__init__(self, msg or errmsg, exc_type_name, exc_value, file)
        self.exc_type_name = exc_type_name
        self.exc_value = exc_value
        self.file = file
        self.msg = msg or errmsg

    def __str__(self):
        return self.msg
def wr_long(f, x):
    """Internal; write a 32-bit int to a file in little-endian order."""
    # Emit the four bytes least-significant first.
    for shift in (0, 8, 16, 24):
        f.write(chr((x >> shift) & 0xff))
def compile(file, cfile=None, dfile=None, doraise=False):
    """Byte-compile one Python source file to Python bytecode.

    Arguments:

    file:    source filename
    cfile:   target filename; defaults to source with 'c' or 'o' appended
             ('c' normally, 'o' in optimizing mode, giving .pyc or .pyo)
    dfile:   purported filename; defaults to source (this is the filename
             that will show up in error messages)
    doraise: flag indicating whether or not an exception should be
             raised when a compile error is found. If an exception
             occurs and this flag is set to False, a string
             indicating the nature of the exception will be printed,
             and the function will return to the caller. If an
             exception occurs and this flag is set to True, a
             PyCompileError exception will be raised.

    Note that it isn't necessary to byte-compile Python modules for
    execution efficiency -- Python itself byte-compiles a module when
    it is loaded, and if it can, writes out the bytecode to the
    corresponding .pyc (or .pyo) file.

    However, if a Python installation is shared between users, it is a
    good idea to byte-compile all modules upon installation, since
    other users may not be able to write in the source directories,
    and thus they won't be able to write the .pyc/.pyo file, and then
    they would be byte-compiling every module each time it is loaded.
    This can slow down program start-up considerably.

    See compileall.py for a script/module that uses this module to
    byte-compile all installed files (or all files in selected
    directories).
    """
    with open(file, 'U') as f:
        try:
            # Source mtime is stored in the .pyc header so stale bytecode
            # can be detected; fall back to os.stat when fstat is missing.
            timestamp = long(os.fstat(f.fileno()).st_mtime)
        except AttributeError:
            timestamp = long(os.stat(file).st_mtime)
        codestring = f.read()
    try:
        codeobject = __builtin__.compile(codestring, dfile or file,'exec')
    except Exception,err:
        # Wrap any compile-time failure (SyntaxError etc.) so the caller
        # gets a uniform error type / message.
        py_exc = PyCompileError(err.__class__,err.args,dfile or file)
        if doraise:
            raise py_exc
        else:
            sys.stderr.write(py_exc.msg + '\n')
            return
    if cfile is None:
        # __debug__ is False under -O, producing .pyo instead of .pyc.
        cfile = file + (__debug__ and 'c' or 'o')
    with open(cfile, 'wb') as fc:
        # Write a zero placeholder first and the real MAGIC last, so a
        # partially-written file is never mistaken for valid bytecode.
        fc.write('\0\0\0\0')
        wr_long(fc, timestamp)
        marshal.dump(codeobject, fc)
        fc.flush()
        fc.seek(0, 0)
        fc.write(MAGIC)
def main(args=None):
    """Compile several source files.

    The files named in 'args' (or on the command line, if 'args' is
    not specified) are compiled and the resulting bytecode is cached
    in the normal manner.  This function does not search a directory
    structure to locate source files; it only compiles files named
    explicitly.  If '-' is the only parameter in args, the list of
    files is taken from standard input.
    """
    if args is None:
        args = sys.argv[1:]
    rv = 0
    if args == ['-']:
        # File names arrive on stdin, one per line, until EOF.
        for line in iter(sys.stdin.readline, ''):
            filename = line.rstrip('\n')
            try:
                compile(filename, doraise=True)
            except PyCompileError as error:
                rv = 1
                sys.stderr.write("%s\n" % error.msg)
            except IOError as error:
                rv = 1
                sys.stderr.write("%s\n" % error)
    else:
        for filename in args:
            try:
                compile(filename, doraise=True)
            except PyCompileError as error:
                # return value to indicate at least one failure
                rv = 1
                sys.stderr.write(error.msg)
    return rv

if __name__ == "__main__":
    sys.exit(main())
| gpl-3.0 |
karinemiras/evoman_framework | evoman/enemy7.py | 1 | 12823 | ################################
# EvoMan FrameWork - V1.0 2016 #
# Author: Karine Miras #
# karine.smiras@gmail.com #
################################
import sys
import numpy
import random
import Base
from Base.SpriteConstants import *
from Base.SpriteDefinition import *
from sensors import Sensors
tilemap = 'evoman/map4.tmx'  # tiled map used for this enemy's stage
timeexpire = 1000 # game run limit
# enemy 7 sprite, bubbleman
# enemy 7 sprite, bubbleman
class Enemy(pygame.sprite.Sprite):
    """Enemy 7 ("bubbleman"): shoots bullet bursts, releases bouncing
    bubbles, and dashes/jumps toward the player on a timed attack cycle.
    """
    def __init__(self, location, *groups):
        super(Enemy, self).__init__(*groups)
        self.spriteDefinition = SpriteDefinition('evoman/images/EnemySprites.png', 0, 0, 43, 59)
        self.updateSprite(SpriteConstants.STANDING, SpriteConstants.LEFT)
        self.rect = pygame.rect.Rect(location, self.image.get_size())
        self.direction = -1  # -1 = facing left, 1 = facing right
        self.max_life = 100
        self.life = self.max_life
        self.resting = 0  # 1 when standing on a platform
        self.dy = 0  # vertical velocity
        self.alternate = 1
        self.imune = 0
        self.timeenemy = 0  # timer driving the static attack script
        self.twists = []  # live projectile sprites
        self.bullets = 0  # bubbles currently on screen
        self.hurt = 0  # frames left showing the "hurt" sprite
        self.shooting = 0  # frames left showing the "shooting" sprite
        self.gun_cooldown = 0  # bubble attack cooldown (seconds)
        self.gun_cooldown2 = 0  # bullet-burst cooldown (seconds)
    def update(self, dt, game):
        """Advance one frame: pick attacks, move, resolve collisions and
        update the sprite image.
        """
        if game.time==1:
            # puts enemy in random initial position
            if game.randomini == 'yes':
                self.rect.x = numpy.random.choice([640,500,400,300])
        # defines game mode for enemy action
        if game.enemymode == 'static': # enemy controlled by static movements
            if self.timeenemy>=4 and self.timeenemy<=20 and self.timeenemy%4 == 0:
                atack1 = 1
            else:
                atack1 = 0
            atack2 = 1 #useless: immediately overwritten below
            if self.timeenemy==4:
                atack2 = 1
            else:
                atack2 = 0
            if self.timeenemy==5:
                atack3 = 1
            else:
                atack3 = 0
            if self.timeenemy>=50 and self.timeenemy<80:
                atack4 = 1
            else:
                atack4 = 0
            if self.timeenemy == 50:
                atack5 = 1
            else:
                atack5 = 0
            if self.timeenemy == 100:
                atack6 = 1
            else:
                atack6 = 0
        elif game.enemymode == 'ai': # Enemy controlled by AI algorithm.
            # calls the controller providing game sensors
            # NOTE(review): self.sensors is not set in __init__; presumably
            # assigned externally before update() runs in 'ai' mode — verify.
            actions = game.enemy_controller.control(self.sensors.get(game), game.econt)
            if len(actions) < 6:
                game.print_logs("ERROR: Enemy 1 controller must return 6 decision variables.")
                sys.exit(0)
            atack1 = actions[0]
            atack2 = actions[1]
            atack3 = actions[2]
            atack4 = actions[3]
            atack5 = actions[4]
            atack6 = actions[5]
        # gate the two shooting attacks behind their cooldowns
        if atack1 == 1 and not self.gun_cooldown2:
            atack1 = 1
        else:
            atack1 = 0
        if atack3 == 1 and not self.gun_cooldown:
            atack3 = 1
        else:
            atack3 = 0
        # marks the flag indicating to the player that the map is on water environment
        game.player.inwater = 1
        # if the 'start game' marker is 1
        if game.start == 1:
            # increments enemy timer
            self.timeenemy += 1
            # copies last position state of the enemy
            last = self.rect.copy()
            # calculates a relative distance factor, between the player and enemy, to set up the jumping strength
            aux_dist = (abs(game.player.rect.right - self.rect.right)/490.0)+0.3
            # shoots 5 bullets positioned over the same range
            if atack1 == 1:
                self.shooting = 5
                self.gun_cooldown2 = 3
                # bullets sound effect
                if game.sound == "on" and game.playermode == "human":
                    sound = pygame.mixer.Sound('evoman/sounds/scifi011.wav')
                    c = pygame.mixer.Channel(3)
                    c.set_volume(10)
                    c.play(sound)
                rand = numpy.random.randint(0, 25, 1)
                self.twists.append(Bullet_e7((self.rect.x,self.rect.y), self.direction, len(self.twists), game.sprite_e))
            # throws from 1 to 3 bubbles, starting at slightly different positions
            if self.bullets == 0: # if the bubbles have gone away, enemy is able to release new bubbles.
                rand = 2
                for i in range(0,rand):
                    if atack3 == 1:
                        self.gun_cooldown = 3
                        self.bullets += 1
                        self.twists.append(Bullet_e72((self.rect.x+self.direction*i*30 ,self.rect.y-i*30), self.direction, len(self.twists), game.sprite_e))
            # decreases time for bullets limitation
            self.gun_cooldown = max(0, self.gun_cooldown - dt)
            # decreases time for bullets limitation
            self.gun_cooldown2 = max(0, self.gun_cooldown2 - dt)
            # enemy moves during some time, after standing still for a while
            if atack4 == 1:
                self.rect.x += self.direction * 600 * aux_dist * dt * 0.7
            # enemy jumps while is moving
            if self.resting == 1 and atack5 == 1:
                self.dy = -1500
                self.resting = 0
            # at the end of the attack cycle, enemy turns toward the player's direction.
            if atack6 == 1:
                if game.enemymode == 'static':
                    if game.player.rect.right < self.rect.left:
                        self.direction = -1
                    if game.player.rect.left > self.rect.right:
                        self.direction = 1
                else:
                    self.direction = self.direction * -1
                # resets enemy timer
                self.timeenemy = 0
            # gravity
            self.dy = min(400, self.dy + 100)
            self.rect.y += self.dy * dt * 0.4
            # changes the image when enemy jumps or stands up
            if self.resting == 0:
                if self.direction == -1:
                    self.updateSprite(SpriteConstants.JUMPING, SpriteConstants.LEFT)
                else:
                    self.updateSprite(SpriteConstants.JUMPING, SpriteConstants.RIGHT)
            else:
                if self.direction == -1:
                    self.updateSprite(SpriteConstants.STANDING, SpriteConstants.LEFT)
                else:
                    self.updateSprite(SpriteConstants.STANDING, SpriteConstants.RIGHT)
            # checks collision of the player with the enemy
            if self.rect.colliderect(game.player.rect):
                # chooses which sprite to penalise according to config
                if game.contacthurt == "player":
                    game.player.life = max(0, game.player.life-(game.level*0.3))
                if game.contacthurt == "enemy":
                    game.enemy.life = max(0, game.enemy.life-(game.level*0.3))
                game.player.rect.x += self.direction * 50 * dt # pushes player when he collides with the enemy
                # limits the player to stand on the screen space even being pushed
                if game.player.rect.x < 60:
                    game.player.rect.x = 60
                if game.player.rect.x > 620:
                    game.player.rect.x = 620
                if self.rect.x < 70:
                    self.rect.x = 70
                if self.rect.x > 610:
                    self.rect.x = 610
                game.player.hurt = 5 # Sets flag to change the player image when he is hurt.
            # controls screen walls and platforms limits against enemy
            new = self.rect
            self.resting = 0
            for cell in game.tilemap.layers['triggers'].collide(new, 'blockers'):
                blockers = cell['blockers']
                if 'l' in blockers and last.right <= cell.left and new.right > cell.left:
                    new.right = cell.left
                if 'r' in blockers and last.left >= cell.right and new.left < cell.right:
                    new.left = cell.right
                if 't' in blockers and last.bottom <= cell.top and new.bottom > cell.top:
                    self.resting = 1
                    new.bottom = cell.top
                    self.dy = 0
                if 'b' in blockers and last.top >= cell.bottom and new.top < cell.bottom:
                    new.top = cell.bottom
        # hurt enemy image
        if self.hurt > 0:
            if self.direction == -1:
                self.updateSprite(SpriteConstants.HURTING, SpriteConstants.LEFT)
            else:
                self.updateSprite(SpriteConstants.HURTING, SpriteConstants.RIGHT)
            self.hurt -=1
        # changes bullets images according to the enemy direction
        if self.shooting > 0:
            if self.direction == -1:
                self.updateSprite(SpriteConstants.SHOOTING, SpriteConstants.LEFT)
            else:
                self.updateSprite(SpriteConstants.SHOOTING, SpriteConstants.RIGHT)
            self.shooting -= 1
        self.shooting = max(0,self.shooting)
    def updateSprite(self, state, direction):
        """Swap the current image for the given animation state/direction."""
        self.image = self.spriteDefinition.getImage(state, direction)
# enemy's bullet
# enemy's bullet
class Bullet_e7(pygame.sprite.Sprite):
    """Straight horizontal bullet fired by Enemy 7's burst attack."""

    image = pygame.image.load('evoman/images/bullet2_l.png')
    def __init__(self, location, direction, n_twist, *groups):
        super(Bullet_e7, self).__init__(*groups)
        self.rect = pygame.rect.Rect(location, self.image.get_size())
        self.direction = direction  # -1 left, 1 right
        self.n_twist = n_twist  # index into the enemy's twists list
    def update(self, dt, game):
        """Move the bullet; kill it off-screen or damage the player on hit."""
        self.rect.x += self.direction * 500 * dt # moves the bullet on the x axis
        # removes bullet objects when they trespass the screen limits
        if self.rect.right < 1 or self.rect.left>736 or self.rect.bottom < 1 or self.rect.top > 512:
            self.kill()
            game.enemy.twists[self.n_twist] = None
            return
        # checks collision of enemy's bullet with the player
        if self.rect.colliderect(game.player.rect):
            # player loses life points, according to the difficulty level of the game (the more difficult, the more he loses).
            game.player.life = max(0, game.player.life-(game.level*0.3))
            game.player.rect.x += self.direction * 100 * dt # pushes player when he collides with the bullet
            # limits the player to stand on the screen space even being pushed
            if game.player.rect.x < 60:
                game.player.rect.x = 60
            if game.player.rect.x > 620:
                game.player.rect.x = 620
            # sets flag to change the player image when he is hurt
            game.player.hurt = 1
        else:
            game.player.hurt = 0
# enemy's bullet 2 (bubble)
# enemy's bullet 2 (bubble)
class Bullet_e72(pygame.sprite.Sprite):
    """Bubble projectile: drifts horizontally while bouncing vertically
    between y=350 and y=460.
    """

    image = pygame.image.load('evoman/images/bubb.png')
    def __init__(self, location, direction, n_twist, *groups):
        super(Bullet_e72, self).__init__(*groups)
        self.rect = pygame.rect.Rect(location, self.image.get_size())
        self.direction = direction  # horizontal direction: -1 left, 1 right
        self.direc = 1  # vertical bounce direction: 1 down, -1 up
        self.n_twist = n_twist  # index into the enemy's twists list
    def update(self, dt, game):
        """Move the bubble; kill it off-screen or damage the player on hit."""
        self.rect.x += self.direction * 200 * dt * 0.5 # moves the bubble on the x axis
        # moves the bubble on the y axis, bouncing between the floor and an imaginary top.
        self.rect.y += 200 * self.direc * dt * 0.4
        if self.rect.y >= 460 or self.rect.y <= 350:
            self.direc = self.direc * -1
        # removes bullet objects when they trespass the screen limits
        if self.rect.right < 1 or self.rect.left>736 or self.rect.bottom < 1 or self.rect.top > 512:
            self.kill()
            game.enemy.twists[self.n_twist] = None
            # freeing the bubble counter lets the enemy release a new batch
            game.enemy.bullets -=1
            return
        # checks collision of enemy's bullet with the player
        if self.rect.colliderect(game.player.rect):
            # player loses life points, according to the difficulty level of the game (the more difficult, the more it loses).
            game.player.life = max(0, game.player.life-(game.level*0.3))
            game.player.rect.x += self.direction * 100 * dt # pushes player when he collides with the bubble
            # limits the player to stand on the screen space even being pushed
            if game.player.rect.x < 60:
                game.player.rect.x = 60
            if game.player.rect.x > 620:
                game.player.rect.x = 620
            game.player.hurt = 5 # sets flag to change the player image when he is hurt
| cc0-1.0 |
hieupham007/Titanium_Mobile | support/iphone/tools.py | 34 | 8313 |
import os, sys, codecs, shutil, filecmp, subprocess
# the template_dir is the path where this file lives on disk
template_dir = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename))
def ensure_dev_path(debug=True):
	"""Make sure DEVELOPER_DIR points at an Xcode developer folder.

	If ``xcode-select -print-path`` already succeeds, nothing is done.
	Otherwise (Xcode 4.3+ layouts) the known locations are probed in
	order, and finally Spotlight is asked where Xcode.app lives.
	"""
	rc = subprocess.call(["xcode-select", "-print-path"], stdout=open(os.devnull, 'w'), stderr=open(os.devnull, 'w'))
	if rc == 0 :
		return
	if debug:
		print '[INFO] XCode 4.3+ likely. Searching for developer folders.'
	# Legacy pre-4.3 location.
	trypath = '/Developer'
	if os.path.isdir(trypath):
		os.putenv('DEVELOPER_DIR',trypath)
		return
	# Standard 4.3+ app-bundle location.
	trypath = '/Applications/Xcode.app/Contents/Developer'
	if os.path.isdir(trypath):
		os.putenv('DEVELOPER_DIR',trypath)
		return
	# Last resort: ask Spotlight where Xcode.app is installed.
	spotlight_args = ['mdfind','kMDItemDisplayName==Xcode&&kMDItemKind==Application']
	spotlight = subprocess.Popen(spotlight_args, stderr=subprocess.STDOUT, stdout=subprocess.PIPE)
	for line in spotlight.stdout.readlines():
		trypath = line.rstrip()+'/Contents/Developer'
		if os.path.isdir(trypath):
			os.putenv('DEVELOPER_DIR',trypath)
			return
def read_config(f):
	"""Parse a simple ``key=value`` config file into a dict.

	Lines starting with '#' are comments; blank lines are skipped.
	Only the first '=' separates key from value, so values may contain
	'=' themselves (the old code crashed on such lines and on blank
	lines).  Returns an empty dict when the file does not exist.
	"""
	props = {}
	if os.path.exists(f):
		contents = open(f).read()
		for line in contents.splitlines(False):
			if not line.strip(): continue
			if line[0:1]=='#': continue
			if '=' not in line: continue
			(k,v) = line.split("=",1)
			props[k]=v
	return props
def locate_modules(modules, project_dir, assets_dest_dir, log):
	"""Find the static libraries and asset dirs of third-party modules.

	Returns a 2-tuple: a list of [lib_name, search_path] pairs for the
	linker, and a list of [src_dir, dest_dir] pairs of module assets to
	copy.  Exits with an error if a non-local module is missing its
	compiled library.
	"""
	module_lib_search_path = []
	module_asset_dirs = []
	for module in modules:
		if module.js:
			# Skip CommonJS modules. These will be processed in a later pass.
			continue
		module_id = module.manifest.moduleid.lower()
		module_version = module.manifest.version
		module_lib_name = ('lib%s.a' % module_id).lower()
		# check first in the local project
		local_module_lib = os.path.join(project_dir, 'modules', 'iphone', module_lib_name)
		local = False
		if os.path.exists(local_module_lib):
			module_lib_search_path.append([module_lib_name, local_module_lib])
			local = True
			log("[INFO] Detected (local) third-party module: %s" % (local_module_lib))
		else:
			if module.lib is None:
				# Non-local module without a compiled library: fatal.
				module_lib_path = module.get_resource(module_lib_name)
				log("[ERROR] Third-party module: %s/%s missing library at %s" % (module_id, module_version, module_lib_path))
				sys.exit(1)
			# Add the directory containing the module's library.
			module_lib_search_path.append([module_lib_name, os.path.abspath(module.lib).rsplit('/',1)[0]])
			log("[INFO] Detected third-party module: %s/%s" % (module_id, module_version))
		if not local:
			# copy module resources
			img_dir = module.get_resource('assets', 'images')
			if os.path.exists(img_dir):
				dest_img_dir = os.path.join(assets_dest_dir, 'modules', module_id, 'images')
				if not os.path.exists(dest_img_dir):
					os.makedirs(dest_img_dir)
				module_asset_dirs.append([img_dir, dest_img_dir])
			# copy in any module assets
			module_assets_dir = module.get_resource('assets')
			if os.path.exists(module_assets_dir):
				module_dir = os.path.join(assets_dest_dir, 'modules', module_id)
				module_asset_dirs.append([module_assets_dir, module_dir])
	return module_lib_search_path, module_asset_dirs
def link_modules(modules, name, proj_dir, relative=False):
	"""Register third-party static libraries in the Xcode project file.

	*modules* is a list of [lib_name, lib_path] pairs (as produced by
	locate_modules).  The project.pbxproj is only rewritten when its
	content actually changes.
	"""
	if len(modules)>0:
		from pbxproj import PBXProj
		proj = PBXProj()
		xcode_proj = os.path.join(proj_dir,'%s.xcodeproj'%name,'project.pbxproj')
		current_xcode = open(xcode_proj).read()
		for tp in modules:
			proj.add_static_library(tp[0], tp[1], relative)
		out = proj.parse(xcode_proj)
		# since xcode changes can be destructive, only write as necessary (if changed)
		if current_xcode!=out:
			xo = open(xcode_proj, 'w')
			xo.write(out)
			xo.close()
def create_info_plist(tiapp, template_dir, project_dir, output):
	"""Write the app's Info.plist to *output*.

	A project-provided Info.plist takes precedence verbatim; otherwise
	the template's Info.plist is rendered with the project name, app id
	and URL scheme placeholders filled in from *tiapp*.
	"""
	def write_info_plist(infoplist_tmpl):
		# Render the template by substituting the project placeholders.
		name = tiapp.properties['name']
		appid = tiapp.properties['id']
		plist = codecs.open(infoplist_tmpl, encoding='utf-8').read()
		plist = plist.replace('__PROJECT_NAME__',name)
		plist = plist.replace('__PROJECT_ID__',appid)
		plist = plist.replace('__URL__',appid)
		# URL schemes may not contain dots or spaces.
		urlscheme = name.replace('.','_').replace(' ','').lower()
		plist = plist.replace('__URLSCHEME__',urlscheme)
		if tiapp.has_app_property('ti.facebook.appid'):
			# Facebook SDK requires an fb<appid> URL scheme.
			fbid = tiapp.get_app_property('ti.facebook.appid')
			plist = plist.replace('__ADDITIONAL_URL_SCHEMES__', '<string>fb%s</string>' % fbid)
		else:
			plist = plist.replace('__ADDITIONAL_URL_SCHEMES__','')
		pf = codecs.open(output,'w', encoding='utf-8')
		pf.write(plist)
		pf.close()
	# if the user has a Info.plist in their project directory, consider
	# that a custom override
	infoplist_tmpl = os.path.join(project_dir,'Info.plist')
	if os.path.exists(infoplist_tmpl):
		shutil.copy(infoplist_tmpl,output)
	else:
		infoplist_tmpl = os.path.join(template_dir,'Info.plist')
		write_info_plist(infoplist_tmpl)
def write_debugger_plist(debughost, debugport, debugairkey, debughosts, template_dir, debuggerplist):
	"""Render debugger.plist from its template and move it into place.

	Returns True when the rendered plist differs from the one already at
	*debuggerplist* (or none existed yet), False otherwise.
	"""
	template_path = os.path.join(template_dir, 'debugger.plist')
	plist = codecs.open(template_path, encoding='utf-8').read()
	# Substitute each placeholder, falling back to the empty string when
	# the corresponding setting was not supplied.  Note that the port is
	# only written when a host was given, matching the original logic.
	substitutions = (
		('__DEBUGGER_HOST__', debughost if debughost else ''),
		('__DEBUGGER_PORT__', debugport if debughost else ''),
		('__DEBUGGER_AIRKEY__', debugairkey if debugairkey else ''),
		('__DEBUGGER_HOSTS__', debughosts if debughosts else ''),
	)
	for placeholder, value in substitutions:
		plist = plist.replace(placeholder, value)
	# Write to a sidecar file first so we can compare with the existing
	# plist and report whether anything changed.
	tmp_path = debuggerplist + '.tmp'
	out = codecs.open(tmp_path, 'w', encoding='utf-8')
	out.write(plist)
	out.close()
	if os.path.exists(debuggerplist):
		changed = not filecmp.cmp(tmp_path, debuggerplist, shallow=False)
	else:
		changed = True
	shutil.move(tmp_path, debuggerplist)
	return changed
def install_default(image, project_dir, template_dir, dest):
	"""Copy a default resource image into *dest*.

	The image is looked up, in order, in the project's platform-specific
	resources (Resources/iphone), the project's shared Resources, and
	finally the template's bundled resources.  An existing file with the
	same name in *dest* is replaced.  Does nothing when *image* is None
	(the old code hit os.path.join(..., None) and raised TypeError).
	"""
	if image is None:
		return
	project_resources = os.path.join(project_dir, 'Resources')
	platform_resources = os.path.join(project_resources, 'iphone')
	template_resources = os.path.join(template_dir, 'resources')
	# Search order: platform-specific -> project-wide -> template default.
	graphic_path = os.path.join(platform_resources, image)
	if not os.path.exists(graphic_path):
		graphic_path = os.path.join(project_resources, image)
		if not os.path.exists(graphic_path):
			graphic_path = os.path.join(template_resources, image)
	if os.path.exists(graphic_path):
		dest_graphic_path = os.path.join(dest, image)
		if os.path.exists(dest_graphic_path):
			os.remove(dest_graphic_path)
		shutil.copy(graphic_path, dest)
def install_logo(tiapp, applogo, project_dir, template_dir, dest):
	"""Install the app icon, falling back to tiapp's 'icon' property.

	NOTE(review): if *applogo* is None and tiapp defines no 'icon',
	install_default receives None — confirm callers guarantee one of the
	two is set.
	"""
	# copy over the appicon
	if applogo==None and tiapp.properties.has_key('icon'):
		applogo = tiapp.properties['icon']
	install_default(applogo, project_dir, template_dir, dest)
def install_defaults(project_dir, template_dir, dest):
	"""Install every template-bundled default graphic into *dest*."""
	resources_dir = os.path.join(template_dir, 'resources')
	for graphic in os.listdir(resources_dir):
		install_default(graphic, project_dir, template_dir, dest)
def fix_xcode_script(content,script_name,script_contents):
	"""Replace the shellScript body of the named build phase inside a
	project.pbxproj *content* string with *script_contents*.

	Every occurrence of the phase is patched.  The magic offset
	``begin+15`` keeps ``shellScript = "`` (14 chars plus the opening
	quote) in the preamble.

	NOTE(review): a match at offset 0 would fail the ``start > 0`` test
	and re-search from the same position forever — in practice pbxproj
	files never start with a name entry, but worth confirming.
	"""
	# fix up xcode compile scripts in build phase
	start = 0
	while start >= 0:
		# find() returns -1 when no further occurrence exists, ending the loop
		start = content.find("name = \"%s\";" % script_name, start)
		if start > 0:
			begin = content.find("shellScript = ",start)
			if begin > 0:
				end = content.find("};",begin+1)
				if end > 0:
					before = content[0:begin+15]
					after = content[end:]
					script = "%s\";\n " % script_contents
					content = before + script + after
					start = begin
	return content
# Marker comments delimiting auto-generated sections inside source files:
# //##TI_AUTOGEN_BEGIN <section> ... //##TI_AUTOGEN_END <section>
SPLICE_START_MARKER="TI_AUTOGEN_BEGIN"
SPLICE_END_MARKER="TI_AUTOGEN_END"
def splice_code(file, section, replacement):
	"""Replace the auto-generated *section* of *file* with *replacement*.

	Returns True when the file was rewritten, False when the file or the
	section markers are missing, or the content is already up to date.
	"""
	if not os.path.exists(file):
		return False
	with open(file, 'r') as fd:
		contents = fd.read()
	# want to preserve this as part of the preamble
	start_search = "//##%s %s" % (SPLICE_START_MARKER, section)
	start_marker = contents.find(start_search)
	if start_marker == -1:
		return False
	end_marker = contents.find("//##%s %s" % (SPLICE_END_MARKER, section), start_marker)
	if end_marker == -1:
		print "[ERROR] Couldn't splice section %s in %s: No end marker" % (section, file)
		return False
	# Keep everything through the BEGIN marker, then the replacement,
	# then everything from the END marker onwards.
	preamble = contents[0:start_marker+len(start_search)] + "\n"
	appendix = contents[end_marker:]
	new_contents = preamble + replacement + appendix
	# Only touch the file when the spliced content actually changed.
	if contents != new_contents:
		with open(file, 'w') as fd:
			fd.write(new_contents)
		return True
	return False
| apache-2.0 |
umitproject/openmonitor-desktop-agent | umit/icm/agent/gui/dashboard/timeline/TimeLineGraphBase.py | 1 | 11533 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2011 S2S Network Consultoria e Tecnologia da Informacao LTDA
#
# Authors: Guilherme Polo <ggpolo@gmail.com>
# Tianwei Liu <liutainweidlut@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import datetime
from umit.icm.agent.gui.dashboard.timeline.Calendar import CalendarManager
from umit.icm.agent.gui.dashboard.timeline.Calendar import months
from umit.icm.agent.gui.dashboard.timeline.Calendar import monthname
from umit.icm.agent.gui.dashboard.timeline.Calendar import startup_calendar_opts
from umit.icm.agent.gui.dashboard.timeline.DataGrabber import DataGrabber
from umit.icm.agent.gui.dashboard.timeline.DataGrabber import DATA_GRAB_MODES
from umit.icm.agent.I18N import _
# Mapping of timeline view-mode identifier -> human-readable label.
view_mode = {
    "yearly": _("Yearly View"),
    "monthly": _("Monthly View"),
    "daily": _("Daily View"),
    "hourly": _("Hourly View")
}
# Fixed ordering; used elsewhere to index per-mode lists via
# view_mode_order.index(mode).
view_mode_order = ["yearly", "monthly", "daily", "hourly"]
# Short (single-unit) description per view mode.
view_mode_descr = {
    "yearly": _("Year"),
    "monthly": _("Month"),
    "daily": _("Day"),
    "hourly": _("Hour")
}
# X-axis label per view mode (the sub-unit shown along the axis).
xlabels = {
    "yearly": _("Months"),
    "monthly": _("Days"),
    "daily": _("Hours"),
    "hourly": _("Minutes")
}
#Define the task,report,test
# Status values as stored in the database, grouped by entity:
changes_in_db = {
    'successful': _("Successful"), #Test
    'failed': _("Failed"), #Test
    'done': _("Done"), #Task
    'wait': _("Wait"), #Task
    'sent': _("Sent"), #Report
    'unsent': _("unsent") #report
}
# All displayable categories: the DB statuses plus the aggregate line.
categories = dict(changes_in_db)
categories['changes_sum'] = _("Changes Sum")
view_kind_order = ["sum", "category"]
view_kind = {
    "sum": _("Changes Sum"),
    "category": _("By Category")
}
# NOTE(review): on Python 3 dict.keys() is a view, not a list -- confirm
# callers only iterate this.
changes_list = changes_in_db.keys()
def colors_from_file_gdk():
    """
    Return the timeline colors converted to gdk format (16-bit channels,
    0..65535) instead of the stored floats in 0..1.
    """
    scaled = {}
    for name, channels in colors_from_file().items():
        scaled[name] = [int(65535 * channel) for channel in channels]
    return scaled
def colors_from_file():
    """
    Return the timeline color table (RGB channels as floats in 0..1).
    """
    return {
        "inventory": [0.188, 0.659, 0.282],
        "availability": [0.847, 0.016, 0.016],
        "ports": [0.988, 0.82, 0.157],
        "fingerprint": [0.369, 0.008, 0.353],
        "several": [0.133, 0.361, 0.706],
        "nothing": [0.545, 0.502, 0.514],
        "changes_sum": [0, 0.4, 1],
    }
class TimeLineBase(CalendarManager, DataGrabber):
    """
    This class does the necessary Timeline management.

    Combines calendar navigation (CalendarManager) with database retrieval
    (DataGrabber) and mediates between the two through the ``connector``
    signal object supplied by the dashboard.
    """
    def __init__(self, connector, dashboard):
        # using current date at startup
        CalendarManager.__init__(self, **startup_calendar_opts())
        # NOTE(review): DataGrabber is initialized with this object as its
        # own argument -- confirm against DataGrabber's signature.
        DataGrabber.__init__(self, self)
        self.connector = connector
        self.dashboard = dashboard
        # Grabber state; filled in by update_grabber() below.
        self.grabber_method = None
        self.grabber_params = None
        self.labels = None
        self.xlabel = None
        # -1 means "nothing selected" on the timeline graph.
        self.selection = -1
        self.selected_range = (None, None)
        self.graph_kind = "category"
        self.graph_mode = "daily"
        self.update_grabber()
        self.__conector_widgets()

    def __conector_widgets(self):
        """
        Wire this object's handlers to the connector's signals.
        """
        # NOTE(review): method name is misspelled ("conector"); kept as-is
        # because the name-mangled reference in __init__ depends on it.
        self.connector.connect('selection_changed', self._handle_selection)
        self.connector.connect('data_changed', self._update_graph_data)
        self.connector.connect('date_update', self._update_date)

    def _handle_selection(self, obj, selection):
        """
        Handles TimeLine selection, detect the selected time range
        """
        self.selection = selection
        if selection == -1:
            # Nothing selected: clear the range.
            start = end = None
        elif self.graph_mode == "yearly":  # months
            selection += 1  # months starting at 1
            start = datetime.datetime(self.year, selection, 1)
            end = start + datetime.timedelta(days=self.get_monthrange(
                self.year, selection)[1])
        elif self.graph_mode == "monthly":  # days
            selection += 1  # days starting at 1
            start = datetime.datetime(self.year, self.month, selection)
            end = start + datetime.timedelta(days=1)
        elif self.graph_mode == "daily":  # hours
            start = datetime.datetime(self.year, self.month, self.day,
                                      selection)
            end = start + datetime.timedelta(seconds=3600)
        elif self.graph_mode == "hourly":  # minutes
            start = datetime.datetime(self.year, self.month, self.day,
                                      self.hour, selection)
            end = start + datetime.timedelta(seconds=60)
        self.selected_range = (start, end)
        self.connector.emit('selection_update', start, end)

    def grab_data(self):
        """
        Grab data for graph using current settings.
        """
        # grabber_method is a method *name* resolved on self (see
        # update_grabber / DATA_GRAB_MODES).
        return getattr(self, self.grabber_method)(self.grabber_params, self.dashboard.cur_tab)

    def update_grabber(self):
        """
        Updates grabber method, params and graph vlabels.
        """
        if self.graph_kind == "sum":
            grab_mode = self.graph_mode + "_" + self.graph_kind
        else:
            grab_mode = "category"
        self.grabber_method = DATA_GRAB_MODES[grab_mode]
        labels = [ ]
        if self.graph_mode == "yearly":
            params = (self.year, )
            # Abbreviate month names to three letters for the axis.
            for m in months:
                labels.append(m[:3])
        elif self.graph_mode == "monthly":
            params = (self.year, self.month)
            for i in range(self.get_current_monthrange()[1]):
                labels.append("%d" % (i + 1))
        elif self.graph_mode == "daily":
            params = (self.year, self.month, self.day)
            for i in range(24):
                labels.append("%d" % i)
        elif self.graph_mode == "hourly":
            params = (self.year, self.month, self.day, self.hour)
            for i in range(60):
                labels.append("%d" % i)
        self.grabber_params = params
        self.labels = labels
        self.xlabel = xlabels[self.graph_mode]

    def descr_by_graphmode(self):
        """
        Returns a description with graph meaning.
        """
        # Indexed in the same order as view_mode_order.
        graph_descr = [
            _("end of a week"), _("end of 12 hours period"),
            _("end of half hour period"), _("end of a minute")
        ]
        return _("Each point break represents ") + \
            graph_descr[view_mode_order.index(self.graph_mode)]

    def title_by_graphmode(self, useselection=False):
        """
        Returns a formatted date based on current graph mode (Yearly,
        Monthly, .. ).
        """
        def adjust(date):
            # prepends a 0 in cases where a date is minor than 10,
            # so (example) hour 2 displays as 02.
            if date < 10:
                date = "0%s" % date
            return date
        if useselection and self.selection != -1:
            fmtddate = [
                "%s, %s" % (monthname((self.selection + 1) % 12),
                            # % 12 above is used so we don't have
                            # problems in case self.selection > 12,
                            # that is, when someone is viewing in
                            # other mode different than "Yearly".
                            self.year),
                "%s, %s %s, %s" % (self.get_weekday(self.year,
                                   self.month, (self.selection + 1) %
                                   (self.get_current_monthrange()[1] + 1))[1],
                                   monthname(self.month), self.selection + 1,
                                   self.year),
                "%s, %s %s, %s (%s:00)" % (self.get_current_weekday_name(),
                                           monthname(self.month),
                                           self.day, self.year,
                                           adjust(self.selection % 23)),
                "%s, %s %s, %s (%s:%s)" % (self.get_current_weekday_name(),
                                           monthname(self.month), self.day,
                                           self.year, adjust(self.hour),
                                           adjust(self.selection))
            ]
        else:
            fmtddate = [
                _("Year %(year)s") % {'year': self.year},
                "%s, %s" % (monthname(self.month), self.year),
                "%s, %s %s, %s" % (self.get_current_weekday_name(),
                                   monthname(self.month), self.day,
                                   self.year),
                "%s, %s %s, %s (%s:00)" % (self.get_current_weekday_name(),
                                           monthname(self.month),
                                           self.day, self.year, self.hour)
            ]
        return fmtddate[view_mode_order.index(self.graph_mode)]

    def bounds_by_graphmode(self):
        """
        Return min, max and current value for graph mode.
        """
        # Indexed in the same order as view_mode_order.
        values = [
            (self.year_range[0], self.year_range[1], self.year),
            (1, 12, self.month),
            (1, self.get_current_monthrange()[1], self.day),
            (0, 23, self.hour)
        ]
        return values[view_mode_order.index(self.graph_mode)]

    def _update_graph_data(self, obj, *args):
        """
        Received a request to perform graph data update
        """
        # args may carry a new (mode, kind) pair; None/None means
        # "refresh with current settings".
        if args[0] and args[1]:
            self.graph_mode = args[0]
            self.graph_kind = args[1]
        self.update_grabber()
        glabel = self.title_by_graphmode()  # graph title
        dlabel = self.descr_by_graphmode()  # graph description
        line_filter, start, evts = self.grab_data()
        self.connector.emit('data_update', line_filter, start, evts,
                            self.labels, self.xlabel, glabel, dlabel)
        self.connector.emit('date_changed')

    def _update_date(self, obj, arg):
        """
        Update date based on current mode.
        """
        # Map graph mode to the calendar attribute it controls.
        modes = {
            "yearly": "year",
            "monthly": "month",
            "daily": "day",
            "hourly": "hour"}
        if self.graph_mode in modes:
            setattr(self, modes[self.graph_mode], arg)
        # Trigger a data refresh with the current mode/kind unchanged.
        self.connector.emit('data_changed', None, None)
| gpl-2.0 |
mdsafwan/Deal-My-Stuff | Lib/site-packages/django/core/serializers/python.py | 93 | 6585 | """
A Python "serializer". Doesn't do much serializing per se -- just converts to
and from basic Python data types (lists, dicts, strings, etc.). Useful as a basis for
other serializers.
"""
from __future__ import unicode_literals
from django.apps import apps
from django.conf import settings
from django.core.serializers import base
from django.db import DEFAULT_DB_ALIAS, models
from django.utils import six
from django.utils.encoding import force_text, is_protected_type
class Serializer(base.Serializer):
    """
    Serialize a QuerySet into plain Python objects (lists, dicts and
    primitives), suitable as a base for other serializers.
    """
    internal_use_only = True

    def start_serialization(self):
        # Reset accumulator state before a new serialization run.
        self._current = None
        self.objects = []

    def end_serialization(self):
        pass

    def start_object(self, obj):
        self._current = {}

    def end_object(self, obj):
        self.objects.append(self.get_dump_object(obj))
        self._current = None

    def get_dump_object(self, obj):
        dump = {
            "model": force_text(obj._meta),
            "fields": self._current,
        }
        # Emit the pk unless natural primary keys are requested and available.
        use_natural = self.use_natural_primary_keys and hasattr(obj, 'natural_key')
        if not use_natural:
            dump["pk"] = force_text(obj._get_pk_val(), strings_only=True)
        return dump

    def handle_field(self, obj, field):
        raw = field._get_val_from_obj(obj)
        # Protected types (primitives like None, numbers, dates, Decimals)
        # pass through as-is; everything else is stringified.
        self._current[field.name] = (
            raw if is_protected_type(raw) else field.value_to_string(obj)
        )

    def handle_fk_field(self, obj, field):
        if self.use_natural_foreign_keys and hasattr(field.rel.to, 'natural_key'):
            related = getattr(obj, field.name)
            value = related.natural_key() if related else None
        else:
            value = getattr(obj, field.get_attname())
            if not is_protected_type(value):
                value = field.value_to_string(obj)
        self._current[field.name] = value

    def handle_m2m_field(self, obj, field):
        # Explicit (non-auto-created) through models are not serialized here.
        if not field.rel.through._meta.auto_created:
            return
        if self.use_natural_foreign_keys and hasattr(field.rel.to, 'natural_key'):
            def convert(related):
                return related.natural_key()
        else:
            def convert(related):
                return force_text(related._get_pk_val(), strings_only=True)
        self._current[field.name] = [
            convert(related)
            for related in getattr(obj, field.name).iterator()
        ]

    def getvalue(self):
        return self.objects
def Deserializer(object_list, **options):
    """
    Deserialize simple Python objects back into Django ORM instances.

    It's expected that you pass the Python objects themselves (instead of a
    stream or a string) to the constructor

    Yields base.DeserializedObject wrappers; raises
    base.DeserializationError for unknown models unless
    ``ignorenonexistent=True`` is passed.
    """
    db = options.pop('using', DEFAULT_DB_ALIAS)
    # When True, unknown models and vanished fields are skipped silently.
    ignore = options.pop('ignorenonexistent', False)

    for d in object_list:
        # Look up the model and starting build a dict of data for it.
        try:
            Model = _get_model(d["model"])
        except base.DeserializationError:
            if ignore:
                continue
            else:
                raise
        data = {}
        if 'pk' in d:
            data[Model._meta.pk.attname] = Model._meta.pk.to_python(d.get("pk", None))
        # Many-to-many values are collected separately; they can only be
        # assigned after the instance exists.
        m2m_data = {}
        field_names = {f.name for f in Model._meta.get_fields()}

        # Handle each field
        for (field_name, field_value) in six.iteritems(d["fields"]):
            if ignore and field_name not in field_names:
                # skip fields no longer on model
                continue
            if isinstance(field_value, str):
                field_value = force_text(
                    field_value, options.get("encoding", settings.DEFAULT_CHARSET), strings_only=True
                )
            field = Model._meta.get_field(field_name)

            # Handle M2M relations
            if field.rel and isinstance(field.rel, models.ManyToManyRel):
                if hasattr(field.rel.to._default_manager, 'get_by_natural_key'):
                    def m2m_convert(value):
                        # A non-string iterable is treated as a natural key
                        # tuple; anything else as a raw pk value.
                        if hasattr(value, '__iter__') and not isinstance(value, six.text_type):
                            return field.rel.to._default_manager.db_manager(db).get_by_natural_key(*value).pk
                        else:
                            return force_text(field.rel.to._meta.pk.to_python(value), strings_only=True)
                else:
                    m2m_convert = lambda v: force_text(field.rel.to._meta.pk.to_python(v), strings_only=True)
                m2m_data[field.name] = [m2m_convert(pk) for pk in field_value]
            # Handle FK fields
            elif field.rel and isinstance(field.rel, models.ManyToOneRel):
                if field_value is not None:
                    if hasattr(field.rel.to._default_manager, 'get_by_natural_key'):
                        if hasattr(field_value, '__iter__') and not isinstance(field_value, six.text_type):
                            obj = field.rel.to._default_manager.db_manager(db).get_by_natural_key(*field_value)
                            value = getattr(obj, field.rel.field_name)
                            # If this is a natural foreign key to an object that
                            # has a FK/O2O as the foreign key, use the FK value
                            if field.rel.to._meta.pk.rel:
                                value = value.pk
                        else:
                            value = field.rel.to._meta.get_field(field.rel.field_name).to_python(field_value)
                        data[field.attname] = value
                    else:
                        data[field.attname] = field.rel.to._meta.get_field(field.rel.field_name).to_python(field_value)
                else:
                    data[field.attname] = None
            # Handle all other fields
            else:
                data[field.name] = field.to_python(field_value)

        obj = base.build_instance(Model, data, db)
        yield base.DeserializedObject(obj, m2m_data)
def _get_model(model_identifier):
    """
    Resolve an "app_label.model_name" string to its model class.

    Raises base.DeserializationError when the identifier does not name a
    registered model.
    """
    try:
        model = apps.get_model(model_identifier)
    except (LookupError, TypeError):
        raise base.DeserializationError(
            "Invalid model identifier: '%s'" % model_identifier)
    return model
| apache-2.0 |
arnavd96/Cinemiezer | myvenv/lib/python3.4/site-packages/boto/cloudsearch2/optionstatus.py | 153 | 8121 | # Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.compat import json
class OptionStatus(dict):
    """
    A dict of option values combined with status metadata exposed as
    attributes.  The option values are merged in from the ``Options`` part
    of the service response; the ``Status`` part populates the attributes.

    :ivar domain: The domain object this option is associated with.
    :ivar creation_date: A timestamp for when this option was created.
    :ivar status: The state of processing a change to an option.
        Possible values:

        * RequiresIndexDocuments: the option's latest value will not
          be visible in searches until IndexDocuments has been called
          and indexing is complete.
        * Processing: the option's latest value is not yet visible in
          all searches but is in the process of being activated.
        * Active: the option's latest value is completely visible.

    :ivar update_date: A timestamp for when this option was updated.
    :ivar update_version: A unique integer that indicates when this
        option was last updated.
    """

    def __init__(self, domain, data=None, refresh_fn=None, refresh_key=None,
                 save_fn=None):
        self.domain = domain
        self.refresh_fn = refresh_fn
        self.refresh_key = refresh_key
        self.save_fn = save_fn
        self.refresh(data)

    def _update_status(self, status):
        # Copy the service-side status fields onto attributes.
        self.creation_date = status['CreationDate']
        self.status = status['State']
        self.update_date = status['UpdateDate']
        self.update_version = int(status['UpdateVersion'])

    def _update_options(self, options):
        if options:
            self.update(options)

    def refresh(self, data=None):
        """
        Refresh the local state of the object.  Either pass new state in
        as ``data`` or, when ``data`` is omitted, the state is fetched via
        the configured refresh function.
        """
        if not data:
            if self.refresh_fn:
                data = self.refresh_fn(self.domain.name)
            if data and self.refresh_key:
                # Drill into the nested response to reach the right bag.
                for part in self.refresh_key:
                    data = data[part]
        if data:
            self._update_status(data['Status'])
            self._update_options(data['Options'])

    def to_json(self):
        """
        Return the option values serialized as a JSON string.
        """
        return json.dumps(self)

    def save(self):
        """
        Push the current local state back to the CloudSearch service and
        refresh from its response.
        """
        if self.save_fn:
            self.refresh(self.save_fn(self.domain.name, self.to_json()))
class IndexFieldStatus(OptionStatus):
    # save() is deliberately a no-op for this option type.
    def save(self):
        pass


class AvailabilityOptionsStatus(OptionStatus):
    # save() is deliberately a no-op for this option type.
    def save(self):
        pass


class ScalingParametersStatus(IndexFieldStatus):
    # Inherits the no-op save() from IndexFieldStatus.
    pass


class ExpressionStatus(IndexFieldStatus):
    # Inherits the no-op save() from IndexFieldStatus.
    pass
class ServicePoliciesStatus(OptionStatus):
    # Holds the domain's access-policy document.  The dict payload is an
    # IAM-style policy: {"Statement": [{"Effect", "Action", "Resource",
    # "Condition": {"IpAddress": {"aws:SourceIp": [...]}}}, ...]}.

    def new_statement(self, arn, ip):
        """
        Returns a new policy statement that will allow
        access to the service described by ``arn`` by the
        ip specified in ``ip``.

        :type arn: string
        :param arn: The Amazon Resource Notation identifier for the
            service you wish to provide access to. This would be
            either the search service or the document service.

        :type ip: string
        :param ip: An IP address or CIDR block you wish to grant access
            to.
        """
        return {
            "Effect": "Allow",
            "Action": "*",  # Docs say use GET, but denies unless *
            "Resource": arn,
            "Condition": {
                "IpAddress": {
                    "aws:SourceIp": [ip]
                }
            }
        }

    def _allow_ip(self, arn, ip):
        # Add ``ip`` to the existing statement for ``arn`` when one with an
        # IpAddress condition exists; otherwise append (or create) a fresh
        # statement.  Saves the policy either way.
        if 'Statement' not in self:
            s = self.new_statement(arn, ip)
            self['Statement'] = [s]
            self.save()
        else:
            add_statement = True
            for statement in self['Statement']:
                if statement['Resource'] == arn:
                    for condition_name in statement['Condition']:
                        if condition_name == 'IpAddress':
                            add_statement = False
                            condition = statement['Condition'][condition_name]
                            if ip not in condition['aws:SourceIp']:
                                condition['aws:SourceIp'].append(ip)
            if add_statement:
                s = self.new_statement(arn, ip)
                self['Statement'].append(s)
            self.save()

    def allow_search_ip(self, ip):
        """
        Add the provided ip address or CIDR block to the list of
        allowable address for the search service.

        :type ip: string
        :param ip: An IP address or CIDR block you wish to grant access
            to.
        """
        arn = self.domain.service_arn
        self._allow_ip(arn, ip)

    def allow_doc_ip(self, ip):
        """
        Add the provided ip address or CIDR block to the list of
        allowable address for the document service.

        :type ip: string
        :param ip: An IP address or CIDR block you wish to grant access
            to.
        """
        arn = self.domain.service_arn
        self._allow_ip(arn, ip)

    def _disallow_ip(self, arn, ip):
        # Remove ``ip`` from every matching statement's IpAddress condition;
        # only write the policy back when something actually changed.
        if 'Statement' not in self:
            return
        need_update = False
        for statement in self['Statement']:
            if statement['Resource'] == arn:
                for condition_name in statement['Condition']:
                    if condition_name == 'IpAddress':
                        condition = statement['Condition'][condition_name]
                        if ip in condition['aws:SourceIp']:
                            condition['aws:SourceIp'].remove(ip)
                            need_update = True
        if need_update:
            self.save()

    def disallow_search_ip(self, ip):
        """
        Remove the provided ip address or CIDR block from the list of
        allowable address for the search service.

        :type ip: string
        :param ip: An IP address or CIDR block you wish to grant access
            to.
        """
        arn = self.domain.service_arn
        self._disallow_ip(arn, ip)

    def disallow_doc_ip(self, ip):
        """
        Remove the provided ip address or CIDR block from the list of
        allowable address for the document service.

        :type ip: string
        :param ip: An IP address or CIDR block you wish to grant access
            to.
        """
        arn = self.domain.service_arn
        self._disallow_ip(arn, ip)
| mit |
cysnake4713/odoo | addons/sale/wizard/sale_make_invoice.py | 344 | 3410 | ##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class sale_make_invoice(osv.osv_memory):
    """Wizard that creates invoices for the selected sales orders."""
    _name = "sale.make.invoice"
    _description = "Sales Make Invoice"
    _columns = {
        'grouped': fields.boolean('Group the invoices', help='Check the box to group the invoices for the same customers'),
        'invoice_date': fields.date('Invoice Date'),
    }
    _defaults = {
        'grouped': False,
        'invoice_date': fields.date.context_today,
    }

    def view_init(self, cr, uid, fields_list, context=None):
        # Refuse to open the wizard on a draft (unconfirmed) sales order.
        if context is None:
            context = {}
        record_id = context and context.get('active_id', False)
        order = self.pool.get('sale.order').browse(cr, uid, record_id, context=context)
        if order.state == 'draft':
            raise osv.except_osv(_('Warning!'), _('You cannot create invoice when sales order is not confirmed.'))
        return False

    def make_invoices(self, cr, uid, ids, context=None):
        """Create invoices for the active sales orders and return an
        act_window action listing the newly created invoices."""
        order_obj = self.pool.get('sale.order')
        mod_obj = self.pool.get('ir.model.data')
        act_obj = self.pool.get('ir.actions.act_window')
        newinv = []
        if context is None:
            context = {}
        data = self.read(cr, uid, ids)[0]
        # Validate every selected order before creating anything.
        for sale_order in order_obj.browse(cr, uid, context.get(('active_ids'), []), context=context):
            if sale_order.state != 'manual':
                raise osv.except_osv(_('Warning!'), _("You shouldn't manually invoice the following sale order %s") % (sale_order.name))
        order_obj.action_invoice_create(cr, uid, context.get(('active_ids'), []), data['grouped'], date_invoice=data['invoice_date'])
        # Collect the ids of the invoices that now hang off the orders.
        orders = order_obj.browse(cr, uid, context.get(('active_ids'), []), context=context)
        for o in orders:
            for i in o.invoice_ids:
                newinv.append(i.id)
        # Dummy call to workflow, will not create another invoice but bind the new invoice to the subflow
        order_obj.signal_workflow(cr, uid, [o.id for o in orders if o.order_policy == 'manual'], 'manual_invoice')
        result = mod_obj.get_object_reference(cr, uid, 'account', 'action_invoice_tree1')
        id = result and result[1] or False
        result = act_obj.read(cr, uid, [id], context=context)[0]
        # Restrict the action's domain to the invoices created above.
        result['domain'] = "[('id','in', [" + ','.join(map(str, newinv)) + "])]"
        return result
| agpl-3.0 |
soulmachine/scikit-learn | sklearn/kernel_approximation.py | 3 | 16954 | """
The :mod:`sklearn.kernel_approximation` module implements several
approximate kernel feature maps base on Fourier transforms.
"""
# Author: Andreas Mueller <amueller@ais.uni-bonn.de>
#
# License: BSD 3 clause
import warnings
import numpy as np
import scipy.sparse as sp
from scipy.linalg import svd
from .base import BaseEstimator
from .base import TransformerMixin
from .utils import check_array, check_random_state, as_float_array
from .utils.extmath import safe_sparse_dot
from .metrics.pairwise import pairwise_kernels
class RBFSampler(BaseEstimator, TransformerMixin):
    """Monte Carlo approximation of the RBF-kernel feature map.

    Draws random Fourier features so that inner products in the mapped
    space approximate the RBF kernel.

    Parameters
    ----------
    gamma : float
        Parameter of RBF kernel: exp(-gamma * x^2)

    n_components : int
        Number of Monte Carlo samples per original feature; equals the
        dimensionality of the computed feature space.

    random_state : {int, RandomState}, optional
        If int, random_state is the seed used by the random number generator;
        if RandomState instance, random_state is the random number generator.

    Notes
    -----
    See "Random Features for Large-Scale Kernel Machines" by A. Rahimi and
    Benjamin Recht.
    """

    def __init__(self, gamma=1., n_components=100, random_state=None):
        self.gamma = gamma
        self.n_components = n_components
        self.random_state = random_state

    def fit(self, X, y=None):
        """Draw the random projection used by ``transform``.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training data; only the number of features is used.

        Returns
        -------
        self : object
            Returns the transformer.
        """
        X = check_array(X, accept_sparse='csr')
        rng = check_random_state(self.random_state)
        n_features = X.shape[1]
        # Frequencies are N(0, 1) scaled by sqrt(gamma); phases are
        # uniform on [0, 2*pi).
        gaussian = rng.normal(size=(n_features, self.n_components))
        self.random_weights_ = np.sqrt(self.gamma) * gaussian
        self.random_offset_ = rng.uniform(0, 2 * np.pi,
                                          size=self.n_components)
        return self

    def transform(self, X, y=None):
        """Apply the approximate feature map to X.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)

        Returns
        -------
        X_new : array-like, shape (n_samples, n_components)
        """
        X = check_array(X, accept_sparse='csr')
        embedded = safe_sparse_dot(X, self.random_weights_)
        embedded = np.cos(embedded + self.random_offset_)
        embedded *= np.sqrt(2.) / np.sqrt(self.n_components)
        return embedded
class SkewedChi2Sampler(BaseEstimator, TransformerMixin):
    """Monte Carlo feature map for the "skewed chi-squared" kernel.

    Parameters
    ----------
    skewedness : float
        "skewedness" parameter of the kernel. Needs to be cross-validated.

    n_components : int
        Number of Monte Carlo samples per original feature; equals the
        dimensionality of the computed feature space.

    random_state : {int, RandomState}, optional
        If int, random_state is the seed used by the random number generator;
        if RandomState instance, random_state is the random number generator.

    References
    ----------
    See "Random Fourier Approximations for Skewed Multiplicative Histogram
    Kernels" by Fuxin Li, Catalin Ionescu and Cristian Sminchisescu.

    See also
    --------
    AdditiveChi2Sampler : A different approach for approximating an additive
        variant of the chi squared kernel.

    sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel.
    """

    def __init__(self, skewedness=1., n_components=100, random_state=None):
        self.skewedness = skewedness
        self.n_components = n_components
        self.random_state = random_state

    def fit(self, X, y=None):
        """Draw the random projection used by ``transform``.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data; only the number of features is used.

        Returns
        -------
        self : object
            Returns the transformer.
        """
        X = check_array(X)
        rng = check_random_state(self.random_state)
        n_features = X.shape[1]
        draws = rng.uniform(size=(n_features, self.n_components))
        # Map uniform draws through the inverse CDF of sech to obtain the
        # kernel's Fourier frequencies.
        self.random_weights_ = (1. / np.pi
                                * np.log(np.tan(np.pi / 2. * draws)))
        self.random_offset_ = rng.uniform(0, 2 * np.pi,
                                          size=self.n_components)
        return self

    def transform(self, X, y=None):
        """Apply the approximate feature map to X.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Must be non-negative.

        Returns
        -------
        X_new : array-like, shape (n_samples, n_components)
        """
        X = as_float_array(X, copy=True)
        X = check_array(X, copy=False)
        if (X < 0).any():
            raise ValueError("X may not contain entries smaller than zero.")
        shifted = np.log(X + self.skewedness)
        embedded = safe_sparse_dot(shifted, self.random_weights_)
        embedded = np.cos(embedded + self.random_offset_)
        embedded *= np.sqrt(2.) / np.sqrt(self.n_components)
        return embedded
class AdditiveChi2Sampler(BaseEstimator, TransformerMixin):
    """Approximate feature map for additive chi2 kernel.

    Uses sampling the fourier transform of the kernel characteristic
    at regular intervals.

    Since the kernel that is to be approximated is additive, the components of
    the input vectors can be treated separately.  Each entry in the original
    space is transformed into 2*sample_steps+1 features, where sample_steps is
    a parameter of the method. Typical values of sample_steps include 1, 2 and
    3.

    Optimal choices for the sampling interval for certain data ranges can be
    computed (see the reference). The default values should be reasonable.

    Parameters
    ----------
    sample_steps : int, optional
        Gives the number of (complex) sampling points.
    sample_interval : float, optional
        Sampling interval. Must be specified when sample_steps not in {1,2,3}.

    Notes
    -----
    This estimator approximates a slightly different version of the additive
    chi squared kernel then ``metric.additive_chi2`` computes.

    See also
    --------
    SkewedChi2Sampler : A Fourier-approximation to a non-additive variant of
        the chi squared kernel.

    sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel.

    sklearn.metrics.pairwise.additive_chi2_kernel : The exact additive chi
        squared kernel.

    References
    ----------
    See `"Efficient additive kernels via explicit feature maps"
    <http://eprints.pascal-network.org/archive/00006964/01/vedaldi10.pdf>`_
    Vedaldi, A. and Zisserman, A., Computer Vision and Pattern Recognition 2010
    """

    def __init__(self, sample_steps=2, sample_interval=None):
        self.sample_steps = sample_steps
        self.sample_interval = sample_interval

    def fit(self, X, y=None):
        """Set parameters."""
        X = check_array(X, accept_sparse='csr')
        if self.sample_interval is None:
            # See reference, figure 2 c)
            # Precomputed optimal intervals for the common step counts.
            if self.sample_steps == 1:
                self.sample_interval_ = 0.8
            elif self.sample_steps == 2:
                self.sample_interval_ = 0.5
            elif self.sample_steps == 3:
                self.sample_interval_ = 0.4
            else:
                raise ValueError("If sample_steps is not in [1, 2, 3],"
                                 " you need to provide sample_interval")
        else:
            self.sample_interval_ = self.sample_interval
        return self

    def transform(self, X, y=None):
        """Apply approximate feature map to X.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = (n_samples, n_features)

        Returns
        -------
        X_new : {array, sparse matrix}, \
               shape = (n_samples, n_features * (2*sample_steps + 1))
            Whether the return value is an array of sparse matrix depends on
            the type of the input X.
        """
        X = check_array(X, accept_sparse='csr')
        sparse = sp.issparse(X)

        # check if X has negative values. Doesn't play well with np.log.
        if ((X.data if sparse else X) < 0).any():
            raise ValueError("Entries of X must be non-negative.")
        # zeroth component
        # 1/cosh = sech
        # cosh(0) = 1.0
        transf = self._transform_sparse if sparse else self._transform_dense
        return transf(X)

    def _transform_dense(self, X):
        # Dense path: build the 2*sample_steps+1 feature blocks (sqrt
        # zeroth block, then cos/sin pairs per step) and concatenate.
        non_zero = (X != 0.0)
        X_nz = X[non_zero]

        X_step = np.zeros_like(X)
        X_step[non_zero] = np.sqrt(X_nz * self.sample_interval_)

        X_new = [X_step]

        log_step_nz = self.sample_interval_ * np.log(X_nz)
        step_nz = 2 * X_nz * self.sample_interval_

        for j in range(1, self.sample_steps):
            factor_nz = np.sqrt(step_nz /
                                np.cosh(np.pi * j * self.sample_interval_))

            X_step = np.zeros_like(X)
            X_step[non_zero] = factor_nz * np.cos(j * log_step_nz)
            X_new.append(X_step)

            X_step = np.zeros_like(X)
            X_step[non_zero] = factor_nz * np.sin(j * log_step_nz)
            X_new.append(X_step)

        return np.hstack(X_new)

    def _transform_sparse(self, X):
        # Sparse path: reuse X's indices/indptr so every feature block
        # shares the input's sparsity pattern.
        indices = X.indices.copy()
        indptr = X.indptr.copy()

        data_step = np.sqrt(X.data * self.sample_interval_)
        X_step = sp.csr_matrix((data_step, indices, indptr),
                               shape=X.shape, dtype=X.dtype, copy=False)
        X_new = [X_step]

        log_step_nz = self.sample_interval_ * np.log(X.data)
        step_nz = 2 * X.data * self.sample_interval_

        for j in range(1, self.sample_steps):
            factor_nz = np.sqrt(step_nz /
                                np.cosh(np.pi * j * self.sample_interval_))

            data_step = factor_nz * np.cos(j * log_step_nz)
            X_step = sp.csr_matrix((data_step, indices, indptr),
                                   shape=X.shape, dtype=X.dtype, copy=False)
            X_new.append(X_step)

            data_step = factor_nz * np.sin(j * log_step_nz)
            X_step = sp.csr_matrix((data_step, indices, indptr),
                                   shape=X.shape, dtype=X.dtype, copy=False)
            X_new.append(X_step)

        return sp.hstack(X_new)
class Nystroem(BaseEstimator, TransformerMixin):
    """Approximate a kernel map using a subset of the training data.

    Constructs an approximate feature map for an arbitrary kernel
    using a subset of the data as basis.

    Parameters
    ----------
    kernel : string or callable, default="rbf"
        Kernel map to be approximated. A callable should accept two arguments
        and the keyword arguments passed to this object as kernel_params, and
        should return a floating point number.

    n_components : int
        Number of features to construct.
        How many data points will be used to construct the mapping.

    gamma : float, default=None
        Gamma parameter for the RBF, polynomial, exponential chi2 and
        sigmoid kernels. Interpretation of the default value is left to
        the kernel; see the documentation for sklearn.metrics.pairwise.
        Ignored by other kernels.

    degree : float, default=3
        Degree of the polynomial kernel. Ignored by other kernels.

    coef0 : float, default=1
        Zero coefficient for polynomial and sigmoid kernels.
        Ignored by other kernels.

    kernel_params : mapping of string to any, optional
        Additional parameters (keyword arguments) for kernel function passed
        as callable object.

    random_state : {int, RandomState}, optional
        If int, random_state is the seed used by the random number generator;
        if RandomState instance, random_state is the random number generator.

    Attributes
    ----------
    components_ : array, shape (n_components, n_features)
        Subset of training points used to construct the feature map.

    component_indices_ : array, shape (n_components)
        Indices of ``components_`` in the training set.

    normalization_ : array, shape (n_components, n_components)
        Normalization matrix needed for embedding.
        Square root of the kernel matrix on ``components_``.

    References
    ----------
    * Williams, C.K.I. and Seeger, M.
      "Using the Nystroem method to speed up kernel machines",
      Advances in neural information processing systems 2001

    * T. Yang, Y. Li, M. Mahdavi, R. Jin and Z. Zhou
      "Nystroem Method vs Random Fourier Features: A Theoretical and Empirical
      Comparison",
      Advances in Neural Information Processing Systems 2012

    See also
    --------
    RBFSampler : An approximation to the RBF kernel using random Fourier
                 features.

    sklearn.metrics.pairwise.kernel_metrics : List of built-in kernels.
    """

    def __init__(self, kernel="rbf", gamma=None, coef0=1, degree=3,
                 kernel_params=None, n_components=100, random_state=None):
        self.kernel = kernel
        self.gamma = gamma
        self.coef0 = coef0
        self.degree = degree
        self.kernel_params = kernel_params
        self.n_components = n_components
        self.random_state = random_state

    def fit(self, X, y=None):
        """Fit estimator to data.

        Samples a subset of training points, computes kernel
        on these and computes normalization matrix.

        Parameters
        ----------
        X : array-like, shape=(n_samples, n_feature)
            Training data.
        """
        rnd = check_random_state(self.random_state)
        if not sp.issparse(X):
            X = np.asarray(X)
        n_samples = X.shape[0]

        # get basis vectors
        if self.n_components > n_samples:
            # XXX should we just bail?
            n_components = n_samples
            warnings.warn("n_components > n_samples. This is not possible.\n"
                          "n_components was set to n_samples, which results"
                          " in inefficient evaluation of the full kernel.")
        else:
            n_components = self.n_components
        n_components = min(n_samples, n_components)
        inds = rnd.permutation(n_samples)
        basis_inds = inds[:n_components]
        basis = X[basis_inds]

        basis_kernel = pairwise_kernels(basis, metric=self.kernel,
                                        filter_params=True,
                                        **self._get_kernel_params())

        # sqrt of kernel matrix on basis vectors
        U, S, V = svd(basis_kernel)
        # Guard against (near-)zero singular values of a singular basis
        # kernel, which would otherwise produce inf/NaN in normalization_.
        S = np.maximum(S, 1e-12)
        self.normalization_ = np.dot(U / np.sqrt(S), V)
        self.components_ = basis
        # Fix: store only the indices of the sampled basis points, so the
        # attribute matches its documented shape (n_components,).  The full
        # permutation of all n_samples indices was stored previously.
        self.component_indices_ = basis_inds
        return self

    def transform(self, X):
        """Apply feature map to X.

        Computes an approximate feature map using the kernel
        between some training points and X.

        Parameters
        ----------
        X : array-like, shape=(n_samples, n_features)
            Data to transform.

        Returns
        -------
        X_transformed : array, shape=(n_samples, n_components)
            Transformed data.
        """
        embedded = pairwise_kernels(X, self.components_,
                                    metric=self.kernel,
                                    filter_params=True,
                                    **self._get_kernel_params())
        return np.dot(embedded, self.normalization_.T)

    def _get_kernel_params(self):
        # Build the keyword arguments forwarded to pairwise_kernels; for a
        # callable kernel only explicit kernel_params are passed through.
        params = self.kernel_params
        if params is None:
            params = {}
        if not callable(self.kernel):
            params['gamma'] = self.gamma
            params['degree'] = self.degree
            params['coef0'] = self.coef0
        return params
| bsd-3-clause |
sloanyang/gyp | pylib/gyp/MSVSUserFile.py | 2710 | 5094 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Visual Studio user preferences file writer."""
import os
import re
import socket # for gethostname
import gyp.common
import gyp.easy_xml as easy_xml
#------------------------------------------------------------------------------
def _FindCommandInPath(command):
"""If there are no slashes in the command given, this function
searches the PATH env to find the given command, and converts it
to an absolute path. We have to do this because MSVS is looking
for an actual file to launch a debugger on, not just a command
line. Note that this happens at GYP time, so anything needing to
be built needs to have a full path."""
if '/' in command or '\\' in command:
# If the command already has path elements (either relative or
# absolute), then assume it is constructed properly.
return command
else:
# Search through the path list and find an existing file that
# we can access.
paths = os.environ.get('PATH','').split(os.pathsep)
for path in paths:
item = os.path.join(path, command)
if os.path.isfile(item) and os.access(item, os.X_OK):
return item
return command
def _QuoteWin32CommandLineArgs(args):
new_args = []
for arg in args:
# Replace all double-quotes with double-double-quotes to escape
# them for cmd shell, and then quote the whole thing if there
# are any.
if arg.find('"') != -1:
arg = '""'.join(arg.split('"'))
arg = '"%s"' % arg
# Otherwise, if there are any spaces, quote the whole arg.
elif re.search(r'[ \t\n]', arg):
arg = '"%s"' % arg
new_args.append(arg)
return new_args
class Writer(object):
  """Visual Studio XML user file writer."""

  def __init__(self, user_file_path, version, name):
    """Initializes the user file.

    Args:
      user_file_path: Path to the user file.
      version: Version info.
      name: Name of the user file.
    """
    self.user_file_path = user_file_path
    self.version = version
    self.name = name
    self.configurations = {}

  def AddConfig(self, name):
    """Adds a configuration to the project.

    Args:
      name: Configuration name.
    """
    self.configurations[name] = ['Configuration', {'Name': name}]

  def AddDebugSettings(self, config_name, command, environment=None,
                       working_directory=""):
    """Adds a DebugSettings node to the user file for a particular config.

    Args:
      config_name: Name of the configuration to attach the settings to.
      command: command line to run. First element in the list is the
          executable. All elements of the command will be quoted if
          necessary.
      environment: optional dict of environment variables for the debuggee.
      working_directory: working directory for the debuggee. (optional)
    """
    # Fix: |environment| previously defaulted to a shared mutable dict
    # ({}); defaulting to None avoids state leaking between calls.
    if environment is None:
      environment = {}
    command = _QuoteWin32CommandLineArgs(command)

    abs_command = _FindCommandInPath(command[0])

    if environment and isinstance(environment, dict):
      # Serialize as space-separated KEY="value" pairs for MSVS.
      env_list = ['%s="%s"' % (key, val)
                  for (key, val) in environment.items()]
      environment = ' '.join(env_list)
    else:
      environment = ''

    n_cmd = ['DebugSettings',
             {'Command': abs_command,
              'WorkingDirectory': working_directory,
              'CommandArguments': " ".join(command[1:]),
              'RemoteMachine': socket.gethostname(),
              'Environment': environment,
              'EnvironmentMerge': 'true',
              # Currently these are all "dummy" values that we're just setting
              # in the default manner that MSVS does it.  We could use some of
              # these to add additional capabilities, I suppose, but they might
              # not have parity with other platforms then.
              'Attach': 'false',
              'DebuggerType': '3',  # 'auto' debugger
              'Remote': '1',
              'RemoteCommand': '',
              'HttpUrl': '',
              'PDBPath': '',
              'SQLDebugging': '',
              'DebuggerFlavor': '0',
              'MPIRunCommand': '',
              'MPIRunArguments': '',
              'MPIRunWorkingDirectory': '',
              'ApplicationCommand': '',
              'ApplicationArguments': '',
              'ShimCommand': '',
              'MPIAcceptMode': '',
              'MPIAcceptFilter': ''
             }]

    # Find the config, and add it if it doesn't exist.
    if config_name not in self.configurations:
      self.AddConfig(config_name)

    # Add the DebugSettings onto the appropriate config.
    self.configurations[config_name].append(n_cmd)

  def WriteIfChanged(self):
    """Writes the user file."""
    configs = ['Configurations']
    # iteritems() was Python-2-only; items() behaves the same here and
    # also works on Python 3.  Sorting keeps the output deterministic.
    for config, spec in sorted(self.configurations.items()):
      configs.append(spec)

    content = ['VisualStudioUserFile',
               {'Version': self.version.ProjectVersion(),
                'Name': self.name
               },
               configs]
    easy_xml.WriteXmlIfChanged(content, self.user_file_path,
                               encoding="Windows-1252")
| bsd-3-clause |
libAtoms/matscipy | matscipy/surface.py | 1 | 8725 | # ======================================================================
# matscipy - Python materials science tools
# https://github.com/libAtoms/matscipy
#
# Copyright (2014) James Kermode, King's College London
# Lars Pastewka, Karlsruhe Institute of Technology
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# ======================================================================
import itertools
import functools
import numpy as np
from numpy.linalg import norm, inv
def gcd(a, b):
    """Return the greatest common divisor of a and b.

    Iterative Euclidean algorithm: repeatedly replace (a, b) with
    (b, a mod b) until b is zero, at which point a is the answer.
    """
    while b != 0:
        a, b = b, a % b
    return a
class MillerIndex(np.ndarray):
    """
    Representation of a three or four index Miller direction or plane

    A :class:`MillerIndex` can be constructed from vector or parsed from a string::

        x = MillerIndex('-211')
        y = MillerIndex('111', type='plane')
        z = x.cross(y)
        print x # prints "[-211]"
        print y # prints "(111)", note round brackets denoting a plane
        print z.latex()
        assert(angle_between(x,y) == pi/2.)
        assert(angle_between(y,z) == pi/2.)
        assert(angle_between(x,z) == pi/2.)
    """

    __array_priority__ = 101.0

    # Bracket styles distinguishing directions/planes and their families.
    brackets = {'direction': '[]',
                'direction_family': '<>',
                'plane': '()',
                'plane_family': '{}'}

    all_brackets = list(itertools.chain(*brackets.values()))

    def __new__(cls, v=None, type='direction'):
        if isinstance(v, str):
            v = MillerIndex.parse(v)
        if len(v) == 3 or len(v) == 4:
            self = np.ndarray.__new__(cls, len(v))
            self[:] = v
        else:
            raise ValueError('%s input v should be of length 3 or 4' % cls.__name__)
        self.type = type
        self.simplify()
        return self

    def __array_finalize__(self, obj):
        if obj is None:
            return
        # Propagate the direction/plane type through numpy view casting.
        self.type = getattr(obj, 'type', 'direction')

    def __repr__(self):
        return ('%s(['+'%d'*len(self)+'])') % ((self.__class__.__name__,) + tuple(self))

    def __str__(self):
        # Wrap the components in the bracket style matching self.type.
        bopen, bclose = MillerIndex.brackets[self.type]
        return (bopen+'%d'*len(self)+bclose) % tuple(self)

    def latex(self):
        """
        Format this :class:`MillerIndex` as a LaTeX string
        """
        s = '$'
        bopen, bclose = MillerIndex.brackets[self.type]
        s += bopen
        for component in self:
            if component < 0:
                s += r'\bar{%d}' % abs(component)
            else:
                s += '%d' % component
        s += bclose
        s += '$'
        return s

    @classmethod
    def parse(cls, s):
        r"""
        Parse a Miller index string

        Negative indices can be denoted by:
         1. leading minus sign, e.g. ``[11-2]``
         2. trailing ``b`` (for 'bar'), e.g. ``112b``
         3. LaTeX ``\bar{}``, e.g. ``[11\bar{2}]`` (which renders as :math:`[11\bar{2}]` in LaTeX)

        Leading or trailing brackets of various kinds are ignored.
        i.e. ``[001]``, ``{001}``, ``(001)``, ``[001]``, ``<001>``, ``001`` are all equivalent.

        Returns an array of components (i,j,k) or (h,k,i,l)
        """
        if not isinstance(s, str):
            raise TypeError("Can't parse from %r of type %r" % (s, type(s)))
        orig_s = s
        # Normalise: convert \bar{ to a minus sign and strip all brackets.
        for (a, b) in [(r'\bar{','-')] + [(b,'') for b in MillerIndex.all_brackets]:
            s = s.replace(a, b)
        L = list(s)
        components = np.array([1,1,1,1]) # space for up to 4 elements
        i = 3 # parse backwards from end of string
        while L:
            if i < 0:
                raise ValueError('Cannot parse Miller index from string "%s", too many components found' % orig_s)
            c = L.pop()
            if c == '-':
                if i == 3:
                    raise ValueError('Miller index string "%s" cannot end with a minus sign' % orig_s)
                # Sign applies to the component parsed just before this one.
                components[i+1] *= -1
            elif c == 'b':
                # Trailing 'b' ("bar") negates the current component.
                components[i] *= -1
            elif c in ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']:
                components[i] *= int(c)
                i -= 1
            else:
                raise ValueError('Unexpected character "%s" in miller index string "%s"' % (c, orig_s))
        if i == 0:
            return components[1:]
        elif i == -1:
            return components
        else:
            raise ValueError('Cannot parse Miller index from string %s, too few components found' % orig_s)
        # Fix: an unreachable 'self.simplify()' statement used to follow the
        # returns above; it was dead code ('self' is not even bound in a
        # classmethod) and has been removed.

    def simplify(self):
        """
        Simplify by dividing through by greatest common denominator
        """
        d = abs(functools.reduce(gcd, self))
        self[:] /= d

    def simplified(self):
        """Return a simplified copy, leaving this index unchanged."""
        copy = self.copy()
        copy.simplify()
        return copy

    def norm(self):
        """Euclidean norm of the index vector."""
        return np.linalg.norm(self)

    def normalised(self):
        """Return the three-index form as a unit-length float array."""
        a = self.as3()
        return np.array(a, dtype=float)/a.norm()

    hat = normalised

    def cross(self, other):
        """Simplified cross product with another direction (3-index form)."""
        a = self.as3()
        b = MillerIndex(other).as3()
        return np.cross(a, b).view(MillerIndex).simplified()

    def cosine(self, other):
        """Cosine of the angle between this index and `other`."""
        other = MillerIndex(other)
        return np.dot(self.normalised(), other.normalised())

    def angle(self, other):
        """Angle between this index and `other`, in radians."""
        return np.arccos(self.cosine(other))

    def as4(self):
        """Return the four-index Miller-Bravais (h,k,i,l) form."""
        if len(self) == 4:
            return self
        else:
            h, k, l = self
            # Fix: the redundant index is i = -(h+k) by the Miller-Bravais
            # convention; the previous code computed -(h+l).
            i = -(h+k)
            return MillerIndex((h,k,i,l))

    def as3(self):
        """Return the three-index (h,k,l) form, dropping i if present."""
        if len(self) == 3:
            return self
        else:
            h, k, i, l = self
            return MillerIndex((h, k, l))

    def plane_spacing(self, a):
        """Interplanar spacing for a cubic lattice with lattice constant `a`."""
        return a/self.as3().norm()
def MillerPlane(v):
    """Special case of :class:`MillerIndex` with ``type="plane"``"""
    return MillerIndex(v, type='plane')
def MillerDirection(v):
    """Special case of :class:`MillerIndex` with ``type="direction"`` (the default)"""
    return MillerIndex(v, type='direction')
def angle_between(a, b):
    """Angle between crystallographic directions between a=[ijk] and b=[lmn], in radians."""
    direction = MillerIndex(a)
    return direction.angle(b)
def make_unit_slab(unit_cell, axes):
    """
    General purpose unit slab creation routine

    Only tested with cubic unit cells.

    Code translated from quippy.structures.unit_slab()
    https://github.com/libAtoms/QUIP/blob/public/src/libAtoms/Structures.f95

    Arguments
    ---------
    unit_cell : Atoms
        Atoms object containing primitive unit cell
    axes: 3x3 array
        Miller indices of desired slab, as columns

    Returns
    -------
    slab : Atoms
        Output slab, with axes aligned with x, y, z.
    """
    # Build the rotation taking the requested (normalised) axes columns
    # onto the Cartesian x, y and z directions.
    a1 = axes[:,0]
    a2 = axes[:,1]
    a3 = axes[:,2]
    rot = np.zeros((3,3))
    rot[0,:] = a1/norm(a1)
    rot[1,:] = a2/norm(a2)
    rot[2,:] = a3/norm(a3)
    # Rotate atomic positions and lattice into the new frame.
    # NOTE(review): assumes an ASE-style Atoms API (get_positions/get_cell
    # return row-wise arrays, hence the transposes) -- confirm.
    pos = unit_cell.get_positions().T
    lattice = unit_cell.get_cell().T
    lattice = np.dot(rot, lattice)
    at = unit_cell.copy()
    at.set_positions(np.dot(rot, pos).T)
    at.set_cell(lattice.T)
    # A 5x5x5 supercell centred on the origin provides enough atoms to
    # carve the (generally larger) slab cell out of.
    sup = at * (5,5,5)
    sup.positions[...] -= sup.positions.mean(axis=0)
    # New cell vectors are integer (Miller) combinations of the rotated
    # primitive lattice vectors.
    sup_lattice = np.zeros((3,3))
    for i in range(3):
        sup_lattice[:,i] = (axes[0,i]*lattice[:,0] +
                            axes[1,i]*lattice[:,1] +
                            axes[2,i]*lattice[:,2])
    sup.set_cell(sup_lattice.T, scale_atoms=False)
    # Form primitive cell by discarding atoms with
    # lattice coordinates outside range [-0.5,0.5]
    d = [0.01,0.02,0.03] # Small shift to avoid conincidental alignments
    i = 0
    g = inv(sup_lattice)
    sup_pos = sup.get_positions().T
    while True:
        # Fractional coordinate of atom i in the new cell (shifted by d).
        t = np.dot(g, sup_pos[:, i] + d)
        if (t <= -0.5).any() | (t >= 0.5).any():
            # Atom lies outside the cell: remove it and retest this index,
            # since the following atoms shift down by one.
            del sup[i]
            sup_pos = sup.get_positions().T
            i -= 1 # Retest since we've removed an atom
        if i == len(sup)-1:
            break
        i += 1
    # Re-wrap positions into the final cell.
    sup.set_scaled_positions(sup.get_scaled_positions())
    return sup
| gpl-2.0 |
krieger-od/nwjs_chromium.src | chrome/common/extensions/docs/server2/manifest_features.py | 122 | 1683 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''
Provides a Manifest Feature abstraction, similar to but more strict than the
Feature schema (see feature_utility.py).
Each Manifest Feature has a 'level' in addition to the keys defined in a
Feature. 'level' can be 'required', 'only_one', 'recommended', or 'optional',
indicating how an app or extension should define a manifest property. If 'level'
is missing, 'optional' is assumed.
'''
def ConvertDottedKeysToNested(features):
  '''Some Manifest Features are subordinate to others, such as app.background to
  app. Subordinate Features can be moved inside the parent Feature under the key
  'children'.

  Modifies |features|, a Manifest Features dictionary, by moving subordinate
  Features with names of the form 'parent.child' into the 'parent' Feature.
  Child features are renamed to the 'child' section of their previous name.
  Applied recursively so that children can be nested arbitrarily.
  '''
  def add_child(features, parent, child_name, value):
    # Record the child's short name and file it under the parent's
    # 'children' dict (creating that dict on first use).
    value['name'] = child_name
    if not 'children' in features[parent]:
      features[parent]['children'] = {}
    features[parent]['children'][child_name] = value

  def insert_children(features):
    # Fix: iterate over a snapshot of the keys.  Dotted keys are popped
    # while looping, which raises RuntimeError on a live dict view under
    # Python 3 (keys() returned a copy only on Python 2).
    for name in list(features.keys()):
      if '.' in name:
        value = features.pop(name)
        parent, child_name = name.split('.', 1)
        add_child(features, parent, child_name, value)

    for value in features.values():
      if 'children' in value:
        insert_children(value['children'])

  insert_children(features)
  return features
| bsd-3-clause |
Anonymouslemming/ansible | lib/ansible/modules/monitoring/sensu_subscription.py | 17 | 5330 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Anders Ingemann <aim@secoya.dk>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: sensu_subscription
short_description: Manage Sensu subscriptions
version_added: 2.2
description:
- Manage which I(sensu channels) a machine should subscribe to
options:
name:
description:
- The name of the channel
required: true
state:
description:
- Whether the machine should subscribe or unsubscribe from the channel
choices: [ 'present', 'absent' ]
required: false
default: present
path:
description:
- Path to the subscriptions json file
required: false
default: /etc/sensu/conf.d/subscriptions.json
backup:
description:
- Create a backup file (if yes), including the timestamp information so you
- can get the original file back if you somehow clobbered it incorrectly.
choices: [ 'yes', 'no' ]
required: false
default: no
requirements: [ ]
author: Anders Ingemann
'''
RETURN = '''
reasons:
    description: the reasons why the module changed or did not change something
returned: success
type: list
sample: ["channel subscription was absent and state is `present'"]
'''
EXAMPLES = '''
# Subscribe to the nginx channel
- name: subscribe to nginx checks
sensu_subscription: name=nginx
# Unsubscribe from the common checks channel
- name: unsubscribe from common checks
sensu_subscription: name=common state=absent
'''
def sensu_subscription(module, path, name, state='present', backup=False):
    """Ensure subscription `name` is present in (or absent from) the Sensu
    client subscriptions file at `path`.

    Returns a (changed, reasons) tuple, where `reasons` explains why the
    module did or did not change anything.  Missing file/keys are created
    on demand when state is 'present'.
    """
    changed = False
    reasons = []

    try:
        import json
    except ImportError:
        import simplejson as json

    try:
        # Use a context manager so the handle is closed even on error.
        with open(path) as subs_file:
            config = json.load(subs_file)
    except IOError:
        e = get_exception()
        # Fix: compare errno with ==, not the identity operator 'is',
        # which only works for small ints by CPython implementation detail.
        if e.errno == 2:  # File not found, non-fatal
            if state == 'absent':
                reasons.append('file did not exist and state is `absent\'')
                return changed, reasons
            config = {}
        else:
            module.fail_json(msg=str(e))
    except ValueError:
        msg = '{path} contains invalid JSON'.format(path=path)
        module.fail_json(msg=msg)

    if 'client' not in config:
        if state == 'absent':
            reasons.append('`client\' did not exist and state is `absent\'')
            return changed, reasons
        config['client'] = {}
        changed = True
        reasons.append('`client\' did not exist')

    if 'subscriptions' not in config['client']:
        if state == 'absent':
            reasons.append('`client.subscriptions\' did not exist and state is `absent\'')
            return changed, reasons
        config['client']['subscriptions'] = []
        changed = True
        reasons.append('`client.subscriptions\' did not exist')

    if name not in config['client']['subscriptions']:
        if state == 'absent':
            reasons.append('channel subscription was absent')
            return changed, reasons
        config['client']['subscriptions'].append(name)
        changed = True
        reasons.append('channel subscription was absent and state is `present\'')
    else:
        if state == 'absent':
            config['client']['subscriptions'].remove(name)
            changed = True
            reasons.append('channel subscription was present and state is `absent\'')

    if changed and not module.check_mode:
        if backup:
            module.backup_local(path)
        try:
            # Fix: close the file deterministically instead of relying on
            # garbage collection of the open() handle.
            with open(path, 'w') as subs_file:
                subs_file.write(json.dumps(config, indent=2) + '\n')
        except IOError:
            e = get_exception()
            module.fail_json(msg='Failed to write to file %s: %s' % (path, str(e)))

    return changed, reasons
def main():
    """Module entry point: parse arguments and apply the subscription."""
    arg_spec = {'name': {'type': 'str', 'required': True},
                'path': {'type': 'str', 'default': '/etc/sensu/conf.d/subscriptions.json'},
                'state': {'type': 'str', 'default': 'present', 'choices': ['present', 'absent']},
                # Fix: 'type' was specified twice ('str' then 'bool'), so the
                # duplicate dict key silently discarded 'str'.  Keep the
                # intended boolean type so "yes"/"no" strings are coerced.
                'backup': {'type': 'bool', 'default': 'no'},
                }

    module = AnsibleModule(argument_spec=arg_spec,
                           supports_check_mode=True)

    path = module.params['path']
    name = module.params['name']
    state = module.params['state']
    backup = module.params['backup']

    changed, reasons = sensu_subscription(module, path, name, state, backup)
    module.exit_json(path=path, name=name, changed=changed, msg='OK', reasons=reasons)
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
MarcosCommunity/odoo | addons/web_analytics/__openerp__.py | 305 | 1432 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010-2012 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Google Analytics',
'version': '1.0',
'category': 'Tools',
'complexity': "easy",
'description': """
Google Analytics.
=================
Collects web application usage with Google Analytics.
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com/page/website-builder',
'depends': ['web'],
'data': [
'views/web_analytics.xml',
],
'installable': True,
'active': False,
}
| agpl-3.0 |
guewen/purchase-workflow | __unported__/purchase_group_orders/__openerp__.py | 4 | 1626 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Alexandre Fayolle
# Copyright 2012 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# OpenERP addon manifest: purchase-order grouping by shop and carrier.
{'name' : 'Purchase Group Orders by Shop and Carrier',
 'version' : '0.4',
 'author' : 'Camptocamp',
 'maintainer': 'Camptocamp',
 'category': 'Purchase Management',
 'complexity': "normal", # easy, normal, expert
 'depends' : ['delivery', 'sale', 'purchase',
              ],
 'description': """Only merge PO with the same shop and carrier.
This eases the warehouse managements as the incoming pickings are grouped
in a more convenient way.
""",
 'website': 'http://www.camptocamp.com/',
 'init_xml': [],
 'update_xml': ['purchase_group_orders_view.xml'],
 'demo_xml': [],
 'tests': [],
 'installable': False,
 'auto_install': False,
 'license': 'AGPL-3',
 'application': False
}
| agpl-3.0 |
jamesbdunlop/tk-jbd-submit-mayaplayblast | python/lib/cam_lib.py | 1 | 3114 | import maya.cmds as cmds
from tank.platform.qt import QtCore, QtGui
def _findShotCamera():
    """
    Locate the single shot camera in the scene.

    The shot camera is identified by a custom 'type' attribute on its
    transform node (name suffixes are irrelevant).  Returns the camera
    shape name, or -1 (after warning the user with a message box) when
    zero or more than one candidate is found.
    """
    shot_cams = []
    for shape in cmds.ls(type = 'camera'):
        transform = cmds.listRelatives(shape, parent = True)[0]
        ## Tagged transforms carry a custom 'type' attribute; replace this
        ## lookup if your pipeline identifies the shot camera differently.
        if cmds.objExists('%s.type' % transform):
            shot_cams.append(shape)

    if not shot_cams:
        QtGui.QMessageBox.information(None, "Aborted...", 'No shotCam found!!')
        return -1
    if len(shot_cams) > 1:
        QtGui.QMessageBox.information(None, "Aborted...", 'Make sure you have only ONE shot camera in the scene!')
        return -1
    ## Exactly one candidate: that is the shot camera.
    return shot_cams[0]
def _setCameraDefaults(camera = ''):
    """
    Sets the base defaults for the camera
    @param camera: The name of the camera transform node NOT the shape node!
    @type camera: String
    """
    # Fix: the original first normalised '' to None before testing; both
    # values are falsy, so the dead normalisation has been removed and a
    # single truthiness check is equivalent.
    if camera:
        camName = camera
        camShape = cmds.listRelatives(camera, shapes = True)[0]
        # Configure the view gate/overscan, then pin the display attrs.
        cmds.camera(camName, e = True, displayFilmGate = 0, displayResolution = 1, overscan = 1.19)
        cmds.setAttr("%s.displayGateMask" % camShape, 1)
        cmds.setAttr('%s.displayGateMaskOpacity' % camShape, 1)
        cmds.setAttr('%s.displayGateMaskColor' % camShape, 0, 0, 0, type = 'double3' )
        cmds.setAttr("%s.displayResolution" % camShape, 1)
        cmds.setAttr("%s.displaySafeAction" % camShape, 1)
        cmds.setAttr("%s.journalCommand" % camShape, 0)
        cmds.setAttr("%s.nearClipPlane" % camShape, 0.05)
        cmds.setAttr("%s.overscan" % camShape, 1)
    else:
        cmds.warning('No shotcam found!')
def _createCamGate(camera = '', pathToImage = ''):
    """
    Build (once) an image plane named 'camGate', constrain and parent it
    to the shot camera, and configure it as a fixed gate overlay showing
    pathToImage.  Does nothing if a 'camGate' node already exists.
    """
    if not camera:
        camera = _findShotCamera()
    if not camera:
        # No camera name resolved; nothing to build.
        return
    if cmds.objExists('camGate'):
        # Gate already exists; leave it untouched.
        return
    cmds.imagePlane(n = 'camGate')
    cmds.rename('camGate1', 'camGate')
    # Snap the plane onto the camera, then drop the temporary constraints.
    cmds.pointConstraint('%s' % camera, 'camGate', mo = False, n ='tmpPoint')
    cmds.orientConstraint('%s' % camera, 'camGate', mo = False, n ='tmpOrient')
    cmds.delete(['tmpPoint', 'tmpOrient'])
    cmds.parent('camGate', '%s' % camera)
    cmds.connectAttr('camGateShape.message', '%sShape.imagePlane[0]' % camera, f = True)
    cmds.setAttr('camGate.depth', 0.01)
    cmds.setAttr('camGate.sizeX', 1.710)
    cmds.setAttr('camGate.sizeY', 2)
    cmds.setAttr('camGate.offsetX', 0.004)
    cmds.setAttr('camGate.offsetY', 0.003)
    cmds.setAttr('camGateShape.imageName', pathToImage, type = 'string')
    cmds.setAttr('camGateShape.lockedToCamera', 1)
    cmds.setAttr('camGateShape.displayOnlyIfCurrent', 1)
utecuy/edx-platform | common/lib/xmodule/xmodule/library_tools.py | 154 | 7784 | """
XBlock runtime services for LibraryContentModule
"""
from django.core.exceptions import PermissionDenied
from opaque_keys.edx.locator import LibraryLocator, LibraryUsageLocator
from search.search_engine_base import SearchEngine
from xmodule.library_content_module import ANY_CAPA_TYPE_VALUE
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.exceptions import ItemNotFoundError
from xmodule.capa_module import CapaDescriptor
def normalize_key_for_search(library_key):
    """ Normalizes library key for use with search indexing """
    # Drop both the branch and the version so equivalent keys index identically.
    return library_key.replace(branch=None, version_guid=None)
class LibraryToolsService(object):
"""
Service that allows LibraryContentModule to interact with libraries in the
modulestore.
"""
    def __init__(self, modulestore):
        """Wrap `modulestore`, the store used for all library operations."""
        self.store = modulestore
def _get_library(self, library_key):
"""
Given a library key like "library-v1:ProblemX+PR0B", return the
'library' XBlock with meta-information about the library.
A specific version may be specified.
Returns None on error.
"""
if not isinstance(library_key, LibraryLocator):
library_key = LibraryLocator.from_string(library_key)
try:
return self.store.get_library(
library_key, remove_version=False, remove_branch=False, head_validation=False
)
except ItemNotFoundError:
return None
def get_library_version(self, lib_key):
"""
Get the version (an ObjectID) of the given library.
Returns None if the library does not exist.
"""
library = self._get_library(lib_key)
if library:
# We need to know the library's version so ensure it's set in library.location.library_key.version_guid
assert library.location.library_key.version_guid is not None
return library.location.library_key.version_guid
return None
    def create_block_analytics_summary(self, course_key, block_keys):
        """
        Given a CourseKey and a list of (block_type, block_id) pairs,
        prepare the JSON-ready metadata needed for analytics logging.

        This is [
            {"usage_key": x, "original_usage_key": y, "original_usage_version": z, "descendants": [...]}
        ]
        where the main list contains all top-level blocks, and descendants contains a *flat* list of all
        descendants of the top level blocks, if any.
        """
        def summarize_block(usage_key):
            """ Basic information about the given block """
            # Map the usage key back to where it was copied from (if it
            # originated in a library); both fields are None otherwise.
            orig_key, orig_version = self.store.get_block_original_usage(usage_key)
            return {
                "usage_key": unicode(usage_key),
                "original_usage_key": unicode(orig_key) if orig_key else None,
                "original_usage_version": unicode(orig_version) if orig_version else None,
            }
        result_json = []
        for block_key in block_keys:
            key = course_key.make_usage_key(*block_key)
            info = summarize_block(key)
            info['descendants'] = []
            try:
                block = self.store.get_item(key, depth=None) # Load the item and all descendants
                children = list(getattr(block, "children", []))
                # Flatten the whole subtree; traversal order is not
                # significant for the analytics payload.
                while children:
                    child_key = children.pop()
                    child = self.store.get_item(child_key)
                    info['descendants'].append(summarize_block(child_key))
                    children.extend(getattr(child, "children", []))
            except ItemNotFoundError:
                pass # The block has been deleted
            result_json.append(info)
        return result_json
    def _problem_type_filter(self, library, capa_type):
        """ Filters library children by capa type"""
        # Prefer the search index (fast path); if no search engine is
        # configured, fall back to loading and checking each child block.
        search_engine = SearchEngine.get_search_engine(index="library_index")
        if search_engine:
            filter_clause = {
                "library": unicode(normalize_key_for_search(library.location.library_key)),
                "content_type": CapaDescriptor.INDEX_CONTENT_TYPE,
                "problem_types": capa_type
            }
            search_result = search_engine.search(field_dictionary=filter_clause)
            results = search_result.get('results', [])
            # Each hit's data.id holds the serialized usage locator.
            return [LibraryUsageLocator.from_string(item['data']['id']) for item in results]
        else:
            return [key for key in library.children if self._filter_child(key, capa_type)]
def _filter_child(self, usage_key, capa_type):
"""
Filters children by CAPA problem type, if configured
"""
if usage_key.block_type != "problem":
return False
descriptor = self.store.get_item(usage_key, depth=0)
assert isinstance(descriptor, CapaDescriptor)
return capa_type in descriptor.problem_types
def can_use_library_content(self, block):
"""
Determines whether a modulestore holding a course_id supports libraries.
"""
return self.store.check_supports(block.location.course_key, 'copy_from_template')
def update_children(self, dest_block, user_id, user_perms=None, version=None):
    """
    This method is to be used when the library that a LibraryContentModule
    references has been updated. It will re-fetch all matching blocks from
    the libraries, and copy them as children of dest_block. The children
    will be given new block_ids, but the definition ID used should be the
    exact same definition ID used in the library.

    This method will update dest_block's 'source_library_version' field to
    store the version number of the libraries used, so we easily determine
    if dest_block is up to date or not.
    """
    # Write permission is required on the destination course, if a
    # permissions object was supplied at all.
    if user_perms and not user_perms.can_write(dest_block.location.course_key):
        raise PermissionDenied()
    if not dest_block.source_library_id:
        # No library configured: clear the stored version and do nothing else.
        dest_block.source_library_version = ""
        return
    source_blocks = []
    library_key = dest_block.source_library_key
    if version:
        # Pin to a specific library version instead of the head of the branch.
        library_key = library_key.replace(branch=ModuleStoreEnum.BranchName.library, version_guid=version)
    library = self._get_library(library_key)
    if library is None:
        raise ValueError("Requested library not found.")
    if user_perms and not user_perms.can_read(library_key):
        raise PermissionDenied()
    filter_children = (dest_block.capa_type != ANY_CAPA_TYPE_VALUE)
    if filter_children:
        # Apply simple filtering based on CAPA problem types:
        source_blocks.extend(self._problem_type_filter(library, dest_block.capa_type))
    else:
        source_blocks.extend(library.children)
    with self.store.bulk_operations(dest_block.location.course_key):
        # Record which library version we copied from before copying, so the
        # "up to date" check reflects this operation.
        dest_block.source_library_version = unicode(library.location.library_key.version_guid)
        self.store.update_item(dest_block, user_id)
        # Skip head validation when an explicit (historical) version was requested.
        head_validation = not version
        dest_block.children = self.store.copy_from_template(
            source_blocks, dest_block.location, user_id, head_validation=head_validation
        )
        # ^-- copy_from_template updates the children in the DB
        # but we must also set .children here to avoid overwriting the DB again
def list_available_libraries(self):
    """
    List all known libraries.

    Returns tuples of (LibraryLocator, display_name)
    """
    libraries = []
    for lib in self.store.get_libraries():
        # Strip version/branch so the locator identifies the library itself.
        locator = lib.location.library_key.replace(version_guid=None, branch=None)
        libraries.append((locator, lib.display_name))
    return libraries
| agpl-3.0 |
gis-rpd/pipelines | custom/SG10K/mux_summary.py | 1 | 9302 | #!/usr/bin/env python3
# FIXME hardcoded python because we need for xlsxwriter
"""Creating summary table for SG10K batch
"""
import glob
import os
import csv
import sys
import yaml
import xlsxwriter
# Which of the three report sinks get written.
WRITE_CSV = False
WRITE_XLS = True
WRITE_CONSOLE = False
# Metrics pulled from the samtools-stats summary, in report-column order.
# Keys ending in ':' come verbatim from the stats file; the '[%]' ones are
# derived in parse_summary_from_stats().
SUMMARY_KEYS = ["raw total sequences:",
                #"reads properly paired:",
                'reads properly paired [%]',  # ours
                #"reads duplicated:",
                'reads duplicated [%]',  # ours
                #"bases mapped (cigar):",
                #'% bases mapped',
                #'base coverage',
                "error rate:",
                'insert size average:',
                'insert size standard deviation:']
# Reference panels used for the per-ethnicity contamination (verifyBamID) checks.
ETHNICITIES = ['CHS', 'INS', 'MAS']
# QC thresholds; samples outside these bounds are flagged in the report.
MAX_CONT = 0.01999999
MIN_DEPTH = 13.95
MIN_COV_SOP = 14.95
def parse_summary_from_stats(statsfile):
    """Parse the "SN" (summary numbers) section of a samtools-stats file.

    Returns a dict mapping each SN field name (e.g. "raw total sequences:")
    to its float value, plus three derived percentage entries:
    'bases mapped [%]', 'reads properly paired [%]' and 'reads duplicated [%]'.
    """
    summary = dict()
    with open(statsfile) as fh:
        for line in fh:
            if not line.startswith("SN"):
                continue
            fields = line.strip().split("\t")[1:]
            summary[fields[0]] = float(fields[1])
    # Derived percentages relative to the raw read/base totals.
    total_bases = float(summary["raw total sequences:"] * summary["average length:"])
    total_reads = float(summary["raw total sequences:"])
    summary['bases mapped [%]'] = 100 * summary["bases mapped (cigar):"] / total_bases
    summary['reads properly paired [%]'] = 100 * summary["reads properly paired:"] / total_reads
    summary['reads duplicated [%]'] = 100 * summary["reads duplicated:"] / total_reads
    return summary
def parse_selfsm(selfsm_file):
    """Parse a verifyBamID .selfSM output file into a dict.

    The first line is a '#'-prefixed header, the second holds the values.
    Known numeric columns are converted to float/int; columns whose value
    cannot be converted keep their string form.
    """
    with open(selfsm_file) as fh:
        columns = fh.readline()[1:].split()  # drop the leading '#'
        row = fh.readline().split()
    record = dict(zip(columns, row))
    # Convert the numeric columns, tolerating non-numeric placeholders.
    for keys, convert in ((['AVG_DP', 'FREELK1', 'FREELK0', 'FREEMIX'], float),
                          (['#SNPS', '#READS'], int)):
        for k in keys:
            try:
                record[k] = convert(record[k])
            except ValueError:
                pass
    return record
# print(parse_selfsm("WHH1253/out/WHH1253/WHH1253.bwamem.fixmate.mdups.srt.recal.CHS.selfSM"))
def check_completion(conf_yamls, num_expected):
    """Assert that exactly *num_expected* samples completed successfully.

    For each run config yaml, counts its samples as complete or incomplete
    depending on whether the run's snakemake log ends with a success marker.
    """
    print("Verifying completeness based on {} config yaml files...".format(len(conf_yamls)))
    num_complete = 0
    num_incomplete = 0
    # conf.yaml is assumed to live in the run folder, next to logs/.
    for conf_yaml in conf_yamls:
        with open(conf_yaml) as fh:
            num_samples = len(dict(yaml.safe_load(fh))['samples'])
        snake_log = os.path.join(os.path.dirname(conf_yaml), "logs/snakemake.log")
        with open(snake_log) as fh:
            log_tail = ''.join(fh.readlines()[-10:])
        if 'Pipeline run successfully completed' in log_tail:
            num_complete += num_samples
        else:
            num_incomplete += num_samples
    print("{} completed".format(num_complete))
    print("{} incomplete".format(num_incomplete))
    print("(Note, numbers can be misleading for multisample runs (a single failure anywhere fails all samples)")
    assert num_complete == num_expected
    print("Okay. Proceeding...")
def main(conf_yamls, num_expected):
    """Build the per-sample QC summary report for an SG10K batch.

    Verifies run completeness, then collects samtools-stats metrics,
    verifyBamID contamination values and SOP coverage for every sample,
    writing the resulting table to CSV/XLS/console per the WRITE_* flags.
    """
    check_completion(conf_yamls, num_expected)
    # --- open the requested output sinks ---
    if WRITE_CSV:
        assert not os.path.exists('summary.csv')
        csvfile = open('summary.csv', 'w')
        print("Writing to summary.csv")
        csvwriter = csv.writer(csvfile, delimiter='\t')
    else:
        print("Not writing cvs-file")
    if WRITE_XLS:
        assert not os.path.exists('summary.xls')
        xls = "summary.xls"
        print("Writing to summary.xls")
        workbook = xlsxwriter.Workbook(xls)
        worksheet = workbook.add_worksheet()
        # Number formats per column group.
        fmtheader = workbook.add_format({'bold': True, 'align': 'center'})
        fmtintcomma = workbook.add_format({'num_format': '###,###,###,###0'})
        fmt00 = workbook.add_format({'num_format': '0.0'})
        fmt00000 = workbook.add_format({'num_format': '0.0000'})
        # NOTE(review): fmt000 duplicates fmt00's format string; possibly
        # '0.00' was intended -- confirm before changing.
        fmt000 = workbook.add_format({'num_format': '0.0'})
        worksheet.set_row(0, None, fmtheader)
        worksheet.set_column('B:B', 20, fmtintcomma)
        worksheet.set_column('C:D', None, fmt00)
        worksheet.set_column('F:H', None, fmt00)
        worksheet.set_column('E:E', None, fmt00000)
        worksheet.set_column('I:K', None, fmt00000)
        worksheet.set_column('L:L', None, fmt000)  # qc
        # Red bold italic marks threshold violations (depth, SOP cov, contamination).
        format1 = workbook.add_format({'bold': 1, 'italic': 1, 'font_color': '#FF0000'})
        worksheet.conditional_format('H2:H100',
                                     {'type': 'cell',
                                      'criteria': '<',
                                      'value': MIN_DEPTH,
                                      'format': format1})
        worksheet.conditional_format('L2:L100',
                                     {'type': 'cell',
                                      'criteria': '<',
                                      'value': MIN_COV_SOP,
                                      'format': format1})
        worksheet.conditional_format('I2:K100',
                                     {'type': 'cell',
                                      'criteria': '>',
                                      'value': MAX_CONT,
                                      'format': format1})
        xls_row_no = 0
    else:
        print("Not writing to xls-file")
    if WRITE_CONSOLE:
        print("Writing to console")
    else:
        print("Not writing to console")
    # --- build and emit the header row ---
    header = ["sample"]
    for key in SUMMARY_KEYS:
        key = key.strip(" :")
        header.append(key)
    header.append("Avg. Depth")
    for key in ETHNICITIES:
        header.append("Cont. " + key)
    header.append("Cov. (SOP 06-2017)")
    if WRITE_CONSOLE:
        print("\t".join(header))
    if WRITE_CSV:
        csvwriter.writerow(header)
    if WRITE_XLS:
        for xls_col_no, cell_data in enumerate(header):
            worksheet.write(xls_row_no, xls_col_no, cell_data)
        xls_row_no += 1
    # --- one data row per sample ---
    for f in conf_yamls:
        outdir = os.path.join(os.path.dirname(f), "out")
        with open(f) as fh:
            cfg = dict(yaml.safe_load(fh))
        for sample in cfg['samples']:
            statsfile = glob.glob(os.path.join(outdir, sample, "*.bwamem.fixmate.mdups.srt.recal.bamstats/stats.txt"))
            assert len(statsfile) == 1
            statsfile = statsfile[0]
            summary = parse_summary_from_stats(statsfile)
            row = [sample]
            row.extend([summary[k] for k in SUMMARY_KEYS])
            selfsm_files = glob.glob(os.path.join(outdir, sample, "*.bwamem.fixmate.mdups.srt.recal.*selfSM"))
            selfsm = dict()
            # NOTE(review): this inner loop variable shadows the outer conf
            # yaml 'f'; harmless here (outer 'f' is reused only at loop top),
            # but worth renaming.
            for f in selfsm_files:
                ethnicity = f.split(".")[-2]
                selfsm[ethnicity] = parse_selfsm(f)
            assert sorted(list(selfsm.keys())) == sorted(ETHNICITIES)
            #avg_dp = set([v['AVG_DP'] for v in selfsm.values()])
            #assert len(avg_dp) == 1, avg_dp
            # rounding errors
            avg_dp = set([v['AVG_DP'] for v in selfsm.values()])
            avg_dp = list(avg_dp)[0]
            if avg_dp < MIN_DEPTH:
                sys.stderr.write("DP threshold reached for {}: {} < {}\n".format(sample, avg_dp, MIN_DEPTH))
            row.append(avg_dp)
            # One contamination (FREEMIX) column per reference ethnicity.
            for e in ETHNICITIES:
                cont = selfsm[e]['FREEMIX']
                if cont > MAX_CONT:
                    sys.stderr.write("CONT threshold reached for {}: {} > {}\n".format(sample, cont, MAX_CONT))
                row.append(cont)
            covsop_file = glob.glob(os.path.join(outdir, sample, "*.bwamem.fixmate.mdups.srt.recal.qc-062017.txt"))
            if covsop_file:
                covsop_file = covsop_file[0]
                with open(covsop_file) as fh:
                    l = fh.readline()
                # Last field is a base count; 3.1e9 approximates the genome size.
                covsop = int(l.split()[-1]) / float(3.1*10**9)
                if covsop < MIN_COV_SOP:
                    sys.stderr.write("covsop smaller threshold for {}: {} < {}\n".format(sample, covsop, MIN_COV_SOP))
            else:
                sys.stderr.write("covsop missing for {}\n".format(sample))
                covsop = ""
            row.append(covsop)
            if WRITE_CONSOLE:
                print("\t".join(["{}".format(v) for v in row]))
            if WRITE_CSV:
                csvwriter.writerow(row)
            if WRITE_XLS:
                for xls_col_no, cell_data in enumerate(row):
                    worksheet.write(xls_row_no, xls_col_no, cell_data)
                xls_row_no += 1
    if WRITE_CSV:
        csvfile.close()
    if WRITE_XLS:
        workbook.close()
        print("Please format xls file now and mark any outliers report above")
    print("Successful completion")
if __name__ == "__main__":
    # Usage: mux_summary.py NUM_EXPECTED conf.yaml [conf.yaml ...]
    if len(sys.argv) < 3:
        sys.stderr.write("FATAL: need 2 arguments, num_of_libraries and conf.yaml(s)\n")
        sys.exit(1)
    NUM_EXPECTED = int(sys.argv[1])
    CONF_YAMLS = sys.argv[2:]
    assert CONF_YAMLS, ("No conf.yaml file/s given as argument")
    # Fail early if any of the given config files is missing.
    assert all([os.path.exists(f) for f in CONF_YAMLS])
    main(CONF_YAMLS, NUM_EXPECTED)
| mit |
Venturi/oldcms | env/lib/python2.7/site-packages/django/contrib/gis/geoip/libgeoip.py | 106 | 1081 | import os
from ctypes import CDLL
from ctypes.util import find_library
from django.conf import settings
# Creating the settings dictionary with any settings, if needed.
GEOIP_SETTINGS = {key: getattr(settings, key)
                  for key in ('GEOIP_PATH', 'GEOIP_LIBRARY_PATH', 'GEOIP_COUNTRY', 'GEOIP_CITY')
                  if hasattr(settings, key)}
lib_path = GEOIP_SETTINGS.get('GEOIP_LIBRARY_PATH', None)

# The shared library for the GeoIP C API. May be downloaded
# from http://www.maxmind.com/download/geoip/api/c/
if lib_path:
    # An explicit library path was configured; skip the name-based lookup.
    lib_name = None
else:
    # TODO: Is this really the library name for Windows?
    lib_name = 'GeoIP'

# Getting the path to the GeoIP library.
if lib_name:
    lib_path = find_library(lib_name)
if lib_path is None:
    raise RuntimeError('Could not find the GeoIP library (tried "%s"). '
                       'Try setting GEOIP_LIBRARY_PATH in your settings.' % lib_name)
lgeoip = CDLL(lib_path)

# Getting the C `free` for the platform.
if os.name == 'nt':
    libc = CDLL('msvcrt')  # Windows C runtime
else:
    libc = CDLL(None)  # the current process (POSIX)
free = libc.free
| apache-2.0 |
ChromiumWebApps/chromium | third_party/python_gflags/setup.py | 376 | 1991 | #!/usr/bin/env python
# Copyright (c) 2007, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from setuptools import setup

# Distribution metadata for python-gflags: ships the two flag modules plus
# the gflags2man man-page generator script.
setup(name='python-gflags',
      version='2.0',
      description='Google Commandline Flags Module',
      license='BSD',
      author='Google Inc. and others',
      author_email='google-gflags@googlegroups.com',
      url='http://code.google.com/p/python-gflags',
      py_modules=["gflags", "gflags_validators"],
      data_files=[("bin", ["gflags2man.py"])],
      include_package_data=True,
      )
| bsd-3-clause |
Tesora-Release/tesora-trove | trove/guestagent/strategies/restore/couchdb_impl.py | 1 | 1543 | # Copyright 2016 IBM Corporation
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from trove.guestagent.common import operating_system
from trove.guestagent.datastore.couchdb import service
from trove.guestagent.strategies.restore import base
class CouchDBBackup(base.RestoreRunner):
    """Restore strategy for CouchDB backups (tar-stream extraction)."""

    __strategy_name__ = 'couchdbbackup'
    base_restore_cmd = 'sudo tar xPf -'

    def __init__(self, *args, **kwargs):
        status = service.CouchDBAppStatus()
        self.appStatus = status
        self.app = service.CouchDBApp(status)
        super(CouchDBBackup, self).__init__(*args, **kwargs)

    def post_restore(self):
        """
        To restore from backup, all we need to do is untar the compressed
        database files into the database directory and change its ownership.
        """
        # base_restore_cmd has already unpacked the files; reclaim ownership
        # for the couchdb user before restarting the service.
        operating_system.chown(service.COUCHDB_LIB_DIR,
                               'couchdb',
                               'couchdb',
                               as_root=True)
        self.app.restart()
| apache-2.0 |
mfherbst/spack | var/spack/repos/builtin/packages/perl-task-weaken/package.py | 5 | 1576 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PerlTaskWeaken(PerlPackage):
    """Ensure that a platform has weaken support"""

    homepage = "http://search.cpan.org/~adamk/Task-Weaken-1.04/lib/Task/Weaken.pm"
    url = "http://search.cpan.org/CPAN/authors/id/A/AD/ADAMK/Task-Weaken-1.04.tar.gz"

    # Second argument is the release tarball's checksum (md5-style hash).
    version('1.04', 'affd0c395515bb95d29968404d7fe6de')
| lgpl-2.1 |
tredly/tredly | components/tredly-host/actions/move.py | 2 | 2062 | # Performs actions requested by the user
import builtins
from subprocess import Popen, PIPE
import urllib.request
import os.path
import time
import argparse
from objects.tredly.tredlyhost import *
from objects.tredly.container import *
from includes.util import *
from includes.defines import *
from includes.output import *
# config the host
class ActionMove:
    """Dispatches a 'move' command requested by the user."""

    def __init__(self, subject, target, identifier, actionArgs):
        tredlyHost = TredlyHost()
        # Only containers can be moved; anything else is an unknown command.
        if subject != "container":
            e_error("No command " + subject + " found.")
            exit(1)
        # target is the container uuid, identifier the destination host.
        self.moveContainer(target, identifier)

    def moveContainer(self, containerUuid, host):
        """Validate the request and move the given container to *host*."""
        tredlyHost = TredlyHost()
        # Checks:
        if containerUuid is None:
            e_error("Please include a UUID to move.")
            exit(1)
        if host is None:
            e_error("Please include a host to move to")
            exit(1)
        if not tredlyHost.containerExists(containerUuid):
            e_error("Container " + containerUuid + " does not exist")
            exit(1)
        # TODO: make sure the destination host partition exists
        partitionName = tredlyHost.getContainerPartition(containerUuid)
        # ZFS dataset that holds this container on the local host.
        localDataset = '/'.join([ZFS_TREDLY_PARTITIONS_DATASET, partitionName,
                                 TREDLY_CONTAINER_DIR_NAME, containerUuid])
        container = Container()
        container.loadFromZFS(localDataset)
        e_header("Moving container " + containerUuid + " to host " + host)
        # Move the container, and exit non-zero if it fails.
        if not container.moveToHost(host):
            exit(1)
ombt/analytics | books/programming_in_python_3/book_examples/py31eg/untar.py | 2 | 2170 | #!/usr/bin/env python3
# Copyright (c) 2008-11 Qtrac Ltd. All rights reserved.
# This program or module is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version. It is provided for educational
# purposes and is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
# Useful on Windows where tar isn't supplied as standard
# bz2 support is optional (some Python builds omit the module).
BZ2_AVAILABLE = True
try:
    import bz2
except ImportError:
    BZ2_AVAILABLE = False

import os
import string
import sys
import tarfile

# Member-name prefixes that would escape the extraction directory:
# POSIX absolute paths, backslash paths, and Windows drive letters ("C:").
UNTRUSTED_PREFIXES = tuple(["/", "\\"] +
                           [c + ":" for c in string.ascii_letters])
def main():
    """Validate the command line and unpack the given tarball."""
    if len(sys.argv) != 2 or sys.argv[1] in {"-h", "--help"}:
        error("usage: untar.py archive.{{tar,{0}tar.gz}}".format(
              "tar.bz2," if BZ2_AVAILABLE else ""), 2)
    archive = sys.argv[1]
    lowered = archive.lower()
    if not lowered.endswith((".tar", ".tar.gz", ".tar.bz2")):
        error("{0} doesn't appear to be a tarball".format(archive))
    if lowered.endswith(".bz2") and not BZ2_AVAILABLE:
        error("bzip2 decompression is not available")
    if not os.path.exists(archive):
        error("{0} doesn't appear to exist".format(archive))
    untar(archive)
def untar(archive):
    """Extract *archive* into the current directory, skipping unsafe members.

    Members with absolute/drive-letter paths or ".." components are ignored.
    Symlink and hardlink members whose *target* looks similarly untrusted are
    also skipped: a link pointing outside the extraction directory would
    otherwise let later members (or readers) escape it, bypassing the
    member-name checks.
    """
    tar = None
    try:
        tar = tarfile.open(archive)
        for member in tar.getmembers():
            if member.name.startswith(UNTRUSTED_PREFIXES):
                print("untrusted prefix, ignoring", member.name)
            elif ".." in member.name:
                print("suspect path, ignoring", member.name)
            elif ((member.issym() or member.islnk()) and
                  (member.linkname.startswith(UNTRUSTED_PREFIXES) or
                   ".." in member.linkname)):
                # Link target escapes the extraction directory.
                print("suspect link target, ignoring", member.name)
            else:
                tar.extract(member)
                print("unpacked", member.name)
    except (tarfile.TarError, EnvironmentError) as err:
        error(err)
    finally:
        if tar is not None:
            tar.close()
def error(message, exit_status=1):
    """Print *message* and terminate the process with *exit_status*."""
    print(message)
    raise SystemExit(exit_status)
main()
| mit |
NeCTAR-RC/neutron | neutron/plugins/ml2/drivers/helpers.py | 39 | 6245 | # Copyright (c) 2014 Thales Services SAS
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_log import log
from neutron.common import exceptions as exc
from neutron.common import utils
from neutron.plugins.ml2 import driver_api as api
LOG = log.getLogger(__name__)

# Number of free candidate rows fetched when allocating from a pool; a random
# one is then picked to reduce contention between concurrent allocators.
IDPOOL_SELECT_SIZE = 100
class BaseTypeDriver(api.TypeDriver):
    """BaseTypeDriver for functions common to Segment and flat."""

    def __init__(self):
        try:
            self.physnet_mtus = utils.parse_mappings(
                cfg.CONF.ml2.physical_network_mtus
            )
        except Exception:
            # parse_mappings() yields a mapping, so fall back to an empty
            # dict (the original fell back to a list, which is
            # type-inconsistent and breaks callers using .get()).
            self.physnet_mtus = {}

    def get_mtu(self, physical_network=None):
        """Return the configured segment MTU (physical_network unused here)."""
        return cfg.CONF.ml2.segment_mtu
class SegmentTypeDriver(BaseTypeDriver):
    """SegmentTypeDriver for segment allocation.

    Provide methods helping to perform segment allocation fully or partially
    specified.
    """

    def __init__(self, model):
        super(SegmentTypeDriver, self).__init__()
        self.model = model
        # The model's primary-key columns identify a "raw segment";
        # 'allocated' is bookkeeping state, not part of the identity.
        self.primary_keys = set(dict(model.__table__.columns))
        self.primary_keys.remove("allocated")

    def allocate_fully_specified_segment(self, session, **raw_segment):
        """Allocate segment fully specified by raw_segment.

        If segment exists, then try to allocate it and return db object
        If segment does not exists, then try to create it and return db object
        If allocation/creation failed, then return None
        """
        network_type = self.get_type()
        try:
            with session.begin(subtransactions=True):
                alloc = (session.query(self.model).filter_by(**raw_segment).
                         first())
                if alloc:
                    if alloc.allocated:
                        # Segment already allocated
                        return
                    else:
                        # Segment not allocated
                        LOG.debug("%(type)s segment %(segment)s allocate "
                                  "started ",
                                  {"type": network_type,
                                   "segment": raw_segment})
                        # Conditional UPDATE guards against a concurrent
                        # allocator grabbing the row between SELECT and here.
                        count = (session.query(self.model).
                                 filter_by(allocated=False, **raw_segment).
                                 update({"allocated": True}))
                        if count:
                            LOG.debug("%(type)s segment %(segment)s allocate "
                                      "done ",
                                      {"type": network_type,
                                       "segment": raw_segment})
                            return alloc
                        # Segment allocated or deleted since select
                        LOG.debug("%(type)s segment %(segment)s allocate "
                                  "failed: segment has been allocated or "
                                  "deleted",
                                  {"type": network_type,
                                   "segment": raw_segment})
                # Segment to create or already allocated
                LOG.debug("%(type)s segment %(segment)s create started",
                          {"type": network_type, "segment": raw_segment})
                alloc = self.model(allocated=True, **raw_segment)
                alloc.save(session)
                LOG.debug("%(type)s segment %(segment)s create done",
                          {"type": network_type, "segment": raw_segment})
        except db_exc.DBDuplicateEntry:
            # Segment already allocated (insert failure)
            alloc = None
            LOG.debug("%(type)s segment %(segment)s create failed",
                      {"type": network_type, "segment": raw_segment})
        return alloc

    def allocate_partially_specified_segment(self, session, **filters):
        """Allocate model segment from pool partially specified by filters.

        Return allocated db object or None.
        """
        network_type = self.get_type()
        with session.begin(subtransactions=True):
            select = (session.query(self.model).
                      filter_by(allocated=False, **filters))
            # Selected segment can be allocated before update by someone else,
            allocs = select.limit(IDPOOL_SELECT_SIZE).all()
            if not allocs:
                # No resource available
                return
            # Random choice spreads concurrent allocators over the candidate
            # rows, reducing UPDATE collisions.
            alloc = random.choice(allocs)
            raw_segment = dict((k, alloc[k]) for k in self.primary_keys)
            LOG.debug("%(type)s segment allocate from pool "
                      "started with %(segment)s ",
                      {"type": network_type,
                       "segment": raw_segment})
            count = (session.query(self.model).
                     filter_by(allocated=False, **raw_segment).
                     update({"allocated": True}))
            if count:
                LOG.debug("%(type)s segment allocate from pool "
                          "success with %(segment)s ",
                          {"type": network_type,
                           "segment": raw_segment})
                return alloc
            # Segment allocated since select
            LOG.debug("Allocate %(type)s segment from pool "
                      "failed with segment %(segment)s",
                      {"type": network_type,
                       "segment": raw_segment})
            # saving real exception in case we exceeded amount of attempts
            raise db_exc.RetryRequest(
                exc.NoNetworkFoundInMaximumAllowedAttempts())
| apache-2.0 |
pandeyop/tempest | tempest/api/compute/servers/test_multiple_create_negative.py | 11 | 2936 | # Copyright 2013 IBM Corp
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest_lib.common.utils import data_utils
from tempest_lib import exceptions as lib_exc
from tempest.api.compute import base
from tempest import test
class MultipleCreateNegativeTestJSON(base.BaseV2ComputeTest):
    """Negative tests for the multiple-create (min_count/max_count) server API."""

    _name = 'multiple-create-test'

    def _generate_name(self):
        # Randomized so concurrent runs do not collide on server names.
        return data_utils.rand_name(self._name)

    def _create_multiple_servers(self, name=None, wait_until=None, **kwargs):
        """
        This is the right way to create_multiple servers and manage to get the
        created servers into the servers list to be cleaned up after all.
        """
        kwargs['name'] = kwargs.get('name', self._generate_name())
        body = self.create_test_server(**kwargs)
        return body

    @test.attr(type=['negative'])
    @test.idempotent_id('daf29d8d-e928-4a01-9a8c-b129603f3fc0')
    def test_min_count_less_than_one(self):
        # min_count below 1 must be rejected with a 400.
        invalid_min_count = 0
        self.assertRaises(lib_exc.BadRequest, self._create_multiple_servers,
                          min_count=invalid_min_count)

    @test.attr(type=['negative'])
    @test.idempotent_id('999aa722-d624-4423-b813-0d1ac9884d7a')
    def test_min_count_non_integer(self):
        # Non-integer min_count must be rejected with a 400.
        invalid_min_count = 2.5
        self.assertRaises(lib_exc.BadRequest, self._create_multiple_servers,
                          min_count=invalid_min_count)

    @test.attr(type=['negative'])
    @test.idempotent_id('a6f9c2ab-e060-4b82-b23c-4532cb9390ff')
    def test_max_count_less_than_one(self):
        # max_count below 1 must be rejected with a 400.
        invalid_max_count = 0
        self.assertRaises(lib_exc.BadRequest, self._create_multiple_servers,
                          max_count=invalid_max_count)

    @test.attr(type=['negative'])
    @test.idempotent_id('9c5698d1-d7af-4c80-b971-9d403135eea2')
    def test_max_count_non_integer(self):
        # Non-integer max_count must be rejected with a 400.
        invalid_max_count = 2.5
        self.assertRaises(lib_exc.BadRequest, self._create_multiple_servers,
                          max_count=invalid_max_count)

    @test.attr(type=['negative'])
    @test.idempotent_id('476da616-f1ef-4271-a9b1-b9fc87727cdf')
    def test_max_count_less_than_min_count(self):
        # max_count smaller than min_count must be rejected with a 400.
        min_count = 3
        max_count = 2
        self.assertRaises(lib_exc.BadRequest, self._create_multiple_servers,
                          min_count=min_count,
                          max_count=max_count)
| apache-2.0 |
CEG-FYP-OpenStack/scheduler | nova/tests/functional/wsgi/test_flavor_manage.py | 40 | 9071 | # Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from nova import context
from nova import db
from nova import exception as ex
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional import integrated_helpers as helper
from nova.tests.unit import policy_fixture
def rand_flavor(**kwargs):
    """Build a random flavor dict; keyword arguments override generated fields."""
    flav = {
        'name': 'name-%s' % helper.generate_random_alphanumeric(10),
        'id': helper.generate_random_alphanumeric(10),
        # ram/vcpus get +1 so they are never zero (the API rejects 0).
        'ram': 1 + int(helper.generate_random_numeric(2)),
        'disk': int(helper.generate_random_numeric(3)),
        'vcpus': 1 + int(helper.generate_random_numeric(1)),
    }
    flav.update(kwargs)
    return flav
class FlavorManageFullstack(test.TestCase):
"""Tests for flavors manage administrative command.
Extension: os-flavors-manage
os-flavors-manage adds a set of admin functions to the flavors
resource for the creation and deletion of flavors.
POST /v2/flavors:
::
{
'name': NAME, # string, required unique
'id': ID, # string, required unique
'ram': RAM, # in MB, required
'vcpus': VCPUS, # int value, required
'disk': DISK, # in GB, required
'OS-FLV-EXT-DATA:ephemeral', # in GB, ephemeral disk size
'is_public': IS_PUBLIC, # boolean
'swap': SWAP, # in GB?
'rxtx_factor': RXTX, # ???
}
Returns Flavor
DELETE /v2/flavors/ID
Functional Test Scope:
This test starts the wsgi stack for the nova api services, uses an
in memory database to ensure the path through the wsgi layer to
the database.
"""
def setUp(self):
super(FlavorManageFullstack, self).setUp()
self.useFixture(policy_fixture.RealPolicyFixture())
api_fixture = self.useFixture(nova_fixtures.OSAPIFixture())
# NOTE(sdague): because this test is primarily an admin API
# test default self.api to the admin api.
self.api = api_fixture.admin_api
self.user_api = api_fixture.api
def assertFlavorDbEqual(self, flav, flavdb):
# a mapping of the REST params to the db fields
mapping = {
'name': 'name',
'disk': 'root_gb',
'ram': 'memory_mb',
'vcpus': 'vcpus',
'id': 'flavorid',
'swap': 'swap'
}
for k, v in six.iteritems(mapping):
if k in flav:
self.assertEqual(flav[k], flavdb[v],
"%s != %s" % (flav, flavdb))
def assertFlavorAPIEqual(self, flav, flavapi):
# for all keys in the flavor, ensure they are correctly set in
# flavapi response.
for k, v in six.iteritems(flav):
if k in flavapi:
self.assertEqual(flav[k], flavapi[k],
"%s != %s" % (flav, flavapi))
else:
self.fail("Missing key: %s in flavor: %s" % (k, flavapi))
def assertFlavorInList(self, flav, flavlist):
for item in flavlist['flavors']:
if flav['id'] == item['id']:
self.assertEqual(flav['name'], item['name'])
return
self.fail("%s not found in %s" % (flav, flavlist))
def assertFlavorNotInList(self, flav, flavlist):
for item in flavlist['flavors']:
if flav['id'] == item['id']:
self.fail("%s found in %s" % (flav, flavlist))
def test_flavor_manage_func_negative(self):
"""Test flavor manage edge conditions.
- Bogus body is a 400
- Unknown flavor is a 404
- Deleting unknown flavor is a 404
"""
# Test for various API failure conditions
# bad body is 400
resp = self.api.api_post('flavors', '', check_response_status=False)
self.assertEqual(400, resp.status)
# get unknown flavor is 404
resp = self.api.api_delete('flavors/foo', check_response_status=False)
self.assertEqual(404, resp.status)
# delete unknown flavor is 404
resp = self.api.api_delete('flavors/foo', check_response_status=False)
self.assertEqual(404, resp.status)
ctx = context.get_admin_context()
# bounds conditions - invalid vcpus
flav = {'flavor': rand_flavor(vcpus=0)}
resp = self.api.api_post('flavors', flav, check_response_status=False)
self.assertEqual(400, resp.status, resp)
# ... and ensure that we didn't leak it into the db
self.assertRaises(ex.FlavorNotFound,
db.flavor_get_by_flavor_id,
ctx, flav['flavor']['id'])
# bounds conditions - invalid ram
flav = {'flavor': rand_flavor(ram=0)}
resp = self.api.api_post('flavors', flav, check_response_status=False)
self.assertEqual(400, resp.status)
# ... and ensure that we didn't leak it into the db
self.assertRaises(ex.FlavorNotFound,
db.flavor_get_by_flavor_id,
ctx, flav['flavor']['id'])
# NOTE(sdague): if there are other bounds conditions that
# should be checked, stack them up here.
def test_flavor_manage_deleted(self):
"""Ensure the behavior around a deleted flavor is stable.
- Fetching a deleted flavor works, and returns the flavor info.
- Listings should not contain deleted flavors
"""
# create a deleted flavor
new_flav = {'flavor': rand_flavor()}
self.api.api_post('flavors', new_flav)
self.api.api_delete('flavors/%s' % new_flav['flavor']['id'])
# It is valid to directly fetch details of a deleted flavor
resp = self.api.api_get('flavors/%s' % new_flav['flavor']['id'])
self.assertEqual(200, resp.status)
self.assertFlavorAPIEqual(new_flav['flavor'], resp.body['flavor'])
# deleted flavor should not show up in a list
resp = self.api.api_get('flavors')
self.assertFlavorNotInList(new_flav['flavor'], resp.body)
def test_flavor_manage_func(self):
    """Exercise the full create/list/delete lifecycle of a flavor.

    Creates a flavor through the API, checks that it landed in the
    database and in the API listing, deletes it, then checks that it
    is hidden from the database and that a second delete 404s.
    """
    admin_ctx = context.get_admin_context()
    payload = {
        'flavor': rand_flavor(),
    }
    flavor_id = payload['flavor']['id']
    # Creation must persist the flavor to the database.
    self.api.api_post('flavors', payload)
    db_flavor = db.flavor_get_by_flavor_id(admin_ctx, flavor_id)
    self.assertFlavorDbEqual(payload['flavor'], db_flavor)
    # The new flavor must be visible in the API listing.
    listing = self.api.api_get('flavors')
    self.assertFlavorInList(payload['flavor'], listing.body)
    # Deleting hides the flavor from normal database lookups ...
    self.api.api_delete('flavors/%s' % flavor_id)
    self.assertRaises(ex.FlavorNotFound,
                      db.flavor_get_by_flavor_id,
                      admin_ctx, flavor_id)
    # ... and a repeated delete is a clean 404, not a server error.
    response = self.api.api_delete('flavors/%s' % flavor_id,
                                   check_response_status=False)
    self.assertEqual(404, response.status)
def test_flavor_manage_permissions(self):
    """Verify flavor create/delete is admin-only.

    A regular user must get 403 on both operations, and the rejected
    calls must leave the database untouched.
    """
    admin_ctx = context.get_admin_context()
    payload = {'flavor': rand_flavor()}
    flavor_id = payload['flavor']['id']
    # A non-admin POST is rejected outright ...
    response = self.user_api.api_post('flavors', payload,
                                      check_response_status=False)
    self.assertEqual(403, response.status)
    # ... and nothing may have been written to the database.
    self.assertRaises(ex.FlavorNotFound,
                      db.flavor_get_by_flavor_id,
                      admin_ctx, flavor_id)
    # Create the flavor with admin credentials so the non-admin delete
    # attempt has something to target.
    self.api.api_post('flavors', payload)
    # A non-admin DELETE is likewise rejected ...
    response = self.user_api.api_delete('flavors/%s' % flavor_id,
                                        check_response_status=False)
    self.assertEqual(403, response.status)
    # ... and the flavor must still exist; this lookup raises
    # FlavorNotFound if the delete leaked through.
    db.flavor_get_by_flavor_id(admin_ctx, flavor_id)
| apache-2.0 |
hainm/scikit-learn | examples/classification/plot_classifier_comparison.py | 181 | 4699 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=====================
Classifier comparison
=====================
A comparison of a several classifiers in scikit-learn on synthetic datasets.
The point of this example is to illustrate the nature of decision boundaries
of different classifiers.
This should be taken with a grain of salt, as the intuition conveyed by
these examples does not necessarily carry over to real datasets.
Particularly in high-dimensional spaces, data can more easily be separated
linearly and the simplicity of classifiers such as naive Bayes and linear SVMs
might lead to better generalization than is achieved by other classifiers.
The plots show training points in solid colors and testing points
semi-transparent. The lower right shows the classification accuracy on the test
set.
"""
print(__doc__)

# Code source: Gaël Varoquaux
#              Andreas Müller
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
# NOTE(review): cross_validation, lda and qda target the pre-0.18
# scikit-learn layout; later releases moved them to model_selection and
# discriminant_analysis.  Confirm the sklearn version this example is
# pinned to before modernizing.
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_moons, make_circles, make_classification
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.lda import LDA
from sklearn.qda import QDA

h = .02  # step size in the mesh used to rasterize decision surfaces

# Display names, kept in lockstep (by position) with `classifiers`.
names = ["Nearest Neighbors", "Linear SVM", "RBF SVM", "Decision Tree",
         "Random Forest", "AdaBoost", "Naive Bayes", "LDA", "QDA"]
classifiers = [
    KNeighborsClassifier(3),
    SVC(kernel="linear", C=0.025),
    SVC(gamma=2, C=1),
    DecisionTreeClassifier(max_depth=5),
    RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1),
    AdaBoostClassifier(),
    GaussianNB(),
    LDA(),
    QDA()]

# Third dataset: a linearly separable problem jittered with uniform noise.
X, y = make_classification(n_features=2, n_redundant=0, n_informative=2,
                           random_state=1, n_clusters_per_class=1)
rng = np.random.RandomState(2)
X += 2 * rng.uniform(size=X.shape)
linearly_separable = (X, y)

datasets = [make_moons(noise=0.3, random_state=0),
            make_circles(noise=0.2, factor=0.5, random_state=1),
            linearly_separable
            ]

figure = plt.figure(figsize=(27, 9))
i = 1  # running subplot index: rows = datasets, cols = classifiers + 1
# iterate over datasets
for ds in datasets:
    # preprocess dataset, split into training and test part
    X, y = ds
    X = StandardScaler().fit_transform(X)
    # NOTE(review): no random_state here, so the split (and the printed
    # accuracy scores) differ from run to run.
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.4)
    x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
    y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                         np.arange(y_min, y_max, h))
    # just plot the dataset first (leftmost column of each row)
    cm = plt.cm.RdBu
    cm_bright = ListedColormap(['#FF0000', '#0000FF'])
    ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
    # Plot the training points
    ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
    # and testing points
    ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6)
    ax.set_xlim(xx.min(), xx.max())
    ax.set_ylim(yy.min(), yy.max())
    ax.set_xticks(())
    ax.set_yticks(())
    i += 1
    # iterate over classifiers
    for name, clf in zip(names, classifiers):
        ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
        clf.fit(X_train, y_train)
        score = clf.score(X_test, y_test)
        # Plot the decision boundary. For that, we will assign a color to each
        # point in the mesh [x_min, m_max]x[y_min, y_max].
        # Use the signed decision value where available, otherwise the
        # positive-class probability, so the contour shading is smooth.
        if hasattr(clf, "decision_function"):
            Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
        else:
            Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
        # Put the result into a color plot
        Z = Z.reshape(xx.shape)
        ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)
        # Plot also the training points
        ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
        # and testing points
        ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright,
                   alpha=0.6)
        ax.set_xlim(xx.min(), xx.max())
        ax.set_ylim(yy.min(), yy.max())
        ax.set_xticks(())
        ax.set_yticks(())
        ax.set_title(name)
        # Test accuracy in the lower-right corner of each panel.
        ax.text(xx.max() - .3, yy.min() + .3, ('%.2f' % score).lstrip('0'),
                size=15, horizontalalignment='right')
        i += 1

figure.subplots_adjust(left=.02, right=.98)
plt.show()
| bsd-3-clause |
crafty78/ansible | lib/ansible/plugins/callback/skippy.py | 116 | 1317 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.plugins.callback.default import CallbackModule as CallbackModule_default
class CallbackModule(CallbackModule_default):
    """Stdout callback identical to the default one, minus skip noise.

    Inherits all behavior from the default stdout callback plugin and
    overrides only the two "skipped" event handlers so that skipped
    tasks and skipped loop items produce no output at all.
    """

    CALLBACK_VERSION = 2.0
    CALLBACK_TYPE = 'stdout'
    CALLBACK_NAME = 'skippy'

    def v2_runner_on_skipped(self, result):
        """Swallow task-skipped events instead of printing them."""

    def v2_runner_item_on_skipped(self, result):
        """Swallow per-item skipped events instead of printing them."""
| gpl-3.0 |
jjimenezg93/ai-state_machines | moai/3rdparty/freetype-2.4.4/builds/mac/ascii2mpw.py | 830 | 1033 | #!/usr/bin/env python
import sys
import string  # unused since the str.replace() rewrite below; retained
               # so the script's import surface is unchanged

# Replacement table, applied top to bottom for ASCII -> MPW.  The first
# five entries turn escape sequences such as "\\xA5" into the actual
# MacRoman byte (octal escapes: "\245" == chr(0xA5)); then real newlines
# become MPW carriage returns, and only afterwards does the literal
# two-character "\\n" become a real newline, so it cannot be clobbered
# by the newline swap.
_ASC_TO_MPW = [
    ("\\xA5", "\245"),
    ("\\xB6", "\266"),
    ("\\xC4", "\304"),
    ("\\xC5", "\305"),
    ("\\xFF", "\377"),
    ("\n", "\r"),
    ("\\n", "\n"),
]


def asc_to_mpw(line):
    """Convert one ASCII-escaped line to its MPW form.

    Uses str.replace() rather than the Python-2-only string.replace()
    module function, so the script now also runs under Python 3.
    """
    for old, new in _ASC_TO_MPW:
        line = line.replace(old, new)
    return line


def mpw_to_asc(line):
    """Convert one MPW line back to the ASCII-escaped form.

    Exact inverse of asc_to_mpw(): each replacement is undone in the
    reverse order it was applied.
    """
    for old, new in reversed(_ASC_TO_MPW):
        line = line.replace(new, old)
    return line


def main():
    """Filter stdin to stdout; '-r' selects the MPW -> ASCII direction.

    Mirrors the original script: no arguments means ASCII -> MPW, '-r'
    means the reverse, and any other argument does nothing.
    """
    if len(sys.argv) == 1:
        convert = asc_to_mpw
    elif sys.argv[1] == "-r":
        convert = mpw_to_asc
    else:
        return
    for line in sys.stdin.readlines():
        sys.stdout.write(convert(line))


if __name__ == "__main__":
    main()
| mit |
h-hirokawa/ansible | test/integration/cleanup_rax.py | 229 | 6516 | #!/usr/bin/env python
import os
import re
import yaml
import argparse
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
def rax_list_iterator(svc, *args, **kwargs):
    """Exhaustively page through a pyrax list-style API call.

    Repeatedly re-issues the call with ``marker`` set to the id of the
    last item seen, dropping the duplicated boundary item the marker
    convention produces, until a page with fewer than two items signals
    the end of the collection.  The listing method defaults to 'list'
    and can be overridden with a ``method`` keyword.
    """
    method_name = kwargs.pop('method', 'list')
    fetch = getattr(svc, method_name)
    collected = fetch(*args, **kwargs)
    while collected:
        page = fetch(*args, marker=collected[-1].id, **kwargs)
        # The marker item is returned again at the head of the next
        # page; drop our copy before extending to avoid duplicates.
        if page and collected[-1].id == page[0].id:
            collected.pop()
        collected.extend(page)
        if len(page) < 2:
            break
    return collected
def parse_args():
    """Build the command line parser for this cleanup script and parse it.

    Returns an argparse namespace with ``assumeyes`` (skip the y/n
    prompts) and ``match_re`` (regex selecting which resources to wipe).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-y', '--yes',
        dest='assumeyes', action='store_true', default=False,
        help="Don't prompt for confirmation")
    parser.add_argument(
        '--match',
        dest='match_re', default='^ansible-testing',
        help='Regular expression used to find resources '
             '(default: %(default)s)')
    return parser.parse_args()
def authenticate():
    """Load ./credentials.yml and log the pyrax session in.

    Any failure (unreadable file, bad YAML, rejected credentials) is
    converted into SystemExit so the script dies with a clean message.
    """
    creds_path = os.path.realpath('./credentials.yml')
    try:
        with open(creds_path) as handle:
            creds = yaml.load(handle)
    except Exception as err:
        raise SystemExit(err)
    try:
        pyrax.set_credentials(creds.get('rackspace_username'),
                              creds.get('rackspace_api_key'))
    except Exception as err:
        raise SystemExit(err)
def prompt_and_delete(item, prompt, assumeyes):
    """Destroy *item* after an optional interactive confirmation.

    When *assumeyes* is false the user is asked first; answering
    anything but 'y' leaves the item alone.  Items exposing delete()
    and/or terminate() get every available method invoked.
    """
    if not assumeyes:
        assumeyes = raw_input(prompt).lower() == 'y'
    has_delete = hasattr(item, 'delete')
    has_terminate = hasattr(item, 'terminate')
    assert has_delete or has_terminate, \
        "Class <%s> has no delete or terminate attribute" % item.__class__
    if not assumeyes:
        return
    if has_delete:
        item.delete()
        print ("Deleted %s" % item)
    if has_terminate:
        item.terminate()
        print ("Terminated %s" % item)
def delete_rax(args):
    """Delete CloudServers whose names match the cleanup pattern."""
    print ("--- Cleaning CloudServers matching '%s'" % args.match_re)
    # The compute API filters server names server-side via search_opts.
    search_opts = {'name': '^%s' % args.match_re}
    for region in pyrax.identity.services.compute.regions:
        compute = pyrax.connect_to_cloudservers(region=region)
        for server in rax_list_iterator(compute.servers,
                                        search_opts=search_opts):
            prompt_and_delete(server,
                              'Delete matching %s? [y/n]: ' % server,
                              args.assumeyes)
def delete_rax_clb(args):
    """Delete Cloud Load Balancers whose names match the cleanup pattern."""
    print ("--- Cleaning Cloud Load Balancers matching '%s'" % args.match_re)
    for region in pyrax.identity.services.load_balancer.regions:
        clb = pyrax.connect_to_cloud_loadbalancers(region=region)
        for balancer in rax_list_iterator(clb):
            # No server-side name filter here, so match client-side.
            if not re.search(args.match_re, balancer.name):
                continue
            prompt_and_delete(balancer,
                              'Delete matching %s? [y/n]: ' % balancer,
                              args.assumeyes)
def delete_rax_keypair(args):
    """Delete compute key pairs whose names match the cleanup pattern."""
    print ("--- Cleaning Key Pairs matching '%s'" % args.match_re)
    for region in pyrax.identity.services.compute.regions:
        compute = pyrax.connect_to_cloudservers(region=region)
        # Key pair listings are small, so no pagination helper needed.
        matching = [kp for kp in compute.keypairs.list()
                    if re.search(args.match_re, kp.name)]
        for keypair in matching:
            prompt_and_delete(keypair,
                              'Delete matching %s? [y/n]: ' % keypair,
                              args.assumeyes)
def delete_rax_network(args):
    """Delete Cloud Networks whose names match the cleanup pattern."""
    print ("--- Cleaning Cloud Networks matching '%s'" % args.match_re)
    for region in pyrax.identity.services.network.regions:
        networks = pyrax.connect_to_cloud_networks(region=region)
        for net in networks.list():
            if not re.search(args.match_re, net.name):
                continue
            prompt_and_delete(net,
                              'Delete matching %s? [y/n]: ' % net,
                              args.assumeyes)
def delete_rax_cbs(args):
    """Function for deleting Cloud Block Storage volumes"""
    # NOTE(review): the docstring previously said "Cloud Networks" --
    # this cleaner actually removes Cloud Block Storage volumes.  It
    # also iterates pyrax.identity.services.network.regions rather than
    # a volume-service region list; presumably the region sets are
    # identical, but verify against the pyrax service catalog.
    print ("--- Cleaning Cloud Block Storage matching '%s'" % args.match_re)
    for region in pyrax.identity.services.network.regions:
        cbs = pyrax.connect_to_cloud_blockstorage(region=region)
        for volume in cbs.list():
            if re.search(args.match_re, volume.name):
                prompt_and_delete(volume,
                                  'Delete matching %s? [y/n]: ' % volume,
                                  args.assumeyes)
def delete_rax_cdb(args):
    """Delete Cloud Databases instances matching the cleanup pattern."""
    print ("--- Cleaning Cloud Databases matching '%s'" % args.match_re)
    for region in pyrax.identity.services.database.regions:
        cdb = pyrax.connect_to_cloud_databases(region=region)
        for instance in rax_list_iterator(cdb):
            if not re.search(args.match_re, instance.name):
                continue
            prompt_and_delete(instance,
                              'Delete matching %s? [y/n]: ' % instance,
                              args.assumeyes)
def _force_delete_rax_scaling_group(manager):
    """Return a replacement _delete that appends force=true to the URI.

    Autoscale groups refuse deletion while they still contain members;
    the force flag tears the members down along with the group.
    """
    def _force_delete(uri):
        manager.api.method_delete('%s?force=true' % uri)
    return _force_delete
def delete_rax_scaling_group(args):
    """Delete Autoscale Groups whose names match the cleanup pattern."""
    print ("--- Cleaning Autoscale Groups matching '%s'" % args.match_re)
    for region in pyrax.identity.services.autoscale.regions:
        autoscale = pyrax.connect_to_autoscale(region=region)
        for group in rax_list_iterator(autoscale):
            if not re.search(args.match_re, group.name):
                continue
            # Patch the manager so the delete is forced (see the
            # _force_delete_rax_scaling_group helper above).
            group.manager._delete = \
                _force_delete_rax_scaling_group(group.manager)
            prompt_and_delete(group,
                              'Delete matching %s? [y/n]: ' % group,
                              args.assumeyes)
def main():
    """Authenticate, then run every delete_rax* cleaner in name order."""
    if not HAS_PYRAX:
        raise SystemExit('The pyrax python module is required for this script')
    args = parse_args()
    authenticate()
    # Cleaners are discovered by naming convention, so new ones are
    # picked up automatically; the underscore-prefixed helper is
    # excluded because its name does not start with 'delete_rax'.
    cleaners = sorted(
        (obj for name, obj in globals().items()
         if name.startswith('delete_rax')),
        key=lambda f: f.__name__)
    for cleaner in cleaners:
        try:
            cleaner(args)
        except Exception as e:
            print ("---- %s failed (%s)" % (cleaner.__name__, e.message))
# Script entry point: run all cleaners, exiting quietly on Ctrl-C.
if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        print ('\nExiting...')
| gpl-3.0 |
mattt416/neutron | neutron/tests/unit/services/test_provider_configuration.py | 8 | 8733 | # Copyright 2013 VMware, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
from neutron.common import exceptions as n_exc
from neutron import manager
from neutron.plugins.common import constants
from neutron.services import provider_configuration as provconf
from neutron.tests import base
class ParseServiceProviderConfigurationTestCase(base.BaseTestCase):
    """Tests for provconf.parse_service_provider_opt().

    NeutronModule.service_providers is mocked so each test can feed raw
    '<service_type>:<name>:<driver>[:default]' strings straight into
    the parser.
    """

    def setUp(self):
        super(ParseServiceProviderConfigurationTestCase, self).setUp()
        # mock.patch...start() is undone automatically by BaseTestCase's
        # cleanup fixtures.
        self.service_providers = mock.patch.object(
            provconf.NeutronModule, 'service_providers').start()

    def _set_override(self, service_providers):
        # Helper: make the mocked hook return the given raw strings.
        self.service_providers.return_value = service_providers

    def test_default_service_provider_configuration(self):
        # With nothing configured, the option defaults to an empty list.
        providers = cfg.CONF.service_providers.service_provider
        self.assertEqual(providers, [])

    def test_parse_single_service_provider_opt(self):
        # A three-field entry parses with default=False.
        self._set_override([constants.LOADBALANCER +
                            ':lbaas:driver_path'])
        expected = {'service_type': constants.LOADBALANCER,
                    'name': 'lbaas',
                    'driver': 'driver_path',
                    'default': False}
        res = provconf.parse_service_provider_opt()
        self.assertEqual(len(res), 1)
        self.assertEqual(res, [expected])

    def test_parse_single_default_service_provider_opt(self):
        # A trailing ':default' field flags the provider as the default.
        self._set_override([constants.LOADBALANCER +
                            ':lbaas:driver_path:default'])
        expected = {'service_type': constants.LOADBALANCER,
                    'name': 'lbaas',
                    'driver': 'driver_path',
                    'default': True}
        res = provconf.parse_service_provider_opt()
        self.assertEqual(len(res), 1)
        self.assertEqual(res, [expected])

    def test_parse_multi_service_provider_opt(self):
        self._set_override([constants.LOADBALANCER +
                            ':lbaas:driver_path',
                            constants.LOADBALANCER + ':name1:path1',
                            constants.LOADBALANCER +
                            ':name2:path2:default'])
        res = provconf.parse_service_provider_opt()
        # This parsing crosses repos if additional projects are installed,
        # so check that at least what we expect is there; there may be more.
        self.assertTrue(len(res) >= 3)

    def test_parse_service_provider_invalid_format(self):
        # An unrecognized fourth field ('def' instead of 'default') is
        # rejected ...
        self._set_override([constants.LOADBALANCER +
                            ':lbaas:driver_path',
                            'svc_type:name1:path1:def'])
        self.assertRaises(n_exc.Invalid, provconf.parse_service_provider_opt)
        # ... as is an entry with empty name/driver fields.
        self._set_override([constants.LOADBALANCER +
                            ':',
                            'svc_type:name1:path1:def'])
        self.assertRaises(n_exc.Invalid, provconf.parse_service_provider_opt)

    def test_parse_service_provider_name_too_long(self):
        # A 256-character provider name is rejected as too long.
        name = 'a' * 256
        self._set_override([constants.LOADBALANCER +
                            ':' + name + ':driver_path',
                            'svc_type:name1:path1:def'])
        self.assertRaises(n_exc.Invalid, provconf.parse_service_provider_opt)
class ProviderConfigurationTestCase(base.BaseTestCase):
    """Tests for the provconf.ProviderConfiguration container."""

    def setUp(self):
        super(ProviderConfigurationTestCase, self).setUp()
        self.service_providers = mock.patch.object(
            provconf.NeutronModule, 'service_providers').start()

    def _set_override(self, service_providers):
        # Helper: make the mocked hook return the given raw strings.
        self.service_providers.return_value = service_providers

    def test_ensure_driver_unique(self):
        # A driver path already registered may not be added again ...
        pconf = provconf.ProviderConfiguration()
        pconf.providers[('svctype', 'name')] = {'driver': 'driver',
                                                'default': True}
        self.assertRaises(n_exc.Invalid,
                          pconf._ensure_driver_unique, 'driver')
        # ... while an unused driver name passes the check (returns None).
        self.assertIsNone(pconf._ensure_driver_unique('another_driver1'))

    def test_ensure_default_unique(self):
        # Only one default provider is allowed per service type.
        pconf = provconf.ProviderConfiguration()
        pconf.providers[('svctype', 'name')] = {'driver': 'driver',
                                                'default': True}
        self.assertRaises(n_exc.Invalid,
                          pconf._ensure_default_unique,
                          'svctype', True)
        self.assertIsNone(pconf._ensure_default_unique('svctype', False))
        self.assertIsNone(pconf._ensure_default_unique('svctype1', True))
        self.assertIsNone(pconf._ensure_default_unique('svctype1', False))

    def test_add_provider(self):
        pconf = provconf.ProviderConfiguration()
        prov = {'service_type': constants.LOADBALANCER,
                'name': 'name',
                'driver': 'path',
                'default': False}
        pconf.add_provider(prov)
        # Providers are keyed by (service_type, name) with the
        # remaining attributes stored as the value.
        self.assertEqual(len(pconf.providers), 1)
        self.assertEqual(list(pconf.providers.keys()),
                         [(constants.LOADBALANCER, 'name')])
        self.assertEqual(list(pconf.providers.values()),
                         [{'driver': 'path', 'default': False}])

    def test_add_duplicate_provider(self):
        pconf = provconf.ProviderConfiguration()
        prov = {'service_type': constants.LOADBALANCER,
                'name': 'name',
                'driver': 'path',
                'default': False}
        pconf.add_provider(prov)
        # The second registration fails and the container is unchanged.
        self.assertRaises(n_exc.Invalid, pconf.add_provider, prov)
        self.assertEqual(len(pconf.providers), 1)

    def test_get_service_providers(self):
        self._set_override([constants.LOADBALANCER + ':name:path',
                            constants.LOADBALANCER + ':name2:path2',
                            'st2:name:driver:default',
                            'st3:name2:driver2:default'])
        provs = [{'service_type': constants.LOADBALANCER,
                  'name': 'name',
                  'driver': 'path',
                  'default': False},
                 {'service_type': constants.LOADBALANCER,
                  'name': 'name2',
                  'driver': 'path2',
                  'default': False},
                 {'service_type': 'st2',
                  'name': 'name',
                  'driver': 'driver',
                  'default': True
                  },
                 {'service_type': 'st3',
                  'name': 'name2',
                  'driver': 'driver2',
                  'default': True}]
        pconf = provconf.ProviderConfiguration()
        # Filtering on (name, service_type) must return exactly the
        # matching provider record.
        for prov in provs:
            p = pconf.get_service_providers(
                filters={'name': [prov['name']],
                         'service_type': prov['service_type']}
            )
            self.assertEqual(p, [prov])

    def test_get_service_providers_with_fields(self):
        self._set_override([constants.LOADBALANCER + ":name:path",
                            constants.LOADBALANCER + ":name2:path2"])
        provs = [{'service_type': constants.LOADBALANCER,
                  'name': 'name',
                  'driver': 'path',
                  'default': False},
                 {'service_type': constants.LOADBALANCER,
                  'name': 'name2',
                  'driver': 'path2',
                  'default': False}]
        pconf = provconf.ProviderConfiguration()
        # With fields=['name'] the results are trimmed to that one key.
        for prov in provs:
            p = pconf.get_service_providers(
                filters={'name': [prov['name']],
                         'service_type': prov['service_type']},
                fields=['name']
            )
            self.assertEqual(p, [{'name': prov['name']}])
class GetProviderDriverClassTestCase(base.BaseTestCase):
    """Tests for provconf.get_provider_driver_class()."""

    def test_get_provider_driver_class_hit(self):
        # A known alias in the core-plugins namespace resolves to the
        # plugin's fully qualified class path.
        resolved = provconf.get_provider_driver_class(
            'ml2',
            namespace=manager.CORE_PLUGINS_NAMESPACE)
        self.assertEqual('neutron.plugins.ml2.plugin.Ml2Plugin', resolved)

    def test_get_provider_driver_class_miss(self):
        # An unknown alias falls through and is returned unchanged.
        self.assertEqual('foo', provconf.get_provider_driver_class('foo'))
| apache-2.0 |
mateoqac/unqTip | language/vxgbs/lang/gbs_constructs.py | 1 | 10125 | #
# Copyright (C) 2011, 2012 Pablo Barenbaum <foones@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Definition of program constructs, used in the lint step.
A program construct is any kind of value that an
identifier might take, such as "user-defined function"
or "built-in constant".
"""
import common.i18n as i18n
import common.position
class ProgramConstruct(object):
    """Common base for every Gobstones construct (constants, variables,
    functions, procedures, entry points)."""

    def __init__(self, name):
        self._name = name

    def name(self):
        """Name under which this construct is known."""
        return self._name

    def where(self):
        """Human readable description of the definition site."""
        return 'at unknown place'

    def area(self):
        """Program area pointing at the definition site."""
        return common.position.ProgramArea()

    def underlying_construct(self):
        """The construct behind any rename/alias (self by default)."""
        return self


class RenameConstruct(ProgramConstruct):
    """An alias: exposes an existing construct under a new name."""

    def __init__(self, new_name, value):
        self._name = new_name
        self._value = value

    def underlying_construct(self):
        """The original construct this alias stands for."""
        return self._value


## Construct kinds (callable, entry point and atomic)

class CallableConstruct(ProgramConstruct):
    """A construct that can be invoked (function or procedure)."""
    _KIND = 'callable'

    def kind(self):
        """Kind tag used to classify constructs."""
        return self._KIND

    def num_params(self):
        """Arity, derived from the parameter name list."""
        return len(self.params())


class EntryPointConstruct(ProgramConstruct):
    """A construct that cannot be called directly (an entry point)."""
    _KIND = 'entrypoint'

    def kind(self):
        """Kind tag used to classify constructs."""
        return self._KIND


class AtomicConstruct(ProgramConstruct):
    """A construct that is neither callable nor a collection."""
    _KIND = 'atomic'

    def kind(self):
        """Kind tag used to classify constructs."""
        return self._KIND


## Construct types (entry points, procedure, function, constant,
## variable and type)

class ProgramEntryPoint(EntryPointConstruct):
    """The `program` block of a Gobstones source."""
    _TYPE = 'program'

    def type(self):
        """Type tag of this construct."""
        return self._TYPE


class InteractiveEntryPoint(EntryPointConstruct):
    """The `interactive program` block of a Gobstones source."""
    _TYPE = 'interactive'

    def type(self):
        """Type tag of this construct."""
        return self._TYPE


class Procedure(CallableConstruct):
    """A Gobstones procedure."""
    _TYPE = 'procedure'

    def type(self):
        """Type tag of this construct."""
        return self._TYPE


class Function(CallableConstruct):
    """A Gobstones function."""
    _TYPE = 'function'

    def type(self):
        """Type tag of this construct."""
        return self._TYPE


class Constant(AtomicConstruct):
    """A Gobstones constant."""
    _TYPE = 'constant'

    def type(self):
        """Type tag of this construct."""
        return self._TYPE


class Variable(AtomicConstruct):
    """A Gobstones variable."""
    _TYPE = 'variable'

    def type(self):
        """Type tag of this construct."""
        return self._TYPE


class Type(AtomicConstruct):
    """A Gobstones type."""
    _TYPE = 'type'

    def type(self):
        """Type tag of this construct."""
        return self._TYPE
##
class Builtin(ProgramConstruct):
    """A construct supplied by the Gobstones runtime itself."""

    def __init__(self, name, gbstype, primitive):
        ProgramConstruct.__init__(self, name)
        self._gbstype = gbstype
        self._primitive = primitive

    def gbstype(self):
        """Gobstones type of this builtin."""
        return self._gbstype

    def where(self):
        """Definition site description (always 'as a built-in')."""
        return i18n.i18n('as a built-in')

    def is_builtin(self):
        """Builtin constructs always report True."""
        return True

    def primitive(self):
        """Runtime value denoted by this builtin."""
        return self._primitive


class BuiltinType(Builtin, Type):
    """A builtin type; its primitive value is the type itself."""

    def __init__(self, name, gbstype):
        super(BuiltinType, self).__init__(name, gbstype, gbstype)


class BuiltinCallable(Builtin):
    """A builtin that can be invoked (procedure or function).

    Parameter names are derived from the Gobstones type and lowercased,
    e.g. Poner(color).
    """

    def __init__(self, name, gbstype, primitive):
        Builtin.__init__(self, name, gbstype, primitive)
        self._params = [repr(p).lower() for p in gbstype.parameters()]

    def params(self):
        """Parameter names derived from the callable's type."""
        return self._params


class BuiltinProcedure(BuiltinCallable, Procedure):
    """A builtin procedure."""


class BuiltinFunction(BuiltinCallable, Function):
    """A builtin function; caches how many results its type declares."""

    def __init__(self, name, gbstype, primitive):
        BuiltinCallable.__init__(self, name, gbstype, primitive)
        self._nretvals = len(gbstype.result())

    def num_retvals(self):
        """Number of values this function returns."""
        return self._nretvals


class BuiltinFieldGetter(BuiltinFunction):
    """A builtin function that reads a record field."""

    def __init__(self, name, gbstype, primitive=None):
        super(BuiltinFieldGetter, self).__init__(name, gbstype, primitive)


class BuiltinConstant(Builtin, Constant):
    """A builtin constant."""
##
class UserConstruct(ProgramConstruct):
    """A construct defined in the user's source program."""

    def __init__(self, name, tree):
        ProgramConstruct.__init__(self, name)
        self._tree = tree

    def tree(self):
        """AST node holding this construct's definition."""
        return self._tree

    def where(self):
        """Definition site, formatted from the AST start position."""
        return i18n.i18n('at %s') % (self._tree.pos_begin.file_row_col(),)

    def area(self):
        """Program area surrounding the definition's AST node."""
        return common.position.ProgramAreaNear(self._tree)

    def is_builtin(self):
        """User defined constructs always report False."""
        return False


class UserEntryPoint(UserConstruct):
    """A user defined entry point."""

    def identifier(self):
        """Identifier node of the entry point (AST child 1)."""
        return self.tree().children[1]


class UserProgramEntryPoint(UserEntryPoint, ProgramEntryPoint):
    """A user defined `program` entry point."""


class UserInteractiveEntryPoint(UserEntryPoint, ProgramEntryPoint):
    """A user defined `interactive program` entry point.

    NOTE(review): this inherits ProgramEntryPoint (type() == 'program')
    rather than InteractiveEntryPoint -- confirm whether that is
    intentional before changing it.
    """


class UserCallable(UserConstruct):
    """A user defined construct that can be invoked."""

    def params(self):
        """Parameter names as written in the source (AST child 2)."""
        return [p.value for p in self.tree().children[2].children]

    def identifier(self):
        """Identifier node of the routine (AST child 1)."""
        return self.tree().children[1]


class UserProcedure(UserCallable, Procedure):
    """A user defined procedure."""


class UserFunction(UserCallable, Function):
    """A user defined function."""

    def num_retvals(self):
        """Number of returned values, read off the final return tuple."""
        routine_body = self.tree().children[3]
        return_clause = routine_body.children[-1]
        returned_tuple = return_clause.children[1]
        return len(returned_tuple.children)


class UserVariable(UserConstruct, Variable):
    """A user defined variable."""

    def identifier(self):
        """A variable's identifier is its whole AST node."""
        return self.tree()


class UserType(UserConstruct, Type):
    """A user defined type."""

    def identifier(self):
        """Identifier node of the type (AST child 1)."""
        return self.tree().children[1]


class UserParameter(UserVariable):
    """A parameter of a user defined routine."""
    _TYPE = 'parameter'

    def type(self):
        """Type tag of this construct."""
        return self._TYPE


class UserIndex(UserVariable):
    """An index variable of repeatWith, repeat or foreach."""
    _TYPE = 'index'

    def type(self):
        """Type tag of this construct."""
        return self._TYPE
## Compiled constructs
class UserCompiledConstruct(ProgramConstruct):
    """A construct restored from compiled form (no AST available)."""

    def __init__(self, name):
        ProgramConstruct.__init__(self, name)

    def is_builtin(self):
        """Compiled constructs always report False."""
        return False


class UserCompiledCallable(ProgramConstruct):
    """A compiled construct that can be invoked."""

    def __init__(self, name, params):
        ProgramConstruct.__init__(self, name)
        self._params = params

    def params(self):
        """Parameter names recorded at compile time."""
        return self._params


class UserCompiledProcedure(UserCompiledCallable, Procedure):
    """A compiled procedure."""


class UserCompiledFunction(UserCompiledCallable, Function):
    """A compiled function."""
class UserCompiledEntrypoint(UserCompiledConstruct, UserEntryPoint):
"Represents a compiled entrypoint."
pass | gpl-3.0 |
yuanagain/seniorthesis | venv/lib/python2.7/site-packages/scipy/io/matlab/tests/test_byteordercodes.py | 126 | 1044 | ''' Tests for byteorder module '''
from __future__ import division, print_function, absolute_import
import sys
from numpy.testing import assert_raises, assert_, run_module_suite
import scipy.io.matlab.byteordercodes as sibc
def test_native():
    # sibc's notion of native endianness must agree with sys.byteorder.
    assert_(sibc.sys_is_le == (sys.byteorder == 'little'))
def test_to_numpy():
    # 'native'/'swapped' resolve according to the platform endianness.
    if sys.byteorder == 'little':
        native, swapped = '<', '>'
    else:
        native, swapped = '>', '<'
    assert_(sibc.to_numpy_code('native') == native)
    assert_(sibc.to_numpy_code('swapped') == swapped)
    # '=' is a synonym for 'native'.
    assert_(sibc.to_numpy_code('native') == sibc.to_numpy_code('='))
    assert_(sibc.to_numpy_code('big') == '>')
    # All spellings of each endianness map to the numpy one-char code.
    for code in ('little', '<', 'l', 'L', 'le'):
        assert_(sibc.to_numpy_code(code) == '<')
    for code in ('big', '>', 'b', 'B', 'be'):
        assert_(sibc.to_numpy_code(code) == '>')
    # Unrecognized strings are rejected.
    assert_raises(ValueError, sibc.to_numpy_code, 'silly string')
if __name__ == "__main__":
run_module_suite()
| mit |
incorrectusername/coala-bears | bears/c_languages/codeclone_detection/ClangCloneDetectionBear.py | 23 | 1992 | from bears.c_languages.ClangBear import clang_available, ClangBear
from bears.c_languages.codeclone_detection.ClangFunctionDifferenceBear import (
ClangFunctionDifferenceBear)
from coalib.bears.GlobalBear import GlobalBear
from coalib.results.Result import Result
from coalib.results.RESULT_SEVERITY import RESULT_SEVERITY
class ClangCloneDetectionBear(GlobalBear):
    # Only runs when libclang is available; language/requirement
    # metadata is shared with ClangBear.
    check_prerequisites = classmethod(clang_available)
    LANGUAGES = ClangBear.LANGUAGES
    REQUIREMENTS = ClangBear.REQUIREMENTS
    CAN_DETECT = {'Duplication'}
    BEAR_DEPS = {ClangFunctionDifferenceBear}

    def run(self,
            dependency_results: dict,
            max_clone_difference: float=0.185):
        '''
        Checks the given code for similar functions that are probably
        redundant.

        :param max_clone_difference: The maximum difference a clone should
                                     have.
        '''
        # Unpack the two payloads produced by the dependency bear:
        # pairwise function differences and the per-function counting
        # matrices (attached to results for debugging).
        dependency_output = dependency_results[
            ClangFunctionDifferenceBear.__name__]
        differences = dependency_output[0].contents
        count_matrices = dependency_output[1].contents
        self.debug('Creating results...')
        for first, second, difference in differences:
            if difference >= max_clone_difference:
                continue
            yield Result.from_values(
                self,
                'Code clone found. The other occurrence is at file '
                '{file}, line {line}, function {function}. The '
                'difference is {difference}%.'.format(
                    file=second[0],
                    line=second[1],
                    function=second[2],
                    difference=difference),
                file=first[0],
                severity=RESULT_SEVERITY.MAJOR,
                line=first[1],
                debug_msg=[count_matrices[first],
                           count_matrices[second]])
alheinecke/tensorflow-xsmm | tensorflow/contrib/distributions/python/ops/bijectors/cholesky_outer_product.py | 85 | 1182 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""CholeskyOuterProduct bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.contrib.distributions.python.ops.bijectors.cholesky_outer_product_impl import *
# pylint: enable=wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
# Only the CholeskyOuterProduct bijector is part of this module's public
# API; remove_undocumented() strips every other name pulled in by the
# wildcard import above.
_allowed_symbols = ["CholeskyOuterProduct"]
remove_undocumented(__name__, _allowed_symbols)
| apache-2.0 |
sontek/rethinkdb | external/v8_3.30.33.16/tools/v8heapconst.py | 40 | 11667 | # Copyright 2013 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# This file is automatically generated from the V8 source and should not
# be modified manually, run 'make grokdump' instead to update this file.
# List of known V8 instance types.
INSTANCE_TYPES = {
64: "STRING_TYPE",
68: "ONE_BYTE_STRING_TYPE",
65: "CONS_STRING_TYPE",
69: "CONS_ONE_BYTE_STRING_TYPE",
67: "SLICED_STRING_TYPE",
71: "SLICED_ONE_BYTE_STRING_TYPE",
66: "EXTERNAL_STRING_TYPE",
70: "EXTERNAL_ONE_BYTE_STRING_TYPE",
74: "EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE",
82: "SHORT_EXTERNAL_STRING_TYPE",
86: "SHORT_EXTERNAL_ONE_BYTE_STRING_TYPE",
90: "SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE",
0: "INTERNALIZED_STRING_TYPE",
4: "ONE_BYTE_INTERNALIZED_STRING_TYPE",
1: "CONS_INTERNALIZED_STRING_TYPE",
5: "CONS_ONE_BYTE_INTERNALIZED_STRING_TYPE",
2: "EXTERNAL_INTERNALIZED_STRING_TYPE",
6: "EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE",
10: "EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE",
18: "SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE",
22: "SHORT_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE",
26: "SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE",
128: "SYMBOL_TYPE",
129: "MAP_TYPE",
130: "CODE_TYPE",
131: "ODDBALL_TYPE",
132: "CELL_TYPE",
133: "PROPERTY_CELL_TYPE",
134: "HEAP_NUMBER_TYPE",
135: "FOREIGN_TYPE",
136: "BYTE_ARRAY_TYPE",
137: "FREE_SPACE_TYPE",
138: "EXTERNAL_INT8_ARRAY_TYPE",
139: "EXTERNAL_UINT8_ARRAY_TYPE",
140: "EXTERNAL_INT16_ARRAY_TYPE",
141: "EXTERNAL_UINT16_ARRAY_TYPE",
142: "EXTERNAL_INT32_ARRAY_TYPE",
143: "EXTERNAL_UINT32_ARRAY_TYPE",
144: "EXTERNAL_FLOAT32_ARRAY_TYPE",
145: "EXTERNAL_FLOAT64_ARRAY_TYPE",
146: "EXTERNAL_UINT8_CLAMPED_ARRAY_TYPE",
147: "FIXED_INT8_ARRAY_TYPE",
148: "FIXED_UINT8_ARRAY_TYPE",
149: "FIXED_INT16_ARRAY_TYPE",
150: "FIXED_UINT16_ARRAY_TYPE",
151: "FIXED_INT32_ARRAY_TYPE",
152: "FIXED_UINT32_ARRAY_TYPE",
153: "FIXED_FLOAT32_ARRAY_TYPE",
154: "FIXED_FLOAT64_ARRAY_TYPE",
155: "FIXED_UINT8_CLAMPED_ARRAY_TYPE",
157: "FILLER_TYPE",
158: "DECLARED_ACCESSOR_DESCRIPTOR_TYPE",
159: "DECLARED_ACCESSOR_INFO_TYPE",
160: "EXECUTABLE_ACCESSOR_INFO_TYPE",
161: "ACCESSOR_PAIR_TYPE",
162: "ACCESS_CHECK_INFO_TYPE",
163: "INTERCEPTOR_INFO_TYPE",
164: "CALL_HANDLER_INFO_TYPE",
165: "FUNCTION_TEMPLATE_INFO_TYPE",
166: "OBJECT_TEMPLATE_INFO_TYPE",
167: "SIGNATURE_INFO_TYPE",
168: "TYPE_SWITCH_INFO_TYPE",
170: "ALLOCATION_MEMENTO_TYPE",
169: "ALLOCATION_SITE_TYPE",
171: "SCRIPT_TYPE",
172: "CODE_CACHE_TYPE",
173: "POLYMORPHIC_CODE_CACHE_TYPE",
174: "TYPE_FEEDBACK_INFO_TYPE",
175: "ALIASED_ARGUMENTS_ENTRY_TYPE",
176: "BOX_TYPE",
179: "FIXED_ARRAY_TYPE",
156: "FIXED_DOUBLE_ARRAY_TYPE",
180: "CONSTANT_POOL_ARRAY_TYPE",
181: "SHARED_FUNCTION_INFO_TYPE",
182: "JS_MESSAGE_OBJECT_TYPE",
185: "JS_VALUE_TYPE",
186: "JS_DATE_TYPE",
187: "JS_OBJECT_TYPE",
188: "JS_CONTEXT_EXTENSION_OBJECT_TYPE",
189: "JS_GENERATOR_OBJECT_TYPE",
190: "JS_MODULE_TYPE",
191: "JS_GLOBAL_OBJECT_TYPE",
192: "JS_BUILTINS_OBJECT_TYPE",
193: "JS_GLOBAL_PROXY_TYPE",
194: "JS_ARRAY_TYPE",
195: "JS_ARRAY_BUFFER_TYPE",
196: "JS_TYPED_ARRAY_TYPE",
197: "JS_DATA_VIEW_TYPE",
184: "JS_PROXY_TYPE",
198: "JS_SET_TYPE",
199: "JS_MAP_TYPE",
200: "JS_WEAK_MAP_TYPE",
201: "JS_WEAK_SET_TYPE",
202: "JS_REGEXP_TYPE",
203: "JS_FUNCTION_TYPE",
183: "JS_FUNCTION_PROXY_TYPE",
177: "DEBUG_INFO_TYPE",
178: "BREAK_POINT_INFO_TYPE",
}
# List of known V8 maps.
KNOWN_MAPS = {
0x08081: (136, "ByteArrayMap"),
0x080a9: (129, "MetaMap"),
0x080d1: (131, "OddballMap"),
0x080f9: (4, "OneByteInternalizedStringMap"),
0x08121: (179, "FixedArrayMap"),
0x08149: (134, "HeapNumberMap"),
0x08171: (137, "FreeSpaceMap"),
0x08199: (157, "OnePointerFillerMap"),
0x081c1: (157, "TwoPointerFillerMap"),
0x081e9: (132, "CellMap"),
0x08211: (133, "GlobalPropertyCellMap"),
0x08239: (181, "SharedFunctionInfoMap"),
0x08261: (179, "NativeContextMap"),
0x08289: (130, "CodeMap"),
0x082b1: (179, "ScopeInfoMap"),
0x082d9: (179, "FixedCOWArrayMap"),
0x08301: (156, "FixedDoubleArrayMap"),
0x08329: (180, "ConstantPoolArrayMap"),
0x08351: (179, "HashTableMap"),
0x08379: (128, "SymbolMap"),
0x083a1: (64, "StringMap"),
0x083c9: (68, "OneByteStringMap"),
0x083f1: (65, "ConsStringMap"),
0x08419: (69, "ConsOneByteStringMap"),
0x08441: (67, "SlicedStringMap"),
0x08469: (71, "SlicedOneByteStringMap"),
0x08491: (66, "ExternalStringMap"),
0x084b9: (74, "ExternalStringWithOneByteDataMap"),
0x084e1: (70, "ExternalOneByteStringMap"),
0x08509: (82, "ShortExternalStringMap"),
0x08531: (90, "ShortExternalStringWithOneByteDataMap"),
0x08559: (0, "InternalizedStringMap"),
0x08581: (1, "ConsInternalizedStringMap"),
0x085a9: (5, "ConsOneByteInternalizedStringMap"),
0x085d1: (2, "ExternalInternalizedStringMap"),
0x085f9: (10, "ExternalInternalizedStringWithOneByteDataMap"),
0x08621: (6, "ExternalOneByteInternalizedStringMap"),
0x08649: (18, "ShortExternalInternalizedStringMap"),
0x08671: (26, "ShortExternalInternalizedStringWithOneByteDataMap"),
0x08699: (22, "ShortExternalOneByteInternalizedStringMap"),
0x086c1: (86, "ShortExternalOneByteStringMap"),
0x086e9: (64, "UndetectableStringMap"),
0x08711: (68, "UndetectableOneByteStringMap"),
0x08739: (138, "ExternalInt8ArrayMap"),
0x08761: (139, "ExternalUint8ArrayMap"),
0x08789: (140, "ExternalInt16ArrayMap"),
0x087b1: (141, "ExternalUint16ArrayMap"),
0x087d9: (142, "ExternalInt32ArrayMap"),
0x08801: (143, "ExternalUint32ArrayMap"),
0x08829: (144, "ExternalFloat32ArrayMap"),
0x08851: (145, "ExternalFloat64ArrayMap"),
0x08879: (146, "ExternalUint8ClampedArrayMap"),
0x088a1: (148, "FixedUint8ArrayMap"),
0x088c9: (147, "FixedInt8ArrayMap"),
0x088f1: (150, "FixedUint16ArrayMap"),
0x08919: (149, "FixedInt16ArrayMap"),
0x08941: (152, "FixedUint32ArrayMap"),
0x08969: (151, "FixedInt32ArrayMap"),
0x08991: (153, "FixedFloat32ArrayMap"),
0x089b9: (154, "FixedFloat64ArrayMap"),
0x089e1: (155, "FixedUint8ClampedArrayMap"),
0x08a09: (179, "NonStrictArgumentsElementsMap"),
0x08a31: (179, "FunctionContextMap"),
0x08a59: (179, "CatchContextMap"),
0x08a81: (179, "WithContextMap"),
0x08aa9: (179, "BlockContextMap"),
0x08ad1: (179, "ModuleContextMap"),
0x08af9: (179, "GlobalContextMap"),
0x08b21: (182, "JSMessageObjectMap"),
0x08b49: (135, "ForeignMap"),
0x08b71: (187, "NeanderMap"),
0x08b99: (170, "AllocationMementoMap"),
0x08bc1: (169, "AllocationSiteMap"),
0x08be9: (173, "PolymorphicCodeCacheMap"),
0x08c11: (171, "ScriptMap"),
0x08c61: (187, "ExternalMap"),
0x08cb1: (176, "BoxMap"),
0x08cd9: (158, "DeclaredAccessorDescriptorMap"),
0x08d01: (159, "DeclaredAccessorInfoMap"),
0x08d29: (160, "ExecutableAccessorInfoMap"),
0x08d51: (161, "AccessorPairMap"),
0x08d79: (162, "AccessCheckInfoMap"),
0x08da1: (163, "InterceptorInfoMap"),
0x08dc9: (164, "CallHandlerInfoMap"),
0x08df1: (165, "FunctionTemplateInfoMap"),
0x08e19: (166, "ObjectTemplateInfoMap"),
0x08e41: (167, "SignatureInfoMap"),
0x08e69: (168, "TypeSwitchInfoMap"),
0x08e91: (172, "CodeCacheMap"),
0x08eb9: (174, "TypeFeedbackInfoMap"),
0x08ee1: (175, "AliasedArgumentsEntryMap"),
0x08f09: (177, "DebugInfoMap"),
0x08f31: (178, "BreakPointInfoMap"),
}
# List of known V8 objects.
KNOWN_OBJECTS = {
("OLD_POINTER_SPACE", 0x08081): "NullValue",
("OLD_POINTER_SPACE", 0x08091): "UndefinedValue",
("OLD_POINTER_SPACE", 0x080a1): "TheHoleValue",
("OLD_POINTER_SPACE", 0x080b1): "TrueValue",
("OLD_POINTER_SPACE", 0x080c1): "FalseValue",
("OLD_POINTER_SPACE", 0x080d1): "UninitializedValue",
("OLD_POINTER_SPACE", 0x080e1): "NoInterceptorResultSentinel",
("OLD_POINTER_SPACE", 0x080f1): "ArgumentsMarker",
("OLD_POINTER_SPACE", 0x08101): "NumberStringCache",
("OLD_POINTER_SPACE", 0x08909): "SingleCharacterStringCache",
("OLD_POINTER_SPACE", 0x08d11): "StringSplitCache",
("OLD_POINTER_SPACE", 0x09119): "RegExpMultipleCache",
("OLD_POINTER_SPACE", 0x09521): "TerminationException",
("OLD_POINTER_SPACE", 0x09531): "MessageListeners",
("OLD_POINTER_SPACE", 0x0954d): "CodeStubs",
("OLD_POINTER_SPACE", 0x0ca65): "MegamorphicSymbol",
("OLD_POINTER_SPACE", 0x0ca75): "UninitializedSymbol",
("OLD_POINTER_SPACE", 0x10ae9): "NonMonomorphicCache",
("OLD_POINTER_SPACE", 0x110fd): "PolymorphicCodeCache",
("OLD_POINTER_SPACE", 0x11105): "NativesSourceCache",
("OLD_POINTER_SPACE", 0x11155): "EmptyScript",
("OLD_POINTER_SPACE", 0x11189): "IntrinsicFunctionNames",
("OLD_POINTER_SPACE", 0x141a5): "ObservationState",
("OLD_POINTER_SPACE", 0x141b1): "FrozenSymbol",
("OLD_POINTER_SPACE", 0x141c1): "NonExistentSymbol",
("OLD_POINTER_SPACE", 0x141d1): "ElementsTransitionSymbol",
("OLD_POINTER_SPACE", 0x141e1): "EmptySlowElementDictionary",
("OLD_POINTER_SPACE", 0x1437d): "ObservedSymbol",
("OLD_POINTER_SPACE", 0x1438d): "AllocationSitesScratchpad",
("OLD_POINTER_SPACE", 0x14795): "MicrotaskState",
("OLD_POINTER_SPACE", 0x36241): "StringTable",
("OLD_DATA_SPACE", 0x08099): "EmptyDescriptorArray",
("OLD_DATA_SPACE", 0x080a1): "EmptyFixedArray",
("OLD_DATA_SPACE", 0x080a9): "NanValue",
("OLD_DATA_SPACE", 0x08141): "EmptyByteArray",
("OLD_DATA_SPACE", 0x08149): "EmptyConstantPoolArray",
("OLD_DATA_SPACE", 0x0828d): "EmptyExternalInt8Array",
("OLD_DATA_SPACE", 0x08299): "EmptyExternalUint8Array",
("OLD_DATA_SPACE", 0x082a5): "EmptyExternalInt16Array",
("OLD_DATA_SPACE", 0x082b1): "EmptyExternalUint16Array",
("OLD_DATA_SPACE", 0x082bd): "EmptyExternalInt32Array",
("OLD_DATA_SPACE", 0x082c9): "EmptyExternalUint32Array",
("OLD_DATA_SPACE", 0x082d5): "EmptyExternalFloat32Array",
("OLD_DATA_SPACE", 0x082e1): "EmptyExternalFloat64Array",
("OLD_DATA_SPACE", 0x082ed): "EmptyExternalUint8ClampedArray",
("OLD_DATA_SPACE", 0x082f9): "InfinityValue",
("OLD_DATA_SPACE", 0x08305): "MinusZeroValue",
("CODE_SPACE", 0x138e1): "JsConstructEntryCode",
("CODE_SPACE", 0x21361): "JsEntryCode",
}
| agpl-3.0 |
soarpenguin/ansible | lib/ansible/modules/cloud/centurylink/clc_publicip.py | 8 | 12101 | #!/usr/bin/python
#
# Copyright (c) 2015 CenturyLink
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: clc_publicip
short_description: Add and Delete public ips on servers in CenturyLink Cloud.
description:
- An Ansible module to add or delete public ip addresses on an existing server or servers in CenturyLink Cloud.
version_added: "2.0"
options:
protocol:
description:
- The protocol that the public IP will listen for.
default: TCP
choices: ['TCP', 'UDP', 'ICMP']
required: False
ports:
description:
- A list of ports to expose. This is required when state is 'present'
required: False
default: None
server_ids:
description:
- A list of servers to create public ips on.
required: True
state:
description:
- Determine whether to create or delete public IPs. If present module will not create a second public ip if one
already exists.
default: present
choices: ['present', 'absent']
required: False
wait:
description:
- Whether to wait for the tasks to finish before returning.
choices: [ True, False ]
default: True
required: False
requirements:
- python = 2.7
- requests >= 2.5.0
- clc-sdk
author: "CLC Runner (@clc-runner)"
notes:
- To use this module, it is required to set the below environment variables which enables access to the
Centurylink Cloud
- CLC_V2_API_USERNAME, the account login id for the centurylink cloud
    - CLC_V2_API_PASSWD, the account password for the centurylink cloud
- Alternatively, the module accepts the API token and account alias. The API token can be generated using the
CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
- CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
- CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
- Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
'''
EXAMPLES = '''
# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples
- name: Add Public IP to Server
hosts: localhost
gather_facts: False
connection: local
tasks:
- name: Create Public IP For Servers
clc_publicip:
protocol: TCP
ports:
- 80
server_ids:
- UC1TEST-SVR01
- UC1TEST-SVR02
state: present
register: clc
- name: debug
debug:
var: clc
- name: Delete Public IP from Server
hosts: localhost
gather_facts: False
connection: local
tasks:
- name: Create Public IP For Servers
clc_publicip:
server_ids:
- UC1TEST-SVR01
- UC1TEST-SVR02
state: absent
register: clc
- name: debug
debug:
var: clc
'''
RETURN = '''
server_ids:
description: The list of server ids that are changed
returned: success
type: list
sample:
[
"UC1TEST-SVR01",
"UC1TEST-SVR02"
]
'''
__version__ = '${version}'
import os
from distutils.version import LooseVersion
try:
import requests
except ImportError:
REQUESTS_FOUND = False
else:
REQUESTS_FOUND = True
#
# Requires the clc-python-sdk.
# sudo pip install clc-sdk
#
try:
import clc as clc_sdk
from clc import CLCException
except ImportError:
CLC_FOUND = False
clc_sdk = None
else:
CLC_FOUND = True
from ansible.module_utils.basic import AnsibleModule
class ClcPublicIp(object):
    """Implementation of the clc_publicip module: adds or removes public
    IP addresses on CenturyLink Cloud servers."""

    clc = clc_sdk
    module = None

    def __init__(self, module):
        """
        Construct module
        :param module: the AnsibleModule holding this run's parameters
        """
        self.module = module
        # Validate optional third-party dependencies up front so users get
        # an actionable message instead of an ImportError traceback.
        if not CLC_FOUND:
            self.module.fail_json(
                msg='clc-python-sdk required for this module')
        if not REQUESTS_FOUND:
            self.module.fail_json(
                msg='requests library is required for this module')
        if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
            self.module.fail_json(
                msg='requests library version should be >= 2.5.0')
        self._set_user_agent(self.clc)

    def process_request(self):
        """
        Process the request - Main Code Path
        :return: Returns with either an exit_json or fail_json
        """
        self._set_clc_credentials_from_env()
        params = self.module.params
        server_ids = params['server_ids']
        ports = params['ports']
        protocol = params['protocol']
        state = params['state']
        if state == 'present':
            # 'ports' is documented as required when state is 'present';
            # fail cleanly here instead of raising a TypeError when the
            # ports list is iterated further down.
            if not ports:
                return self.module.fail_json(
                    msg="'ports' is required when state is 'present'")
            # NOTE: the local holding CLC request objects is deliberately
            # named request_list so it does not shadow the imported
            # 'requests' library.
            changed, changed_server_ids, request_list = \
                self.ensure_public_ip_present(
                    server_ids=server_ids, protocol=protocol, ports=ports)
        elif state == 'absent':
            changed, changed_server_ids, request_list = \
                self.ensure_public_ip_absent(server_ids=server_ids)
        else:
            return self.module.fail_json(msg="Unknown State: " + state)
        self._wait_for_requests_to_complete(request_list)
        return self.module.exit_json(changed=changed,
                                     server_ids=changed_server_ids)

    @staticmethod
    def _define_module_argument_spec():
        """
        Define the argument spec for the ansible module
        :return: argument spec dictionary
        """
        argument_spec = dict(
            server_ids=dict(type='list', required=True),
            protocol=dict(default='TCP', choices=['TCP', 'UDP', 'ICMP']),
            ports=dict(type='list'),
            wait=dict(type='bool', default=True),
            state=dict(default='present', choices=['present', 'absent']),
        )
        return argument_spec

    def ensure_public_ip_present(self, server_ids, protocol, ports):
        """
        Ensures the given server ids having the public ip available
        :param server_ids: the list of server ids
        :param protocol: the ip protocol
        :param ports: the list of ports to expose
        :return: (changed, changed_server_ids, results)
                  changed: A flag indicating if there is any change
                  changed_server_ids : the list of server ids that are changed
                  results: The result list from clc public ip call
        """
        changed = False
        results = []
        changed_server_ids = []
        servers = self._get_servers_from_clc(
            server_ids,
            'Failed to obtain server list from the CLC API')
        # Only touch servers that do not already have a public IP; this
        # keeps the operation idempotent.
        servers_to_change = [
            server for server in servers if len(
                server.PublicIPs().public_ips) == 0]
        ports_to_expose = [{'protocol': protocol, 'port': port}
                           for port in ports]
        for server in servers_to_change:
            if not self.module.check_mode:
                result = self._add_publicip_to_server(server, ports_to_expose)
                results.append(result)
            changed_server_ids.append(server.id)
            changed = True
        return changed, changed_server_ids, results

    def _add_publicip_to_server(self, server, ports_to_expose):
        """
        Add a public ip to the given server exposing the given ports.
        :param server: the CLC server object to modify
        :param ports_to_expose: list of {'protocol': ..., 'port': ...} dicts
        :return: the CLC request object, or fail_json on API error
        """
        result = None
        try:
            result = server.PublicIPs().Add(ports_to_expose)
        except CLCException as ex:
            self.module.fail_json(msg='Failed to add public ip to the server : {0}. {1}'.format(
                server.id, ex.response_text
            ))
        return result

    def ensure_public_ip_absent(self, server_ids):
        """
        Ensures the given server ids having the public ip removed if there is any
        :param server_ids: the list of server ids
        :return: (changed, changed_server_ids, results)
                  changed: A flag indicating if there is any change
                  changed_server_ids : the list of server ids that are changed
                  results: The result list from clc public ip call
        """
        changed = False
        results = []
        changed_server_ids = []
        servers = self._get_servers_from_clc(
            server_ids,
            'Failed to obtain server list from the CLC API')
        # Only servers that actually have a public IP need changing.
        servers_to_change = [
            server for server in servers if len(
                server.PublicIPs().public_ips) > 0]
        for server in servers_to_change:
            if not self.module.check_mode:
                result = self._remove_publicip_from_server(server)
                results.append(result)
            changed_server_ids.append(server.id)
            changed = True
        return changed, changed_server_ids, results

    def _remove_publicip_from_server(self, server):
        """
        Remove every public ip attached to the given server.
        :param server: the CLC server object to modify
        :return: the last CLC request object, or fail_json on API error
        """
        result = None
        try:
            for ip_address in server.PublicIPs().public_ips:
                result = ip_address.Delete()
        except CLCException as ex:
            self.module.fail_json(msg='Failed to remove public ip from the server : {0}. {1}'.format(
                server.id, ex.response_text
            ))
        return result

    def _wait_for_requests_to_complete(self, requests_lst):
        """
        Waits until the CLC requests are complete if the wait argument is True
        :param requests_lst: The list of CLC request objects
        :return: none
        """
        if not self.module.params['wait']:
            return
        for request in requests_lst:
            request.WaitUntilComplete()
            for request_details in request.requests:
                if request_details.Status() != 'succeeded':
                    self.module.fail_json(
                        msg='Unable to process public ip request')

    def _set_clc_credentials_from_env(self):
        """
        Set the CLC Credentials on the sdk by reading environment variables
        :return: none
        """
        env = os.environ
        v2_api_token = env.get('CLC_V2_API_TOKEN', False)
        v2_api_username = env.get('CLC_V2_API_USERNAME', False)
        v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
        clc_alias = env.get('CLC_ACCT_ALIAS', False)
        api_url = env.get('CLC_V2_API_URL', False)
        if api_url:
            self.clc.defaults.ENDPOINT_URL_V2 = api_url
        # A pre-generated token + alias takes precedence over a
        # username/password pair.
        if v2_api_token and clc_alias:
            self.clc._LOGIN_TOKEN_V2 = v2_api_token
            self.clc._V2_ENABLED = True
            self.clc.ALIAS = clc_alias
        elif v2_api_username and v2_api_passwd:
            self.clc.v2.SetCredentials(
                api_username=v2_api_username,
                api_passwd=v2_api_passwd)
        else:
            return self.module.fail_json(
                msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
                    "environment variables")

    def _get_servers_from_clc(self, server_ids, message):
        """
        Gets list of servers form CLC api
        :param server_ids: the list of server ids to fetch
        :param message: error prefix used when the API call fails
        :return: the list of CLC server objects
        """
        try:
            return self.clc.v2.Servers(server_ids).servers
        except CLCException as exception:
            self.module.fail_json(msg=message + ': %s' % exception)

    @staticmethod
    def _set_user_agent(clc):
        """
        Append this module's identifier to the SDK's HTTP user agent so CLC
        can attribute API traffic to the Ansible module.
        :param clc: the clc sdk module to configure
        :return: none
        """
        if hasattr(clc, 'SetRequestsSession'):
            agent_string = "ClcAnsibleModule/" + __version__
            ses = requests.Session()
            ses.headers.update({"Api-Client": agent_string})
            ses.headers['User-Agent'] += " " + agent_string
            clc.SetRequestsSession(ses)
def main():
    """
    Module entry point: build the AnsibleModule and hand control over to
    ClcPublicIp.
    :return: none
    """
    argument_spec = ClcPublicIp._define_module_argument_spec()
    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)
    ClcPublicIp(module).process_request()


if __name__ == '__main__':
    main()
| gpl-3.0 |
4shadoww/hakkuframework | core/lib/dns/namedict.py | 16 | 3677 | # Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
# Copyright (C) 2016 Coresec Systems AB
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND CORESEC SYSTEMS AB DISCLAIMS ALL
# WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL CORESEC
# SYSTEMS AB BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR
# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS name dictionary"""
import collections
import dns.name
from ._compat import xrange
class NameDict(collections.MutableMapping):

    """A dictionary whose keys are dns.name.Name objects.
    @ivar max_depth: the maximum depth of the keys that have ever been
    added to the dictionary.
    @type max_depth: int
    @ivar max_depth_items: the number of items of maximum depth
    @type max_depth_items: int
    """

    __slots__ = ["max_depth", "max_depth_items", "__store"]

    def __init__(self, *args, **kwargs):
        self.__store = dict()
        self.max_depth = 0
        self.max_depth_items = 0
        self.update(dict(*args, **kwargs))

    def __update_max_depth(self, key):
        # Track the deepest key seen, and how many keys share that depth,
        # so get_deepest_match() can bound its search.
        if len(key) == self.max_depth:
            self.max_depth_items = self.max_depth_items + 1
        elif len(key) > self.max_depth:
            self.max_depth = len(key)
            self.max_depth_items = 1

    def __getitem__(self, key):
        return self.__store[key]

    def __setitem__(self, key, value):
        if not isinstance(key, dns.name.Name):
            raise ValueError('NameDict key must be a name')
        self.__store[key] = value
        self.__update_max_depth(key)

    def __delitem__(self, key):
        self.__store.pop(key)
        # Bug fix: the depth bookkeeping must compare the *key's* depth,
        # not len() of the stored value (which is an arbitrary object).
        if len(key) == self.max_depth:
            self.max_depth_items = self.max_depth_items - 1
        if self.max_depth_items == 0:
            # The last maximum-depth key was removed; recompute the
            # maximum from the remaining keys.
            self.max_depth = 0
            for k in self.__store:
                self.__update_max_depth(k)

    def __iter__(self):
        return iter(self.__store)

    def __len__(self):
        return len(self.__store)

    def has_key(self, key):
        # Python 2 compatibility helper.
        return key in self.__store

    def get_deepest_match(self, name):
        """Find the deepest match to I{name} in the dictionary.
        The deepest match is the longest name in the dictionary which is
        a superdomain of I{name}.
        @param name: the name
        @type name: dns.name.Name object
        @rtype: (key, value) tuple
        """
        depth = len(name)
        if depth > self.max_depth:
            depth = self.max_depth
        # Try suffixes of decreasing depth; the first hit is the deepest.
        for i in xrange(-depth, 0):
            n = dns.name.Name(name[i:])
            if n in self:
                return (n, self[n])
        # Fall back to the root entry; raises KeyError if absent.
        v = self[dns.name.empty]
        return (dns.name.empty, v)
| mit |
lamby/python-social-auth | social/backends/disqus.py | 70 | 1811 | """
Disqus OAuth2 backend, docs at:
http://psa.matiasaguirre.net/docs/backends/disqus.html
"""
from social.backends.oauth import BaseOAuth2
class DisqusOAuth2(BaseOAuth2):
    """Disqus OAuth2 authentication backend."""
    name = 'disqus'
    AUTHORIZATION_URL = 'https://disqus.com/api/oauth/2.0/authorize/'
    ACCESS_TOKEN_URL = 'https://disqus.com/api/oauth/2.0/access_token/'
    ACCESS_TOKEN_METHOD = 'POST'
    SCOPE_SEPARATOR = ','
    EXTRA_DATA = [
        ('avatar', 'avatar'),
        ('connections', 'connections'),
        ('user_id', 'user_id'),
        ('email', 'email'),
        ('email_hash', 'emailHash'),
        ('expires', 'expires'),
        ('location', 'location'),
        ('meta', 'response'),
        ('name', 'name'),
        ('username', 'username'),
    ]

    def get_user_id(self, details, response):
        # Disqus nests the account payload under the 'response' key.
        return response['response']['id']

    def get_user_details(self, response):
        """Return user details from Disqus account"""
        payload = response.get('response', {})
        details = {
            'username': payload.get('username', ''),
            'user_id': response.get('user_id', ''),
            'email': payload.get('email', ''),
            'name': payload.get('name', ''),
        }
        return details

    def extra_data(self, user, uid, response, details=None, *args, **kwargs):
        # Flatten the nested 'response' payload into the top-level dict so
        # the EXTRA_DATA entries above resolve against a single mapping.
        flattened = dict(response, **response.get('response', {}))
        return super(DisqusOAuth2, self).extra_data(user, uid, flattened,
                                                    details, *args, **kwargs)

    def user_data(self, access_token, *args, **kwargs):
        """Loads user data from service"""
        api_key, api_secret = self.get_key_and_secret()
        params = {'access_token': access_token, 'api_secret': api_secret}
        return self.get_json(
            'https://disqus.com/api/3.0/users/details.json',
            params=params
        )
| bsd-3-clause |
ECYS-USAC/cpfecys | src/cpfecys/languages/fr-ca.py | 163 | 8203 | # coding: utf8
{
'!langcode!': 'fr-ca',
'!langname!': 'Français (Canadien)',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" est une expression optionnelle comme "champ1=\'nouvellevaleur\'". Vous ne pouvez mettre à jour ou supprimer les résultats d\'un JOIN',
'%s %%{row} deleted': '%s rangées supprimées',
'%s %%{row} updated': '%s rangées mises à jour',
'%s selected': '%s sélectionné',
'%Y-%m-%d': '%Y-%m-%d',
'%Y-%m-%d %H:%M:%S': '%Y-%m-%d %H:%M:%S',
'about': 'à propos',
'About': 'À propos',
'Access Control': "Contrôle d'accès",
'Administrative Interface': 'Administrative Interface',
'Administrative interface': "Interface d'administration",
'Ajax Recipes': 'Recettes Ajax',
'appadmin is disabled because insecure channel': "appadmin est désactivée parce que le canal n'est pas sécurisé",
'Are you sure you want to delete this object?': 'Êtes-vous sûr de vouloir supprimer cet objet?',
'Authentication': 'Authentification',
'Available Databases and Tables': 'Bases de données et tables disponibles',
'Buy this book': 'Acheter ce livre',
'cache': 'cache',
'Cache': 'Cache',
'Cache Keys': 'Cache Keys',
'Cannot be empty': 'Ne peut pas être vide',
'change password': 'changer le mot de passe',
'Check to delete': 'Cliquez pour supprimer',
'Check to delete:': 'Cliquez pour supprimer:',
'Clear CACHE?': 'Clear CACHE?',
'Clear DISK': 'Clear DISK',
'Clear RAM': 'Clear RAM',
'Client IP': 'IP client',
'Community': 'Communauté',
'Components and Plugins': 'Components and Plugins',
'Controller': 'Contrôleur',
'Copyright': "Droit d'auteur",
'Current request': 'Demande actuelle',
'Current response': 'Réponse actuelle',
'Current session': 'Session en cours',
'customize me!': 'personnalisez-moi!',
'data uploaded': 'données téléchargées',
'Database': 'base de données',
'Database %s select': 'base de données %s select',
'db': 'db',
'DB Model': 'Modèle DB',
'Delete:': 'Supprimer:',
'Demo': 'Démo',
'Deployment Recipes': 'Recettes de déploiement ',
'Description': 'Descriptif',
'design': 'design',
'DISK': 'DISK',
'Disk Cache Keys': 'Disk Cache Keys',
'Disk Cleared': 'Disk Cleared',
'Documentation': 'Documentation',
"Don't know what to do?": "Don't know what to do?",
'done!': 'fait!',
'Download': 'Téléchargement',
'E-mail': 'Courriel',
'Edit': 'Éditer',
'Edit current record': "Modifier l'enregistrement courant",
'edit profile': 'modifier le profil',
'Edit This App': 'Modifier cette application',
'Email and SMS': 'Email and SMS',
'enter an integer between %(min)g and %(max)g': 'entrer un entier compris entre %(min)g et %(max)g',
'Errors': 'Erreurs',
'export as csv file': 'exporter sous forme de fichier csv',
'FAQ': 'faq',
'First name': 'Prénom',
'Forms and Validators': 'Formulaires et Validateurs',
'Free Applications': 'Applications gratuites',
'Function disabled': 'Fonction désactivée',
'Group %(group_id)s created': '%(group_id)s groupe créé',
'Group ID': 'Groupe ID',
'Group uniquely assigned to user %(id)s': "Groupe unique attribué à l'utilisateur %(id)s",
'Groups': 'Groupes',
'Hello World': 'Bonjour le monde',
'Home': 'Accueil',
'How did you get here?': 'How did you get here?',
'import': 'import',
'Import/Export': 'Importer/Exporter',
'Index': 'Index',
'insert new': 'insérer un nouveau',
'insert new %s': 'insérer un nouveau %s',
'Internal State': 'État interne',
'Introduction': 'Présentation',
'Invalid email': 'Courriel invalide',
'Invalid Query': 'Requête Invalide',
'invalid request': 'requête invalide',
'Key': 'Key',
'Last name': 'Nom',
'Layout': 'Mise en page',
'Layout Plugins': 'Layout Plugins',
'Layouts': 'layouts',
'Live chat': 'Clavardage en direct',
'Live Chat': 'Live Chat',
'Logged in': 'Connecté',
'login': 'connectez-vous',
'Login': 'Connectez-vous',
'logout': 'déconnectez-vous',
'lost password': 'mot de passe perdu',
'Lost Password': 'Mot de passe perdu',
'lost password?': 'mot de passe perdu?',
'Main Menu': 'Menu principal',
'Manage Cache': 'Manage Cache',
'Menu Model': 'Menu modèle',
'My Sites': 'My Sites',
'Name': 'Nom',
'New Record': 'Nouvel enregistrement',
'new record inserted': 'nouvel enregistrement inséré',
'next 100 rows': '100 prochaines lignes',
'No databases in this application': "Cette application n'a pas de bases de données",
'Online examples': 'Exemples en ligne',
'or import from csv file': "ou importer d'un fichier CSV",
'Origin': 'Origine',
'Other Plugins': 'Other Plugins',
'Other Recipes': 'Autres recettes',
'Overview': 'Présentation',
'password': 'mot de passe',
'Password': 'Mot de passe',
"Password fields don't match": 'Les mots de passe ne correspondent pas',
'please input your password again': "S'il vous plaît entrer votre mot de passe",
'Plugins': 'Plugiciels',
'Powered by': 'Alimenté par',
'Preface': 'Préface',
'previous 100 rows': '100 lignes précédentes',
'profile': 'profile',
'Python': 'Python',
'Query:': 'Requête:',
'Quick Examples': 'Examples Rapides',
'RAM': 'RAM',
'RAM Cache Keys': 'RAM Cache Keys',
'Ram Cleared': 'Ram Cleared',
'Readme': 'Lisez-moi',
'Recipes': 'Recettes',
'Record': 'enregistrement',
'Record %(id)s created': 'Record %(id)s created',
'Record %(id)s updated': 'Record %(id)s updated',
'Record Created': 'Record Created',
'record does not exist': "l'archive n'existe pas",
'Record ID': "ID d'enregistrement",
'Record id': "id d'enregistrement",
'Record Updated': 'Record Updated',
'Register': "S'inscrire",
'register': "s'inscrire",
'Registration key': "Clé d'enregistrement",
'Registration successful': 'Inscription réussie',
'Remember me (for 30 days)': 'Se souvenir de moi (pendant 30 jours)',
'Request reset password': 'Demande de réinitialiser le mot clé',
'Reset Password key': 'Réinitialiser le mot clé',
'Resources': 'Ressources',
'Role': 'Rôle',
'Rows in Table': 'Lignes du tableau',
'Rows selected': 'Lignes sélectionnées',
'Semantic': 'Sémantique',
'Services': 'Services',
'Size of cache:': 'Size of cache:',
'state': 'état',
'Statistics': 'Statistics',
'Stylesheet': 'Feuille de style',
'submit': 'submit',
'Submit': 'Soumettre',
'Support': 'Soutien',
'Sure you want to delete this object?': 'Êtes-vous sûr de vouloir supprimer cet objet?',
'Table': 'tableau',
'Table name': 'Nom du tableau',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': 'La "query" est une condition comme "db.table1.champ1==\'valeur\'". Quelque chose comme "db.table1.champ1==db.table2.champ2" résulte en un JOIN SQL.',
'The Core': 'Le noyau',
'The output of the file is a dictionary that was rendered by the view %s': 'La sortie de ce fichier est un dictionnaire qui été restitué par la vue %s',
'The Views': 'Les Vues',
'This App': 'Cette Appli',
'This is a copy of the scaffolding application': "Ceci est une copie de l'application échafaudage",
'Time in Cache (h:m:s)': 'Time in Cache (h:m:s)',
'Timestamp': 'Horodatage',
'Twitter': 'Twitter',
'unable to parse csv file': "incapable d'analyser le fichier cvs",
'Update:': 'Mise à jour:',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Employez (...)&(...) pour AND, (...)|(...) pour OR, and ~(...) pour NOT pour construire des requêtes plus complexes.',
'User %(id)s Logged-in': 'Utilisateur %(id)s connecté',
'User %(id)s Registered': 'Utilisateur %(id)s enregistré',
'User ID': 'ID utilisateur',
'User Voice': 'User Voice',
'value already in database or empty': 'valeur déjà dans la base ou vide',
'Verify Password': 'Vérifiez le mot de passe',
'Videos': 'Vidéos',
'View': 'Présentation',
'Web2py': 'Web2py',
'Welcome': 'Bienvenu',
'Welcome %s': 'Bienvenue %s',
'Welcome to web2py': 'Bienvenue à web2py',
'Welcome to web2py!': 'Welcome to web2py!',
'Which called the function %s located in the file %s': 'Qui a appelé la fonction %s se trouvant dans le fichier %s',
'You are successfully running web2py': 'Vous roulez avec succès web2py',
'You can modify this application and adapt it to your needs': "Vous pouvez modifier cette application et l'adapter à vos besoins",
'You visited the url %s': "Vous avez visité l'URL %s",
}
| lgpl-3.0 |
Kilhog/odoo | addons/crm_claim/crm_claim.py | 333 | 10079 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import openerp
from openerp.addons.crm import crm
from openerp.osv import fields, osv
from openerp import tools
from openerp.tools.translate import _
from openerp.tools import html2plaintext
class crm_claim_stage(osv.osv):
    """ Model for claim stages. This models the main stages of a claim
        management flow. Main CRM objects (leads, opportunities, project
        issues, ...) will now use only stages, instead of state and stages.
        Stages are for example used to display the kanban view of records.
    """
    _name = "crm.claim.stage"
    _description = "Claim stages"
    _rec_name = 'name'
    # Records are listed in 'sequence' order (lowest first).
    _order = "sequence"
    _columns = {
        'name': fields.char('Stage Name', required=True, translate=True),
        'sequence': fields.integer('Sequence', help="Used to order stages. Lower is better."),
        # Teams this stage is restricted to; consumed by crm_claim.stage_find().
        'section_ids':fields.many2many('crm.case.section', 'section_claim_stage_rel', 'stage_id', 'section_id', string='Sections',
                        help="Link between stages and sales teams. When set, this limitate the current stage to the selected sales teams."),
        # When True the stage is offered on every sales team regardless of section_ids.
        'case_default': fields.boolean('Common to All Teams',
                        help="If you check this field, this stage will be proposed by default on each sales team. It will not assign this stage to existing teams."),
    }
    _defaults = {
        # New stages default to the first position in the ordering.
        'sequence': lambda *args: 1,
    }
class crm_claim(osv.osv):
    """ Crm claim

    Customer claim record with mail-thread integration: tracks subject,
    responsible user/team, stage, deadlines and root cause, and can be
    created automatically from incoming email (see message_new below).
    """
    _name = "crm.claim"
    _description = "Claim"
    _order = "priority,date desc"
    _inherit = ['mail.thread']

    def _get_default_section_id(self, cr, uid, context=None):
        """ Gives default section by checking if present in the context """
        return self.pool.get('crm.lead')._resolve_section_id_from_context(cr, uid, context=context) or False

    def _get_default_stage_id(self, cr, uid, context=None):
        """ Gives default stage_id """
        section_id = self._get_default_section_id(cr, uid, context=context)
        # Picks the stage whose sequence is '1' for the default section.
        return self.stage_find(cr, uid, [], section_id, [('sequence', '=', '1')], context=context)

    _columns = {
        'id': fields.integer('ID', readonly=True),
        'name': fields.char('Claim Subject', required=True),
        'active': fields.boolean('Active'),
        'action_next': fields.char('Next Action'),
        'date_action_next': fields.datetime('Next Action Date'),
        'description': fields.text('Description'),
        'resolution': fields.text('Resolution'),
        'create_date': fields.datetime('Creation Date' , readonly=True),
        'write_date': fields.datetime('Update Date' , readonly=True),
        'date_deadline': fields.date('Deadline'),
        'date_closed': fields.datetime('Closed', readonly=True),
        'date': fields.datetime('Claim Date', select=True),
        # Generic reference to any model registered as "referencable".
        'ref': fields.reference('Reference', selection=openerp.addons.base.res.res_request.referencable_models),
        'categ_id': fields.many2one('crm.case.categ', 'Category', \
                            domain="[('section_id','=',section_id),\
                                        ('object_id.model', '=', 'crm.claim')]"),
        'priority': fields.selection([('0','Low'), ('1','Normal'), ('2','High')], 'Priority'),
        'type_action': fields.selection([('correction','Corrective Action'),('prevention','Preventive Action')], 'Action Type'),
        'user_id': fields.many2one('res.users', 'Responsible', track_visibility='always'),
        'user_fault': fields.char('Trouble Responsible'),
        'section_id': fields.many2one('crm.case.section', 'Sales Team', \
                        select=True, help="Responsible sales team."\
                                " Define Responsible user and Email account for"\
                                " mail gateway."),
        'company_id': fields.many2one('res.company', 'Company'),
        'partner_id': fields.many2one('res.partner', 'Partner'),
        'email_cc': fields.text('Watchers Emails', size=252, help="These email addresses will be added to the CC field of all inbound and outbound emails for this record before being sent. Separate multiple email addresses with a comma"),
        'email_from': fields.char('Email', size=128, help="Destination email for email gateway."),
        'partner_phone': fields.char('Phone'),
        # Stage choice is limited to stages of the claim's team or common stages.
        'stage_id': fields.many2one ('crm.claim.stage', 'Stage', track_visibility='onchange',
                domain="['|', ('section_ids', '=', section_id), ('case_default', '=', True)]"),
        'cause': fields.text('Root Cause'),
    }

    _defaults = {
        'user_id': lambda s, cr, uid, c: uid,
        'section_id': lambda s, cr, uid, c: s._get_default_section_id(cr, uid, c),
        'date': fields.datetime.now,
        'company_id': lambda s, cr, uid, c: s.pool.get('res.company')._company_default_get(cr, uid, 'crm.case', context=c),
        'priority': '1',
        'active': lambda *a: 1,
        'stage_id': lambda s, cr, uid, c: s._get_default_stage_id(cr, uid, c)
    }

    def stage_find(self, cr, uid, cases, section_id, domain=[], order='sequence', context=None):
        """ Override of the base.stage method
            Parameter of the stage search taken from the lead:
            - section_id: if set, stages must belong to this section or
              be a default case

        Returns the id of the first matching stage, or False.
        """
        if isinstance(cases, (int, long)):
            cases = self.browse(cr, uid, cases, context=context)
        # collect all section_ids
        section_ids = []
        if section_id:
            section_ids.append(section_id)
        for claim in cases:
            if claim.section_id:
                section_ids.append(claim.section_id.id)
        # OR all section_ids and OR with case_default
        # Note: ('|') is just the string '|' (parentheses alone do not make
        # a tuple) — n '|' operators prefix the n section terms plus the
        # final case_default leaf, forming a valid polish-notation domain.
        search_domain = []
        if section_ids:
            search_domain += [('|')] * len(section_ids)
            for section_id in section_ids:
                search_domain.append(('section_ids', '=', section_id))
        search_domain.append(('case_default', '=', True))
        # AND with the domain in parameter
        search_domain += list(domain)
        # perform search, return the first found
        stage_ids = self.pool.get('crm.claim.stage').search(cr, uid, search_domain, order=order, context=context)
        if stage_ids:
            return stage_ids[0]
        return False

    def onchange_partner_id(self, cr, uid, ids, partner_id, email=False, context=None):
        """This function returns value of partner address based on partner
           :param email: ignored
        """
        if not partner_id:
            return {'value': {'email_from': False, 'partner_phone': False}}
        address = self.pool.get('res.partner').browse(cr, uid, partner_id, context=context)
        return {'value': {'email_from': address.email, 'partner_phone': address.phone}}

    def create(self, cr, uid, vals, context=None):
        # Propagate the claim's team into the context so defaults (e.g. the
        # stage) computed during creation use the right team.
        context = dict(context or {})
        if vals.get('section_id') and not context.get('default_section_id'):
            context['default_section_id'] = vals.get('section_id')
        # context: no_log, because subtype already handle this
        return super(crm_claim, self).create(cr, uid, vals, context=context)

    def copy(self, cr, uid, id, default=None, context=None):
        # Duplicates restart at the default stage and get a "(copy)" suffix.
        claim = self.browse(cr, uid, id, context=context)
        default = dict(default or {},
            stage_id = self._get_default_stage_id(cr, uid, context=context),
            name = _('%s (copy)') % claim.name)
        return super(crm_claim, self).copy(cr, uid, id, default, context=context)

    # -------------------------------------------------------
    # Mail gateway
    # -------------------------------------------------------

    def message_new(self, cr, uid, msg, custom_values=None, context=None):
        """ Overrides mail_thread message_new that is called by the mailgateway
            through message_process.
            This override updates the document according to the email.
        """
        if custom_values is None:
            custom_values = {}
        desc = html2plaintext(msg.get('body')) if msg.get('body') else ''
        defaults = {
            'name': msg.get('subject') or _("No Subject"),
            'description': desc,
            'email_from': msg.get('from'),
            'email_cc': msg.get('cc'),
            'partner_id': msg.get('author_id', False),
        }
        if msg.get('priority'):
            defaults['priority'] = msg.get('priority')
        # Explicit custom values win over what was parsed from the email.
        defaults.update(custom_values)
        return super(crm_claim, self).message_new(cr, uid, msg, custom_values=defaults, context=context)
class res_partner(osv.osv):
    """Extend partners with a computed count of their CRM claims."""
    _inherit = 'res.partner'

    def _claim_count(self, cr, uid, ids, field_name, arg, context=None):
        """Return {partner_id: number of crm.claim records for that partner}."""
        claim_model = self.pool['crm.claim']
        counts = {}
        for partner_id in ids:
            counts[partner_id] = claim_model.search_count(cr, uid, [('partner_id', '=', partner_id)], context=context)
        return counts

    _columns = {
        'claim_count': fields.function(_claim_count, string='# Claims', type='integer'),
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
toshywoshy/ansible | test/units/modules/network/dellos6/test_dellos6_command.py | 68 | 4307 | # (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from units.compat.mock import patch
from ansible.modules.network.dellos6 import dellos6_command
from units.modules.utils import set_module_args
from .dellos6_module import TestDellos6Module, load_fixture
class TestDellos6CommandModule(TestDellos6Module):
    """Unit tests for the dellos6_command Ansible module.

    run_commands is patched so device output comes from local fixture
    files instead of a live connection.
    """

    module = dellos6_command

    def setUp(self):
        super(TestDellos6CommandModule, self).setUp()
        # Replace run_commands with a mock for the duration of each test.
        self.mock_run_commands = patch('ansible.modules.network.dellos6.dellos6_command.run_commands')
        self.run_commands = self.mock_run_commands.start()

    def tearDown(self):
        super(TestDellos6CommandModule, self).tearDown()
        self.mock_run_commands.stop()

    def load_fixtures(self, commands=None):
        def load_from_file(*args, **kwargs):
            # run_commands is called as run_commands(module, commands).
            module, commands = args
            output = list()
            for item in commands:
                try:
                    # Commands may arrive as JSON-encoded dicts with a
                    # 'command' key; fall back to the raw string otherwise.
                    obj = json.loads(item['command'])
                    command = obj['command']
                except ValueError:
                    command = item['command']
                # Fixture files are named after the command, spaces -> underscores.
                filename = str(command).replace(' ', '_')
                output.append(load_fixture(filename))
            return output
        self.run_commands.side_effect = load_from_file

    def test_dellos6_command_simple(self):
        # A single command yields exactly one stdout entry.
        set_module_args(dict(commands=['show version']))
        result = self.execute_module()
        self.assertEqual(len(result['stdout']), 1)
        self.assertTrue(result['stdout'][0].startswith('Machine Description'))

    def test_dellos6_command_multiple(self):
        set_module_args(dict(commands=['show version', 'show version']))
        result = self.execute_module()
        self.assertEqual(len(result['stdout']), 2)
        self.assertTrue(result['stdout'][0].startswith('Machine Description'))

    def test_dellos6_command_wait_for(self):
        # wait_for condition that matches on the first try succeeds.
        wait_for = 'result[0] contains "Machine Description"'
        set_module_args(dict(commands=['show version'], wait_for=wait_for))
        self.execute_module()

    def test_dellos6_command_wait_for_fails(self):
        # An unmatchable condition exhausts the default 10 retries.
        wait_for = 'result[0] contains "test string"'
        set_module_args(dict(commands=['show version'], wait_for=wait_for))
        self.execute_module(failed=True)
        self.assertEqual(self.run_commands.call_count, 10)

    def test_dellos6_command_retries(self):
        # Explicit retries limits how many times run_commands is invoked.
        wait_for = 'result[0] contains "test string"'
        set_module_args(dict(commands=['show version'], wait_for=wait_for, retries=2))
        self.execute_module(failed=True)
        self.assertEqual(self.run_commands.call_count, 2)

    def test_dellos6_command_match_any(self):
        # match=any passes when at least one condition holds.
        wait_for = ['result[0] contains "Machine Description"',
                    'result[0] contains "test string"']
        set_module_args(dict(commands=['show version'], wait_for=wait_for, match='any'))
        self.execute_module()

    def test_dellos6_command_match_all(self):
        # match=all passes only when every condition holds.
        wait_for = ['result[0] contains "Machine Description"',
                    'result[0] contains "Dell Networking"']
        set_module_args(dict(commands=['show version'], wait_for=wait_for, match='all'))
        self.execute_module()

    def test_dellos6_command_match_all_failure(self):
        wait_for = ['result[0] contains "Machine Description"',
                    'result[0] contains "test string"']
        commands = ['show version', 'show version']
        set_module_args(dict(commands=commands, wait_for=wait_for, match='all'))
        self.execute_module(failed=True)
| gpl-3.0 |
caphrim007/ansible | lib/ansible/plugins/lookup/grafana_dashboard.py | 20 | 6337 | # (c) 2018 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
lookup: grafana_dashboard
author: Thierry Salle (@seuf)
version_added: "2.7"
short_description: list or search grafana dashboards
description:
- This lookup returns a list of grafana dashboards with possibility to filter them by query.
options:
grafana_url:
description: url of grafana.
env:
- name: GRAFANA_URL
default: http://127.0.0.1:3000
grafana_api_key:
description:
- api key of grafana.
- when C(grafana_api_key) is set, the options C(grafan_user), C(grafana_password) and C(grafana_org_id) are ignored.
- Attention, please remove the two == at the end of the grafana_api_key
- because ansible lookup plugins options are splited on = (see example).
env:
- name: GRAFANA_API_KEY
grafana_user:
description: grafana authentication user.
env:
- name: GRAFANA_USER
default: admin
grafana_password:
description: grafana authentication password.
env:
- name: GRAFANA_PASSWORD
default: admin
grafana_org_id:
description: grafana organisation id.
env:
- name: GRAFANA_ORG_ID
default: 1
search:
description: optional filter for dashboard search.
env:
- name: GRAFANA_DASHBOARD_SEARCH
"""
EXAMPLES = """
- name: get project foo grafana dashboards
set_fact:
grafana_dashboards: "{{ lookup('grafana_dashboard', 'grafana_url=http://grafana.company.com grafana_user=admin grafana_password=admin search=foo') }}"
- name: get all grafana dashboards
set_fact:
grafana_dashboards: "{{ lookup('grafana_dashboard', 'grafana_url=http://grafana.company.com grafana_api_key=' ~ grafana_api_key|replace('==', '')) }}"
"""
import base64
import json
import os
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.plugins.lookup import LookupBase
from ansible.module_utils.urls import open_url
from ansible.module_utils._text import to_bytes
from ansible.module_utils.six.moves.urllib.error import HTTPError
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
# Default Grafana connection settings. Each value can be overridden through
# the matching environment variable; os.getenv returns the second argument
# when the variable is unset, which collapses the previous
# "constant + if os.getenv(...) is not None" pairs into one line each.
ANSIBLE_GRAFANA_URL = os.getenv('GRAFANA_URL', 'http://127.0.0.1:3000')
# API key takes precedence over user/password auth when set (see GrafanaAPI).
ANSIBLE_GRAFANA_API_KEY = os.getenv('GRAFANA_API_KEY')
ANSIBLE_GRAFANA_USER = os.getenv('GRAFANA_USER', 'admin')
ANSIBLE_GRAFANA_PASSWORD = os.getenv('GRAFANA_PASSWORD', 'admin')
# Note: stays an int when unset, becomes a str when taken from the
# environment — same behavior as the original code.
ANSIBLE_GRAFANA_ORG_ID = os.getenv('GRAFANA_ORG_ID', 1)
ANSIBLE_GRAFANA_DASHBOARD_SEARCH = os.getenv('GRAFANA_DASHBOARD_SEARCH')
class GrafanaAPIException(Exception):
    """Raised when a call to the Grafana HTTP API fails or returns non-200."""
    pass
class GrafanaAPI:
    """Minimal Grafana HTTP API client used by the lookup plugin.

    Connection settings fall back to the module-level ANSIBLE_GRAFANA_*
    defaults (themselves overridable through environment variables).
    """

    def __init__(self, **kwargs):
        self.grafana_url = kwargs.get('grafana_url', ANSIBLE_GRAFANA_URL)
        self.grafana_api_key = kwargs.get('grafana_api_key', ANSIBLE_GRAFANA_API_KEY)
        self.grafana_user = kwargs.get('grafana_user', ANSIBLE_GRAFANA_USER)
        self.grafana_password = kwargs.get('grafana_password', ANSIBLE_GRAFANA_PASSWORD)
        self.grafana_org_id = kwargs.get('grafana_org_id', ANSIBLE_GRAFANA_ORG_ID)
        self.search = kwargs.get('search', ANSIBLE_GRAFANA_DASHBOARD_SEARCH)

    def grafana_switch_organisation(self, headers):
        """Switch the authenticated user to the configured organisation.

        :raises GrafanaAPIException: on HTTP error or non-200 response.
        """
        try:
            r = open_url('%s/api/user/using/%s' % (self.grafana_url, self.grafana_org_id), headers=headers, method='POST')
        except HTTPError as e:
            raise GrafanaAPIException('Unable to switch to organization %s : %s' % (self.grafana_org_id, str(e)))
        if r.getcode() != 200:
            raise GrafanaAPIException('Unable to switch to organization %s : %s' % (self.grafana_org_id, str(r.getcode())))

    def grafana_headers(self):
        """Build the HTTP headers (including authentication) for API calls."""
        headers = {'content-type': 'application/json; charset=utf8'}
        if self.grafana_api_key:
            # The trailing '==' padding was stripped by the caller (see the
            # DOCUMENTATION block); restore it before sending the token.
            headers['Authorization'] = "Bearer %s==" % self.grafana_api_key
        else:
            # BUGFIX: base64.b64encode returns bytes on Python 3, so the old
            # .replace('\n', '') with str arguments raised TypeError, and
            # interpolating the raw bytes produced a header like "Basic b'...'".
            # b64encode never emits newlines, so the replace was unnecessary;
            # decode the digest to text instead.
            auth = base64.b64encode(to_bytes('%s:%s' % (self.grafana_user, self.grafana_password)))
            headers['Authorization'] = 'Basic %s' % auth.decode('ascii')
            # Basic auth does not imply an organisation; select it explicitly.
            self.grafana_switch_organisation(headers)
        return headers

    def grafana_list_dashboards(self):
        """Return the list of dashboards, optionally filtered by self.search.

        :raises GrafanaAPIException: on HTTP error, non-200 response, or
            unparsable JSON payload.
        """
        # define http headers
        headers = self.grafana_headers()
        dashboard_list = []
        try:
            if self.search:
                r = open_url('%s/api/search?query=%s' % (self.grafana_url, self.search), headers=headers, method='GET')
            else:
                r = open_url('%s/api/search/' % self.grafana_url, headers=headers, method='GET')
        except HTTPError as e:
            raise GrafanaAPIException('Unable to search dashboards : %s' % str(e))
        if r.getcode() == 200:
            try:
                dashboard_list = json.loads(r.read())
            except Exception as e:
                raise GrafanaAPIException('Unable to parse json list %s' % str(e))
        else:
            raise GrafanaAPIException('Unable to list grafana dashboards : %s' % str(r.getcode()))
        return dashboard_list
class LookupModule(LookupBase):
    """Lookup entry point: parses space-separated key=value terms and
    returns the matching Grafana dashboards."""

    def run(self, terms, variables=None, **kwargs):
        options = {}
        for pair in terms[0].split(' '):
            try:
                name, value = pair.split('=')
            except ValueError:
                raise AnsibleError("grafana_dashboard lookup plugin needs key=value pairs, but received %s" % terms)
            options[name] = value
        client = GrafanaAPI(**options)
        return client.grafana_list_dashboards()
| gpl-3.0 |
corbindavenport/me-irl-bot | memebot.py | 1 | 13750 | import praw
import json
import requests
import tweepy
import time
import os
import csv
import re
import configparser
import urllib.parse
import sys
from glob import glob
from gfycat.client import GfycatClient
from imgurpython import ImgurClient
import distutils.core
import itertools
import photohash
from PIL import Image
import urllib.request
# Location of the configuration file
CONFIG_FILE = 'config.ini'
def strip_title(title):
    """Return *title* unchanged when it fits in a tweet, otherwise a
    276-character prefix with '...' appended.

    The 280-character budget leaves room for a 22-character shortlink
    plus one separating space.
    """
    return title if len(title) < 280 else title[:276] + '...'
def save_file(img_url, file_path):
    """Stream *img_url* to disk at *file_path*.

    Returns *file_path* on success, or None when the server responds with
    anything other than HTTP 200.
    """
    resp = requests.get(img_url, stream=True)
    if resp.status_code != 200:
        print('[EROR] File failed to download. Status code: ' + str(resp.status_code))
        return
    with open(file_path, 'wb') as image_file:
        for chunk in resp:
            image_file.write(chunk)
    # Return the path of the image, which is always the same since we just overwrite images
    return file_path
def get_media(img_url, post_id):
    """Download the media behind *img_url* for Reddit post *post_id*.

    Supports direct Reddit uploads, Imgur (single images and galleries),
    Gfycat and Giphy. Returns the local file path on success, or None when
    the link is unsupported, the download fails, or the GIF check fails.
    """
    if any(s in img_url for s in ('i.redd.it', 'i.reddituploads.com')):
        file_name = os.path.basename(urllib.parse.urlsplit(img_url).path)
        file_extension = os.path.splitext(img_url)[-1].lower()
        # Fix for issue with i.reddituploads.com links not having a file extension in the URL
        if not file_extension:
            file_extension += '.jpg'
            file_name += '.jpg'
            img_url += '.jpg'
        # Grab the GIF versions of .GIFV links
        # When Tweepy adds support for video uploads, we can grab the MP4 versions
        if (file_extension == '.gifv'):
            file_extension = file_extension.replace('.gifv', '.gif')
            file_name = file_name.replace('.gifv', '.gif')
            img_url = img_url.replace('.gifv', '.gif')
        # Download the file
        file_path = IMAGE_DIR + '/' + file_name
        print('[ OK ] Downloading file at URL ' + img_url + ' to ' + file_path + ', file type identified as ' + file_extension)
        img = save_file(img_url, file_path)
        return img
    elif ('imgur.com' in img_url): # Imgur
        try:
            client = ImgurClient(IMGUR_CLIENT, IMGUR_CLIENT_SECRET)
        except BaseException as e:
            print ('[EROR] Error while authenticating with Imgur:', str(e))
            return
        # Working demo of regex: https://regex101.com/r/G29uGl/2
        regex = r"(?:.*)imgur\.com(?:\/gallery\/|\/a\/|\/)(.*?)(?:\/.*|\.|$)"
        m = re.search(regex, img_url, flags=0)
        if m:
            # Get the Imgur image/gallery ID
            # NOTE: 'id' shadows the builtin of the same name (local only).
            id = m.group(1)
            if any(s in img_url for s in ('/a/', '/gallery/')): # Gallery links
                images = client.get_album_images(id)
                # Only the first image in a gallery is used
                imgur_url = images[0].link
            else: # Single image
                imgur_url = client.get_image(id).link
            # If the URL is a GIFV link, change it to a GIF
            file_extension = os.path.splitext(imgur_url)[-1].lower()
            if (file_extension == '.gifv'):
                file_extension = file_extension.replace('.gifv', '.gif')
                img_url = imgur_url.replace('.gifv', '.gif')
            # Download the image
            file_path = IMAGE_DIR + '/' + id + file_extension
            print('[ OK ] Downloading Imgur image at URL ' + imgur_url + ' to ' + file_path)
            imgur_file = save_file(imgur_url, file_path)
            # Imgur will sometimes return a single-frame thumbnail instead of a GIF, so we need to check for this
            if (file_extension == '.gif'):
                # Open the file using the Pillow library
                img = Image.open(imgur_file)
                # Get the MIME type
                mime = Image.MIME[img.format]
                if (mime == 'image/gif'):
                    # Image is indeed a GIF, so it can be posted
                    img.close()
                    return imgur_file
                else:
                    # Image is not actually a GIF, so don't post it
                    print('[EROR] Imgur has not processed a GIF version of this link, so it can not be posted')
                    img.close()
                    # Delete the image
                    try:
                        os.remove(imgur_file)
                    except BaseException as e:
                        print ('[EROR] Error while deleting media file:', str(e))
                    return
            else:
                return imgur_file
        else:
            print('[EROR] Could not identify Imgur image/gallery ID in this URL:', img_url)
            return
    elif ('gfycat.com' in img_url): # Gfycat
        gfycat_name = os.path.basename(urllib.parse.urlsplit(img_url).path)
        client = GfycatClient()
        gfycat_info = client.query_gfy(gfycat_name)
        # Download the 2MB version because Tweepy has a 3MB upload limit for GIFs
        gfycat_url = gfycat_info['gfyItem']['max2mbGif']
        file_path = IMAGE_DIR + '/' + gfycat_name + '.gif'
        print('[ OK ] Downloading Gfycat at URL ' + gfycat_url + ' to ' + file_path)
        gfycat_file = save_file(gfycat_url, file_path)
        return gfycat_file
    elif ('giphy.com' in img_url): # Giphy
        # Working demo of regex: https://regex101.com/r/o8m1kA/2
        regex = r"https?://((?:.*)giphy\.com/media/|giphy.com/gifs/|i.giphy.com/)(.*-)?(\w+)(/|\n)"
        m = re.search(regex, img_url, flags=0)
        if m:
            # Get the Giphy ID
            id = m.group(3)
            # Download the 2MB version because Tweepy has a 3MB upload limit for GIFs
            giphy_url = 'https://media.giphy.com/media/' + id + '/giphy-downsized.gif'
            file_path = IMAGE_DIR + '/' + id + '-downsized.gif'
            print('[ OK ] Downloading Giphy at URL ' + giphy_url + ' to ' + file_path)
            giphy_file = save_file(giphy_url, file_path)
            return giphy_file
        else:
            print('[EROR] Could not identify Giphy ID in this URL:', img_url)
            return
    else:
        print('[WARN] Post', post_id, 'doesn\'t point to an image/GIF:', img_url)
        return
def tweet_creator(subreddit_info):
    """Collect tweetable hot posts from *subreddit_info*.

    Returns a dict mapping the (possibly truncated) post title to
    [submission id, media URL, shortlink, author display name].
    NSFW submissions are skipped when NSFW_POSTS_ALLOWED is False.
    """
    post_dict = {}
    print ('[ OK ] Getting posts from Reddit')
    for submission in subreddit_info.hot(limit=POST_LIMIT):
        # BUGFIX: the original code assigned the placeholder string to
        # submission.author and then set '.name' on it, which raises
        # AttributeError ('str' object has no attribute 'name'). Build the
        # display name in a local variable instead of mutating praw objects.
        if submission.author is None:
            # If the OP has deleted his account, credit "a deleted user"
            author_name = "a deleted user"
        else:
            author_name = "/u/" + submission.author.name
        if (submission.over_18 and NSFW_POSTS_ALLOWED is False):
            # Skip over NSFW posts if they are disabled in the config file
            print('[ OK ] Skipping', submission.id, 'because it is marked as NSFW')
            continue
        else:
            post_dict[strip_title(submission.title)] = [submission.id, submission.url, submission.shortlink, author_name]
    return post_dict
def setup_connection_reddit(subreddit):
    """Authenticate with Reddit and return a handle for *subreddit*."""
    print ('[ OK ] Setting up connection with Reddit...')
    reddit = praw.Reddit(
        user_agent='memebot',
        client_id=REDDIT_AGENT,
        client_secret=REDDIT_CLIENT_SECRET,
    )
    return reddit.subreddit(subreddit)
def duplicate_check(id):
    """Return True when *id* already appears in any row of the cache CSV."""
    with open(CACHE_CSV, 'rt', newline='') as f:
        for row in csv.reader(f, delimiter=','):
            if id in row:
                # Early exit: one hit is enough to call it a duplicate.
                return True
    return False
def hash_check(hash):
    """Return True when *hash* matches one of the last REPOST_LIMIT cache rows.

    An empty/falsy hash is treated as a match so the caller skips the image.
    """
    if not hash:
        return True
    # Keep a sliding window of the most recent REPOST_LIMIT lines only.
    recent = []
    with open(CACHE_CSV, 'rt', newline='') as f:
        for entry in f:
            recent.append(entry)
            if len(recent) > REPOST_LIMIT:
                recent.pop(0)
    return any(hash in entry for entry in recent)
def log_post(id, hash, tweetID):
    """Append one bookkeeping row (id, timestamp, image hash, tweet link)."""
    timestamp = time.strftime("%d/%m/%Y") + ' ' + time.strftime("%H:%M:%S")
    with open(CACHE_CSV, 'a', newline='') as cache:
        csv.writer(cache, delimiter=',').writerow([id, timestamp, hash, tweetID])
def main():
    """Run one bot cycle: ensure cache/media storage exists, fetch hot
    posts from the configured subreddit, then tweet them."""
    # Make sure logging file and media directory exists
    if not os.path.exists(CACHE_CSV):
        with open(CACHE_CSV, 'w', newline='') as cache:
            default = ['Post','Date and time','Image hash', 'Tweet link']
            wr = csv.writer(cache)
            wr.writerow(default)
        print ('[ OK ] ' + CACHE_CSV + ' file not found, created a new one')
    if not os.path.exists(IMAGE_DIR):
        os.makedirs(IMAGE_DIR)
        print ('[ OK ] ' + IMAGE_DIR + ' folder not found, created a new one')
    # Continue with script
    subreddit = setup_connection_reddit(SUBREDDIT_TO_MONITOR)
    post_dict = tweet_creator(subreddit)
    tweeter(post_dict)
def alt_tweeter(post_link, op, username, newestTweet):
    """Reply to the bot's newest tweet from the alternate account,
    crediting the original Reddit poster. Failures are logged, not raised."""
    try:
        # Log into alternate account
        handler = tweepy.OAuthHandler(ALT_CONSUMER_KEY, ALT_CONSUMER_SECRET)
        handler.set_access_token(ALT_ACCESS_TOKEN, ALT_ACCESS_TOKEN_SECRET)
        alt_api = tweepy.API(handler)
        # Post the tweet
        reply_text = '@' + username + ' Originally posted by ' + op + ' on Reddit: ' + post_link
        print('[ OK ] Posting this on alt Twitter account:', reply_text)
        alt_api.update_status(reply_text, newestTweet)
    except BaseException as e:
        print ('[EROR] Error while posting tweet on alt account:', str(e))
        return
def tweeter(post_dict):
    """Tweet every post in *post_dict* (as built by tweet_creator),
    applying duplicate and repost protection, logging every outcome to the
    cache CSV, and cleaning up downloaded media afterwards."""
    auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
    auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_secret)
    api = tweepy.API(auth)
    for post in post_dict:
        # Grab post details from dictionary
        post_id = post_dict[post][0]
        if not duplicate_check(post_id): # Make sure post is not a duplicate
            file_path = get_media(post_dict[post][1], post_dict[post][0])
            post_link = post_dict[post][2]
            post_op = post_dict[post][3]
            # Make sure the post contains media (if it doesn't, then file_path would be blank)
            if (file_path):
                # Scan the image against previously-posted images
                try:
                    # NOTE: 'hash' shadows the builtin of the same name (local only).
                    hash = photohash.average_hash(file_path)
                    print ('[ OK ] Image hash check:', hash_check(hash))
                except:
                    # Set hash to an empty string if the check failed
                    # (hash_check treats "" as a repost, so the image is skipped)
                    hash = ""
                    print ('[WARN] Could not check image hash, skipping.')
                # Only make a tweet if the post has not already been posted (if repost protection is enabled)
                if ((REPOST_PROTECTION is True) and (hash_check(hash) is False)):
                    print ('[ OK ] Posting this on main twitter account:', post, file_path)
                    try:
                        # Post the tweet
                        api.update_with_media(filename=file_path, status=post)
                        # Log the tweet
                        username = api.me().screen_name
                        latestTweets = api.user_timeline(screen_name = username, count = 1, include_rts = False)
                        newestTweet = latestTweets[0].id_str
                        log_post(post_id, hash, 'https://twitter.com/' + username + '/status/' + newestTweet + '/')
                        # Post alt tweet
                        if ALT_ACCESS_TOKEN:
                            alt_tweeter(post_link, post_op, username, newestTweet)
                        else:
                            print('[WARN] No authentication info for alternate account in config.ini, skipping alt tweet.')
                        print('[ OK ] Sleeping for', DELAY_BETWEEN_TWEETS, 'seconds')
                        time.sleep(DELAY_BETWEEN_TWEETS)
                    except BaseException as e:
                        print ('[EROR] Error while posting tweet:', str(e))
                        # Log the post anyways
                        log_post(post_id, hash, 'Error while posting tweet: ' + str(e))
                else:
                    print ('[ OK ] Skipping', post_id, 'because it is a repost or Memebot previously failed to post it')
                    log_post(post_id, hash, 'Post was already tweeted or was identified as a repost')
                # Cleanup media file
                try:
                    os.remove(file_path)
                    print ('[ OK ] Deleted media file at ' + file_path)
                except BaseException as e:
                    print ('[EROR] Error while deleting media file:', str(e))
            else:
                print ('[ OK ] Ignoring', post_id, 'because there was not a media file downloaded')
        else:
            print ('[ OK ] Ignoring', post_id, 'because it was already posted')
if __name__ == '__main__':
    # Check for updates
    try:
        with urllib.request.urlopen("https://raw.githubusercontent.com/corbindavenport/memebot/update-check/current-version.txt") as url:
            s = url.read()
            new_version = s.decode("utf-8").rstrip()
        current_version = 3.0 # Current version of script
        if (current_version < float(new_version)):
            print('IMPORTANT: A new version of Memebot (' + str(new_version) + ') is available! (you have ' + str(current_version) + ')')
            print ('IMPORTANT: Get the latest update from here: https://github.com/corbindavenport/memebot/releases')
        else:
            print('[ OK ] You have the latest version of Memebot (' + str(current_version) + ')')
    except BaseException as e:
        print ('[EROR] Error while checking for updates:', str(e))
    # Make sure config file exists
    try:
        config = configparser.ConfigParser()
        config.read(CONFIG_FILE)
    except BaseException as e:
        print ('[EROR] Error while reading config file:', str(e))
        sys.exit()
    # Create variables from config file
    # These are module-level globals read by main()/tweeter()/etc. above.
    CACHE_CSV = config['BotSettings']['CacheFile']
    IMAGE_DIR = config['BotSettings']['MediaFolder']
    DELAY_BETWEEN_TWEETS = int(config['BotSettings']['DelayBetweenTweets'])
    POST_LIMIT = int(config['BotSettings']['PostLimit'])
    SUBREDDIT_TO_MONITOR = config['BotSettings']['SubredditToMonitor']
    # NOTE(review): distutils.util is used here but only 'distutils.core' is
    # imported at the top of the file — this relies on distutils.core pulling
    # in distutils.util as a side effect; confirm or import distutils.util
    # explicitly.
    NSFW_POSTS_ALLOWED = bool(distutils.util.strtobool(config['BotSettings']['NSFWPostsAllowed']))
    REPOST_PROTECTION = bool(distutils.util.strtobool(config['RepostSettings']['RepostProtection']))
    REPOST_LIMIT = int(config['RepostSettings']['RepostLimit'])
    ACCESS_TOKEN = config['PrimaryTwitterKeys']['AccessToken']
    ACCESS_TOKEN_secret = config['PrimaryTwitterKeys']['AccessTokenSecret']
    CONSUMER_KEY = config['PrimaryTwitterKeys']['ConsumerKey']
    CONSUMER_SECRET = config['PrimaryTwitterKeys']['ConsumerSecret']
    ALT_ACCESS_TOKEN = config['AltTwitterKeys']['AccessToken']
    ALT_ACCESS_TOKEN_SECRET = config['AltTwitterKeys']['AccessTokenSecret']
    ALT_CONSUMER_KEY = config['AltTwitterKeys']['ConsumerKey']
    ALT_CONSUMER_SECRET = config['AltTwitterKeys']['ConsumerSecret']
    REDDIT_AGENT = config['Reddit']['Agent']
    REDDIT_CLIENT_SECRET = config['Reddit']['ClientSecret']
    IMGUR_CLIENT = config['Imgur']['ClientID']
    IMGUR_CLIENT_SECRET = config['Imgur']['ClientSecret']
    # Set the command line window title on Windows
    if os.name == 'nt':
        try:
            auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
            auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_secret)
            api = tweepy.API(auth)
            username = api.me().screen_name
            title = '@' + username + ' - Memebot'
        except:
            # Fall back to a generic title when Twitter auth fails
            title = 'Memebot'
        os.system('title ' + title)
    # Run the main script forever, sleeping between cycles
    while True:
        main()
        print('[ OK ] Sleeping for', DELAY_BETWEEN_TWEETS, 'seconds')
        time.sleep(DELAY_BETWEEN_TWEETS)
        print('[ OK ] Restarting main()...')
jhayworth/config | .emacs.d/elpy/rpc-venv/lib/python2.7/encodings/cp862.py | 593 | 33626 | """ Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP862.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless cp862 codec backed by the module-level charmap tables.

    Machine-generated by gencodec.py; keep token-for-token in sync with
    the generator's output.
    """

    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_map)

    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental cp862 encoder (charmap codecs carry no cross-call state)."""
    def encode(self, input, final=False):
        return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental cp862 decoder (charmap codecs carry no cross-call state)."""
    def decode(self, input, final=False):
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    """File-like writer that encodes text to cp862 on the fly."""
    pass
class StreamReader(Codec,codecs.StreamReader):
    """File-like reader that decodes cp862 bytes on the fly."""
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo record used to register the cp862 codec."""
    # A single Codec instance supplies both bound methods; the original
    # created one instance per method, which is behaviourally identical.
    codec = Codec()
    return codecs.CodecInfo(
        name='cp862',
        encode=codec.encode,
        decode=codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Map
# decoding_map is the single source of truth for the cp862 byte -> Unicode
# mapping: identity for 0x00-0x7f, with the high half overridden below.
# decoding_table and encoding_map are derived from it further down instead of
# being maintained as duplicate 256-entry literals (the old form of this
# generated file), eliminating any chance of the three structures drifting.
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
    0x0080: 0x05d0,     # HEBREW LETTER ALEF
    0x0081: 0x05d1,     # HEBREW LETTER BET
    0x0082: 0x05d2,     # HEBREW LETTER GIMEL
    0x0083: 0x05d3,     # HEBREW LETTER DALET
    0x0084: 0x05d4,     # HEBREW LETTER HE
    0x0085: 0x05d5,     # HEBREW LETTER VAV
    0x0086: 0x05d6,     # HEBREW LETTER ZAYIN
    0x0087: 0x05d7,     # HEBREW LETTER HET
    0x0088: 0x05d8,     # HEBREW LETTER TET
    0x0089: 0x05d9,     # HEBREW LETTER YOD
    0x008a: 0x05da,     # HEBREW LETTER FINAL KAF
    0x008b: 0x05db,     # HEBREW LETTER KAF
    0x008c: 0x05dc,     # HEBREW LETTER LAMED
    0x008d: 0x05dd,     # HEBREW LETTER FINAL MEM
    0x008e: 0x05de,     # HEBREW LETTER MEM
    0x008f: 0x05df,     # HEBREW LETTER FINAL NUN
    0x0090: 0x05e0,     # HEBREW LETTER NUN
    0x0091: 0x05e1,     # HEBREW LETTER SAMEKH
    0x0092: 0x05e2,     # HEBREW LETTER AYIN
    0x0093: 0x05e3,     # HEBREW LETTER FINAL PE
    0x0094: 0x05e4,     # HEBREW LETTER PE
    0x0095: 0x05e5,     # HEBREW LETTER FINAL TSADI
    0x0096: 0x05e6,     # HEBREW LETTER TSADI
    0x0097: 0x05e7,     # HEBREW LETTER QOF
    0x0098: 0x05e8,     # HEBREW LETTER RESH
    0x0099: 0x05e9,     # HEBREW LETTER SHIN
    0x009a: 0x05ea,     # HEBREW LETTER TAV
    0x009b: 0x00a2,     # CENT SIGN
    0x009c: 0x00a3,     # POUND SIGN
    0x009d: 0x00a5,     # YEN SIGN
    0x009e: 0x20a7,     # PESETA SIGN
    0x009f: 0x0192,     # LATIN SMALL LETTER F WITH HOOK
    0x00a0: 0x00e1,     # LATIN SMALL LETTER A WITH ACUTE
    0x00a1: 0x00ed,     # LATIN SMALL LETTER I WITH ACUTE
    0x00a2: 0x00f3,     # LATIN SMALL LETTER O WITH ACUTE
    0x00a3: 0x00fa,     # LATIN SMALL LETTER U WITH ACUTE
    0x00a4: 0x00f1,     # LATIN SMALL LETTER N WITH TILDE
    0x00a5: 0x00d1,     # LATIN CAPITAL LETTER N WITH TILDE
    0x00a6: 0x00aa,     # FEMININE ORDINAL INDICATOR
    0x00a7: 0x00ba,     # MASCULINE ORDINAL INDICATOR
    0x00a8: 0x00bf,     # INVERTED QUESTION MARK
    0x00a9: 0x2310,     # REVERSED NOT SIGN
    0x00aa: 0x00ac,     # NOT SIGN
    0x00ab: 0x00bd,     # VULGAR FRACTION ONE HALF
    0x00ac: 0x00bc,     # VULGAR FRACTION ONE QUARTER
    0x00ad: 0x00a1,     # INVERTED EXCLAMATION MARK
    0x00ae: 0x00ab,     # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
    0x00af: 0x00bb,     # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
    0x00b0: 0x2591,     # LIGHT SHADE
    0x00b1: 0x2592,     # MEDIUM SHADE
    0x00b2: 0x2593,     # DARK SHADE
    0x00b3: 0x2502,     # BOX DRAWINGS LIGHT VERTICAL
    0x00b4: 0x2524,     # BOX DRAWINGS LIGHT VERTICAL AND LEFT
    0x00b5: 0x2561,     # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
    0x00b6: 0x2562,     # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
    0x00b7: 0x2556,     # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
    0x00b8: 0x2555,     # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
    0x00b9: 0x2563,     # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
    0x00ba: 0x2551,     # BOX DRAWINGS DOUBLE VERTICAL
    0x00bb: 0x2557,     # BOX DRAWINGS DOUBLE DOWN AND LEFT
    0x00bc: 0x255d,     # BOX DRAWINGS DOUBLE UP AND LEFT
    0x00bd: 0x255c,     # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
    0x00be: 0x255b,     # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
    0x00bf: 0x2510,     # BOX DRAWINGS LIGHT DOWN AND LEFT
    0x00c0: 0x2514,     # BOX DRAWINGS LIGHT UP AND RIGHT
    0x00c1: 0x2534,     # BOX DRAWINGS LIGHT UP AND HORIZONTAL
    0x00c2: 0x252c,     # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
    0x00c3: 0x251c,     # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
    0x00c4: 0x2500,     # BOX DRAWINGS LIGHT HORIZONTAL
    0x00c5: 0x253c,     # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
    0x00c6: 0x255e,     # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
    0x00c7: 0x255f,     # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
    0x00c8: 0x255a,     # BOX DRAWINGS DOUBLE UP AND RIGHT
    0x00c9: 0x2554,     # BOX DRAWINGS DOUBLE DOWN AND RIGHT
    0x00ca: 0x2569,     # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
    0x00cb: 0x2566,     # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
    0x00cc: 0x2560,     # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
    0x00cd: 0x2550,     # BOX DRAWINGS DOUBLE HORIZONTAL
    0x00ce: 0x256c,     # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
    0x00cf: 0x2567,     # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
    0x00d0: 0x2568,     # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
    0x00d1: 0x2564,     # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
    0x00d2: 0x2565,     # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
    0x00d3: 0x2559,     # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
    0x00d4: 0x2558,     # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
    0x00d5: 0x2552,     # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
    0x00d6: 0x2553,     # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
    0x00d7: 0x256b,     # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
    0x00d8: 0x256a,     # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
    0x00d9: 0x2518,     # BOX DRAWINGS LIGHT UP AND LEFT
    0x00da: 0x250c,     # BOX DRAWINGS LIGHT DOWN AND RIGHT
    0x00db: 0x2588,     # FULL BLOCK
    0x00dc: 0x2584,     # LOWER HALF BLOCK
    0x00dd: 0x258c,     # LEFT HALF BLOCK
    0x00de: 0x2590,     # RIGHT HALF BLOCK
    0x00df: 0x2580,     # UPPER HALF BLOCK
    0x00e0: 0x03b1,     # GREEK SMALL LETTER ALPHA
    0x00e1: 0x00df,     # LATIN SMALL LETTER SHARP S (GERMAN)
    0x00e2: 0x0393,     # GREEK CAPITAL LETTER GAMMA
    0x00e3: 0x03c0,     # GREEK SMALL LETTER PI
    0x00e4: 0x03a3,     # GREEK CAPITAL LETTER SIGMA
    0x00e5: 0x03c3,     # GREEK SMALL LETTER SIGMA
    0x00e6: 0x00b5,     # MICRO SIGN
    0x00e7: 0x03c4,     # GREEK SMALL LETTER TAU
    0x00e8: 0x03a6,     # GREEK CAPITAL LETTER PHI
    0x00e9: 0x0398,     # GREEK CAPITAL LETTER THETA
    0x00ea: 0x03a9,     # GREEK CAPITAL LETTER OMEGA
    0x00eb: 0x03b4,     # GREEK SMALL LETTER DELTA
    0x00ec: 0x221e,     # INFINITY
    0x00ed: 0x03c6,     # GREEK SMALL LETTER PHI
    0x00ee: 0x03b5,     # GREEK SMALL LETTER EPSILON
    0x00ef: 0x2229,     # INTERSECTION
    0x00f0: 0x2261,     # IDENTICAL TO
    0x00f1: 0x00b1,     # PLUS-MINUS SIGN
    0x00f2: 0x2265,     # GREATER-THAN OR EQUAL TO
    0x00f3: 0x2264,     # LESS-THAN OR EQUAL TO
    0x00f4: 0x2320,     # TOP HALF INTEGRAL
    0x00f5: 0x2321,     # BOTTOM HALF INTEGRAL
    0x00f6: 0x00f7,     # DIVISION SIGN
    0x00f7: 0x2248,     # ALMOST EQUAL TO
    0x00f8: 0x00b0,     # DEGREE SIGN
    0x00f9: 0x2219,     # BULLET OPERATOR
    0x00fa: 0x00b7,     # MIDDLE DOT
    0x00fb: 0x221a,     # SQUARE ROOT
    0x00fc: 0x207f,     # SUPERSCRIPT LATIN SMALL LETTER N
    0x00fd: 0x00b2,     # SUPERSCRIPT TWO
    0x00fe: 0x25a0,     # BLACK SQUARE
    0x00ff: 0x00a0,     # NO-BREAK SPACE
})

### Decoding Table

# Built from decoding_map instead of duplicating the mapping as a second
# 256-entry literal.  u'%c' % n is equivalent to unichr(n) on Python 2 and
# chr(n) on Python 3, so this stays usable from either interpreter.
decoding_table = u''.join([u'%c' % decoding_map[byte] for byte in range(256)])

### Encoding Map

# Invert decoding_map (which is injective for cp862), exactly as older
# gencodec.py output did, rather than maintaining a duplicate literal.
encoding_map = codecs.make_encoding_map(decoding_map)
| gpl-3.0 |
IceCTF/ctf-platform | api/api_manager.py | 1 | 12262 | #!/usr/bin/python3
"""
Problem management script.
"""
import argparse
import sys
import logging
import api
from api.common import APIException, InternalException
from os import path, walk, makedirs
from bson import json_util
import shutil
import glob
def check_files_exist(files):
    """Return True iff every path in *files* is an existing regular file.

    Logs a critical message and stops at the first missing file.
    """
    for candidate in files:
        if path.isfile(candidate):
            continue
        logging.critical("No such file {}".format(candidate))
        return False
    return True
def insert_objects(f, files):
    """Parse JSON objects from *files* and pass each one to callback *f*.

    Any APIException raised by *f* propagates to the caller.  The old
    ``except APIException: raise`` / ``exit(1)`` construct was a transparent
    re-raise followed by unreachable dead code, so it has been removed
    without changing behaviour.
    """
    objects = get_json_objects(files)
    for obj in objects:
        f(obj)
def get_json_objects(files):
    """Load JSON from each file and return a flat list of objects.

    A file may contain either a single object or a list of objects; any
    other top-level JSON value is skipped with a warning.
    """
    objects = []
    for filename in files:
        # Use a context manager so the handle is closed promptly; the old
        # ``open(f, "r").read()`` leaked the file object.
        with open(filename, "r") as json_fp:
            contents = json_fp.read()
        data = json_util.loads(contents)
        if isinstance(data, list):
            objects += data
        elif isinstance(data, dict):
            objects.append(data)
        else:
            logging.warning("JSON file {} did not contain an object or list of objects".format(filename))
    return objects
def migrate_problems(args):
    """Convert old-format problem JSON into the current schema.

    Reads problems from ``args.files``, renames/overwrites/deletes keys
    according to the tables below, rewrites weightmaps to be keyed by
    display name instead of pid, and writes one JSON object per line to
    ``args.output``.
    """
    files = args.files
    output_file = args.output
    if not check_files_exist(files):
        return
    # Old key name -> new key name.
    migration_key = {
        "displayname": "name",
        "basescore": "score",
        "desc": "description",
        "relatedproblems": "related_problems"
    }
    # Keys whose values are force-replaced (only if the key already exists).
    migration_overwrites = {
        "grader": "test.py",
        "autogen": False
    }
    # Keys dropped entirely from the migrated problem.
    deletion_key = ["_id", "pid", "generator", "submissiontype", "devnotes"]
    problems = get_json_objects(files)
    output = ""
    def get_display_name_from_pid(problems, pid):
        # Linear scan; returns None if no problem has the given pid.
        for problem in problems:
            if problem.get("pid") == pid:
                return problem.get("displayname")
    # Pass 1: re-key weightmaps from pid to display name.  This must happen
    # before pass 2, which deletes the "pid" field the lookup relies on.
    for problem in problems:
        if problem.get("weightmap"):
            new_map = {}
            for pid, num in problem["weightmap"].items():
                name = get_display_name_from_pid(problems, pid)
                new_map[name] = num
            problem["weightmap"] = new_map
    # Pass 2: apply the key migration/overwrite/deletion tables.
    for problem in problems:
        if "desc" not in problem:
            # Placeholder description for problems missing one.
            problem["desc"] = "I'm bad."
        # Iterate over a snapshot of the keys since the dict is mutated.
        for key in list(problem.keys()):
            if key in migration_key:
                problem[migration_key[key]] = problem[key]
            if key in migration_overwrites:
                problem[key] = migration_overwrites[key]
            # Renamed keys and deleted keys are both removed under their
            # old names; overwritten keys are kept.
            if key in migration_key or key in deletion_key:
                problem.pop(key, None)
        output += json_util.dumps(problem) + "\n"
    output_file.write(output)
def build_autogen(args):
    """Build ``args.instance_count`` instances for every autogen problem."""
    count = args.instance_count
    for problem in api.problem.get_all_problems(show_disabled=True):
        if not problem.get("autogen", False):
            continue
        api.autogen.build_problem_instances(problem["pid"], count)
def list_problems(args):
    """Print a one-line summary for every problem, including disabled ones."""
    #TODO: This could be improved
    for problem in api.problem.get_all_problems(show_disabled=True):
        state = "disabled" if problem["disabled"] else "enabled"
        print("{} ({}) - {} points".format(problem["name"], state, problem["score"]))
def clear_collections(args):
    """Delete every document from each named database collection."""
    db = api.common.get_conn()
    for name in args.collections:
        db[name].remove()
def get_output_file(output):
    """Return a writable file object for *output*.

    If *output* is already ``sys.stdout`` it is returned unchanged;
    otherwise it is treated as a path and opened for writing.  Exits the
    process with status 1 if the path cannot be opened.
    """
    # Bug fix: ``output == sys.stdout`` compared by equality; identity is the
    # correct check for a sentinel object (and ``==`` against a path string
    # was never meaningful).
    if output is sys.stdout:
        return output
    try:
        return open(output, "w")
    except IOError as error:
        logging.warning(error)
        exit(1)
def add_new_problems(args):
    """Import problems from JSON files, then log analysis warnings."""
    if check_files_exist(args.files):
        insert_objects(api.problem.insert_problem, args.files)
    # Analysis runs even when no files were inserted.
    for error in api.problem.analyze_problems():
        logging.warning(error)
def add_new_achievements(args):
    """Insert or update achievements from the given JSON files.

    Existing achievements (matched by hashed name) are updated in place;
    new ones are inserted.  APIException propagates to the caller -- the
    old ``except APIException: raise`` / ``exit(1)`` construct was a
    transparent re-raise followed by unreachable dead code.
    """
    if check_files_exist(args.files):
        objects = get_json_objects(args.files)
        for obj in objects:
            # Achievements are keyed by the hash of their name, consistent
            # with how problems are keyed in load_problems().
            aid = api.common.hash(obj["name"])
            if api.common.safe_fail(api.achievement.get_achievement, aid=aid):
                api.achievement.update_achievement(aid, obj)
            else:
                api.achievement.insert_achievement(obj)
def load_problems(args):
    """Load problems stored in the on-disk problem format.

    Walks ``args.problems_directory`` looking for ``problem.json`` files;
    for each problem found, copies its ``grader`` folder into the graders
    directory, inserts/updates the problem in the database, and copies any
    ``static`` folder into the static directory.  Finishes by logging
    problem-analysis warnings.
    """
    problem_dir = args.problems_directory[0]
    grader_dir = args.graders_directory[0]
    static_dir = args.static_directory[0]
    if not path.exists(static_dir):
        logging.debug("No directory {}. Creating...".format(static_dir))
        makedirs(static_dir)
    if not path.exists(problem_dir):
        logging.critical("No such directory: {}".format(problem_dir))
        return
    for (dirpath, dirnames, filenames) in walk(problem_dir):
        if "problem.json" not in filenames:
            continue
        json_file = path.join(dirpath, 'problem.json')
        # Context manager closes the handle promptly; the old
        # ``open(json_file, "r").read()`` leaked it.
        with open(json_file, "r") as json_fp:
            contents = json_fp.read()
        try:
            data = json_util.loads(contents)
        except ValueError as e:
            # Bug fix: the old message hard-coded "(unknown)" even though the
            # filename was passed as a (then-unused) format argument.
            logging.warning("Invalid JSON format in file {filename} ({exception})".format(filename=json_file,
                                                                                          exception=e))
            continue
        if not isinstance(data, dict):
            logging.warning("Invalid JSON format in file {}".format(json_file))
            continue
        if 'name' not in data:
            logging.warning("Invalid problem format in file {}".format(json_file))
            continue
        problem_name = data['name']
        relative_path = path.relpath(dirpath, problem_dir)
        logging.info("Found problem '{}'".format(problem_name))
        if 'grader' not in dirnames:
            logging.warning("Problem '{}' appears to have no grader folder. Skipping...".format(problem_name))
            continue
        # Replace any previously installed grader for this problem.
        grader_path = path.join(grader_dir, relative_path)
        if path.exists(grader_path):
            shutil.rmtree(grader_path)
        shutil.copytree(path.join(dirpath, 'grader'), grader_path)
        logging.info("Graders updated for problem {}".format(problem_name))
        try:
            # Problems are keyed by the hash of their name.
            if api.common.safe_fail(api.problem.get_problem, pid=api.common.hash(problem_name)):
                api.problem.update_problem(api.common.hash(problem_name), data)
            else:
                api.problem.insert_problem(data)
        except api.common.WebException as e:
            logging.info("Problem '{}' was not added to the database. Reason: {}".format(problem_name, e))
        if 'static' in dirnames:
            logging.info("Found a static directory for '{}'. Copying...".format(problem_name))
            static_path = path.join(static_dir, relative_path)
            if path.exists(static_path):
                shutil.rmtree(static_path)
            shutil.copytree(path.join(dirpath, 'static'), static_path)
    errors = api.problem.analyze_problems()
    for error in errors:
        logging.warning(error)
def recalculate_eligibility(args):
    """CLI handler for `team eligibility`: recompute eligibility for every team.

    args: argparse.Namespace from the dispatcher; unused beyond dispatch.
    """
    api.team.recalculate_all_eligibility()
def get_stats(args):
    """CLI handler for `stats show`: trigger a stats computation.

    args: argparse.Namespace from the dispatcher; unused beyond dispatch.
    NOTE(review): the return value of api.stats.get_stats() is discarded here —
    presumably the call prints or caches internally; confirm against the API.
    """
    api.stats.get_stats()
def change_user_password(args):
    """CLI handler for `user password`: set a new password for the given user.

    args.user is hashed into the internal user id before the update call.
    """
    api.user.update_password(api.common.hash(args.user), args.password)
def main():
    """Entry point for the problem-manager CLI.

    Builds the argparse command tree (autogen / problems / achievements /
    database / team / user / stats), configures logging verbosity from the
    mutually-exclusive -v/-s flags, and dispatches to whichever handler the
    selected sub-command registered via set_defaults(func=...).
    """
    parser = argparse.ArgumentParser(description="{} problem manager".format(api.config.competition_name))
    debug_level = parser.add_mutually_exclusive_group()
    debug_level.add_argument('-v', '--verbose', help="Print intermediate results", action="store_true")
    debug_level.add_argument('-s', '--silent', help="Print out very little", action="store_true")
    subparser = parser.add_subparsers(help='Select one of the following actions')

    # Autogen
    parser_autogen = subparser.add_parser('autogen', help='Deal with Problem Autogeneration')
    subparser_autogen = parser_autogen.add_subparsers(help='Select one of the following actions')
    parser_autogen_build = subparser_autogen.add_parser('build', help='Build new autogen instances')
    parser_autogen_build.add_argument("instance_count", type=int, help="How many instances of each problem to build")
    parser_autogen_build.set_defaults(func=build_autogen)

    # Problems
    parser_problems = subparser.add_parser('problems', help='Deal with Problems')
    subparser_problems = parser_problems.add_subparsers(help='Select one of the following actions')
    parser_problems_import = subparser_problems.add_parser('import', help='Import problems (from JSON) into the database')
    parser_problems_import.add_argument("files", nargs="+", help="Files containing problems to insert.")
    parser_problems_import.set_defaults(func=add_new_problems)
    parser_problems_load = subparser_problems.add_parser('load', help='Load problems that follow the special problem format')
    parser_problems_load.add_argument("problems_directory", nargs=1, help="Directory where problems are located")
    parser_problems_load.add_argument("graders_directory", nargs=1, help="Directory where graders are stored")
    parser_problems_load.add_argument("static_directory", nargs=1, help="Directory where static problem content is stored")
    parser_problems_load.set_defaults(func=load_problems)
    parser_problems_list = subparser_problems.add_parser('list', help='List problems in the database')
    parser_problems_list.set_defaults(func=list_problems)
    parser_problems_migrate = subparser_problems.add_parser('migrate', help='Migrate 2013 problems to the new format')
    # NOTE(review): the default is a file object (sys.stdout), not a path string —
    # migrate_problems presumably handles both; confirm before changing.
    parser_problems_migrate.add_argument('-o', '--output', action="store", help="Output file.", default=sys.stdout)
    parser_problems_migrate.set_defaults(func=migrate_problems)

    # Achievements
    parser_achievements = subparser.add_parser('achievements', help='Deal with Achievements')
    subparser_achievements = parser_achievements.add_subparsers(help='Select one of the following actions')
    parser_achievements_load = subparser_achievements.add_parser('load', help='Load new achievements into the database')
    parser_achievements_load.add_argument("files", nargs="+", help="Files containing achievements to insert.")
    parser_achievements_load.set_defaults(func=add_new_achievements)

    # Database
    parser_database = subparser.add_parser("database", help="Deal with database")
    subparser_database = parser_database.add_subparsers(help="Select one of the following actions")
    parser_database_clear = subparser_database.add_parser("clear", help="Clear collections")
    parser_database_clear.add_argument("collections", nargs="+", help="Collections to clear")
    parser_database_clear.set_defaults(func=clear_collections)

    # Teams
    parser_team = subparser.add_parser("team", help="Deal with team")
    subparser_team = parser_team.add_subparsers(help="Select one of the following actions")
    # Renamed local (was parser_team_clear): this is the eligibility sub-command.
    parser_team_eligibility = subparser_team.add_parser("eligibility", help="Recalculate Eligibility")
    parser_team_eligibility.set_defaults(func=recalculate_eligibility)

    # Users
    parser_user = subparser.add_parser("user", help="Deal with user")
    subparser_user = parser_user.add_subparsers(help="Select one of the following actions")
    parser_user_password = subparser_user.add_parser("password", help="Change user password")
    parser_user_password.add_argument("user", help="user")
    parser_user_password.add_argument("password", help="New user password")
    parser_user_password.set_defaults(func=change_user_password)

    # Stats
    parser_stats = subparser.add_parser("stats", help="Deal with stats")
    subparser_stats = parser_stats.add_subparsers(help="Select one of the following actions")
    # Renamed local (was parser_stats_clear): this is the show sub-command.
    parser_stats_show = subparser_stats.add_parser("show", help="Display stats")
    parser_stats_show.set_defaults(func=get_stats)

    args = parser.parse_args()

    # Map the verbosity flags onto a logging threshold.
    if args.silent:
        logging.basicConfig(level=logging.CRITICAL, stream=sys.stdout)
    elif args.verbose:
        logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)
    else:
        logging.basicConfig(level=logging.WARNING, stream=sys.stdout)

    # argparse.Namespace implements __contains__, so this checks whether the
    # chosen sub-command registered a handler; with no sub-command, show help.
    if 'func' in args:
        args.func(args)
    else:
        parser.print_help()
# NOTE(review): invoked unconditionally at module import time — there is no
# `if __name__ == '__main__':` guard, so importing this file runs the CLI.
main()
| mit |
BigBoss424/a-zplumbing | Magento-CE-2/update/vendor/justinrainbow/json-schema/docs/conf.py | 74 | 7837 | # -*- coding: utf-8 -*-
#
# JsonSchema documentation build configuration file, created by
# sphinx-quickstart on Sat Dec 10 15:34:44 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# NOTE(review): standard sphinx-quickstart-generated build configuration for the
# JsonSchema docs; only the commonly-tuned values are uncommented below.
# sys/os are only needed if the sys.path.insert line below is uncommented.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'JsonSchema'
copyright = u'2011, Justin Rainbow, Bruno Prieto Reis'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0.0'
# The full version, including alpha/beta/rc tags.
# NOTE(review): version and release are maintained by hand in two places;
# keep them in sync on every release.
release = '1.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar.  Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'JsonSchemadoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
  ('index', 'JsonSchema.tex', u'JsonSchema Documentation',
   u'Justin Rainbow, Bruno Prieto Reis', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'jsonschema', u'JsonSchema Documentation',
     [u'Justin Rainbow, Bruno Prieto Reis'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
# NOTE(review): the Texinfo description still contains the quickstart
# placeholder text 'One line description of project.'.
texinfo_documents = [
  ('index', 'JsonSchema', u'JsonSchema Documentation', u'Justin Rainbow, Bruno Prieto Reis',
   'JsonSchema', 'One line description of project.', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| gpl-3.0 |
burkesquires/pyeq2 | UnitTests/Test_IndividualPolyFunctions.py | 3 | 10863 | from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import sys, os, unittest
# the pyeq2 directory is located up one level from here
if -1 != sys.path[0].find('pyeq2-master'):raise Exception('Please rename git checkout directory from "pyeq2-master" to "pyeq2"')
if os.path.join(sys.path[0][:sys.path[0].rfind(os.sep)], '..') not in sys.path:
sys.path.append(os.path.join(sys.path[0][:sys.path[0].rfind(os.sep)], '..'))
import pyeq2
import numpy
numpy.seterr(all= 'ignore')
class TestPolyFunctions(unittest.TestCase):
def test_Offset_Term(self):
term = pyeq2.PolyFunctions.Offset_Term('varName', 'codeName')
htmlShouldBe = 'Offset'
self.assertEqual(term.HTML, htmlShouldBe)
cppShouldBe = 'Offset'
self.assertEqual(term.CPP, cppShouldBe)
testArray = numpy.array([1.0])
valueShouldBe = numpy.array([1.0])
self.assertEqual(term.value(testArray), valueShouldBe)
def test_ArcTangent_Term(self):
term = pyeq2.PolyFunctions.ArcTangent_Term('varName', 'codeName')
htmlShouldBe = 'atan(varName)'
self.assertEqual(term.HTML, htmlShouldBe)
cppShouldBe = 'atan(codeName)'
self.assertEqual(term.CPP, cppShouldBe)
testArray = numpy.array([1.0])
valueShouldBe = numpy.arctan(1.0)
self.assertEqual(term.value(testArray), valueShouldBe)
def test_Power_NegativeOne_Term(self):
term = pyeq2.PolyFunctions.PowerTerm('varName', 'codeName', powerString='-1.0', logFlag=False)
htmlShouldBe = 'varName<sup>-1.0</sup>'
self.assertEqual(term.HTML, htmlShouldBe)
cppShouldBe = 'pow(codeName, -1.0)'
self.assertEqual(term.CPP, cppShouldBe)
testArray = numpy.array([1.5])
valueShouldBe = numpy.power(1.5, -1.0)
self.assertEqual(term.value(testArray), valueShouldBe)
def test_HyperbolicCosine_Term(self):
term = pyeq2.PolyFunctions.HyperbolicCosine_Term('varName', 'codeName')
htmlShouldBe = 'cosh(varName)'
self.assertEqual(term.HTML, htmlShouldBe)
cppShouldBe = 'cosh(codeName)'
self.assertEqual(term.CPP, cppShouldBe)
testArray = numpy.array([1.5])
valueShouldBe = numpy.cosh(1.5)
self.assertEqual(term.value(testArray), valueShouldBe)
def test_Power_OnePointFive_Term(self):
term = pyeq2.PolyFunctions.PowerTerm('varName', 'codeName', powerString='1.5', logFlag=False)
htmlShouldBe = 'varName<sup>1.5</sup>'
self.assertEqual(term.HTML, htmlShouldBe)
cppShouldBe = 'pow(codeName, 1.5)'
self.assertEqual(term.CPP, cppShouldBe)
testArray = numpy.array([1.5])
valueShouldBe = numpy.power(1.5, 1.5)
self.assertEqual(term.value(testArray), valueShouldBe)
def test_Power_ZeroPointFive_Term(self):
term = pyeq2.PolyFunctions.PowerTerm('varName', 'codeName', powerString='0.5', logFlag=False)
htmlShouldBe = 'varName<sup>0.5</sup>'
self.assertEqual(term.HTML, htmlShouldBe)
cppShouldBe = 'pow(codeName, 0.5)'
self.assertEqual(term.CPP, cppShouldBe)
testArray = numpy.array([1.5])
valueShouldBe = numpy.power(1.5, 0.5)
self.assertEqual(term.value(testArray), valueShouldBe)
def test_VariableUnchanged_Term(self):
term = pyeq2.PolyFunctions.VariableUnchanged_Term('varName', 'codeName')
htmlShouldBe = 'varName'
self.assertEqual(term.HTML, htmlShouldBe)
cppShouldBe = 'codeName'
self.assertEqual(term.CPP, cppShouldBe)
testArray = numpy.array([1.5])
valueShouldBe = numpy.array([1.5])
self.assertEqual(term.value(testArray), valueShouldBe)
def test_Power_Two_Term(self):
term = pyeq2.PolyFunctions.PowerTerm('varName', 'codeName', powerString='2.0', logFlag=False)
htmlShouldBe = 'varName<sup>2.0</sup>'
self.assertEqual(term.HTML, htmlShouldBe)
cppShouldBe = 'pow(codeName, 2.0)'
self.assertEqual(term.CPP, cppShouldBe)
testArray = numpy.array([1.5])
valueShouldBe = numpy.power(1.5, 2.0)
self.assertEqual(term.value(testArray), valueShouldBe)
def test_HyperbolicSine_Term(self):
term = pyeq2.PolyFunctions.HyperbolicSine_Term('varName', 'codeName')
htmlShouldBe = 'sinh(varName)'
self.assertEqual(term.HTML, htmlShouldBe)
cppShouldBe = 'sinh(codeName)'
self.assertEqual(term.CPP, cppShouldBe)
testArray = numpy.array([1.5])
valueShouldBe = numpy.sinh(1.5)
self.assertEqual(term.value(testArray), valueShouldBe)
def test_Exponential_VariableUnchanged_Term(self):
term = pyeq2.PolyFunctions.Exponential_VariableUnchanged_Term('varName', 'codeName')
htmlShouldBe = 'exp(varName)'
self.assertEqual(term.HTML, htmlShouldBe)
cppShouldBe = 'exp(codeName)'
self.assertEqual(term.CPP, cppShouldBe)
testArray = numpy.array([1.5])
valueShouldBe = numpy.exp(1.5)
self.assertEqual(term.value(testArray), valueShouldBe)
def test_Exponential_VariableTimesNegativeOne_Term(self):
term = pyeq2.PolyFunctions.Exponential_VariableTimesNegativeOne_Term('varName', 'codeName')
htmlShouldBe = 'exp(-varName)'
self.assertEqual(term.HTML, htmlShouldBe)
cppShouldBe = 'exp(-1.0 * codeName)'
self.assertEqual(term.CPP, cppShouldBe)
testArray = numpy.array([1.5])
valueShouldBe = numpy.exp(-1.5)
self.assertEqual(term.value(testArray), valueShouldBe)
def test_Sine_Term(self):
term = pyeq2.PolyFunctions.Sine_Term('varName', 'codeName')
htmlShouldBe = 'sin(varName)'
self.assertEqual(term.HTML, htmlShouldBe)
cppShouldBe = 'sin(codeName)'
self.assertEqual(term.CPP, cppShouldBe)
testArray = numpy.array([1.5])
valueShouldBe = numpy.sin(1.5)
self.assertEqual(term.value(testArray), valueShouldBe)
def test_Cosine_Term(self):
term = pyeq2.PolyFunctions.Cosine_Term('varName', 'codeName')
htmlShouldBe = 'cos(varName)'
self.assertEqual(term.HTML, htmlShouldBe)
cppShouldBe = 'cos(codeName)'
self.assertEqual(term.CPP, cppShouldBe)
testArray = numpy.array([1.5])
valueShouldBe = numpy.cos(1.5)
self.assertEqual(term.value(testArray), valueShouldBe)
def test_Tangent_Term(self):
term = pyeq2.PolyFunctions.Tangent_Term('varName', 'codeName')
htmlShouldBe = 'tan(varName)'
self.assertEqual(term.HTML, htmlShouldBe)
cppShouldBe = 'tan(codeName)'
self.assertEqual(term.CPP, cppShouldBe)
testArray = numpy.array([1.5])
valueShouldBe = numpy.tan(1.5)
self.assertEqual(term.value(testArray), valueShouldBe)
def test_HyperbolicTangent_Term(self):
term = pyeq2.PolyFunctions.HyperbolicTangent_Term('varName', 'codeName')
htmlShouldBe = 'tanh(varName)'
self.assertEqual(term.HTML, htmlShouldBe)
cppShouldBe = 'tanh(codeName)'
self.assertEqual(term.CPP, cppShouldBe)
testArray = numpy.array([1.5])
valueShouldBe = numpy.tanh(1.5)
self.assertEqual(term.value(testArray), valueShouldBe)
def test_Power_NegativeZeroPointFive_Term(self):
term = pyeq2.PolyFunctions.PowerTerm('varName', 'codeName', powerString='-0.5', logFlag=False)
htmlShouldBe = 'varName<sup>-0.5</sup>'
self.assertEqual(term.HTML, htmlShouldBe)
cppShouldBe = 'pow(codeName, -0.5)'
self.assertEqual(term.CPP, cppShouldBe)
testArray = numpy.array([1.5])
valueShouldBe = numpy.power(1.5, -0.5)
self.assertEqual(term.value(testArray), valueShouldBe)
def test_Power_NegativeTwo_Term(self):
term = pyeq2.PolyFunctions.PowerTerm('varName', 'codeName', powerString='-2', logFlag=False)
htmlShouldBe = 'varName<sup>-2</sup>'
self.assertEqual(term.HTML, htmlShouldBe)
cppShouldBe = 'pow(codeName, -2)'
self.assertEqual(term.CPP, cppShouldBe)
testArray = numpy.array([1.5])
valueShouldBe = numpy.power(1.5, -2.0)
self.assertEqual(term.value(testArray), valueShouldBe)
def test_Log_Term(self):
term = pyeq2.PolyFunctions.Log_Term('varName', 'codeName')
htmlShouldBe = 'ln(varName)'
self.assertEqual(term.HTML, htmlShouldBe)
cppShouldBe = 'log(codeName)'
self.assertEqual(term.CPP, cppShouldBe)
testArray = numpy.array([1.5])
valueShouldBe = numpy.log(1.5)
self.assertEqual(term.value(testArray), valueShouldBe)
def test_Power_NegativeOne_OfLog_Term(self):
term = pyeq2.PolyFunctions.PowerTerm('varName', 'codeName', powerString='-1.0', logFlag=True)
htmlShouldBe = 'ln(varName)<sup>-1.0</sup>'
self.assertEqual(term.HTML, htmlShouldBe)
cppShouldBe = 'pow(log(codeName), -1.0)'
self.assertEqual(term.CPP, cppShouldBe)
testArray = numpy.array([1.5])
valueShouldBe = numpy.power(numpy.log(1.5), -1.0)
self.assertEqual(term.value(testArray), valueShouldBe)
def test_Power_Two_OfLog_Term(self):
term = pyeq2.PolyFunctions.PowerTerm('varName', 'codeName', powerString='2.0', logFlag=True)
htmlShouldBe = 'ln(varName)<sup>2.0</sup>'
self.assertEqual(term.HTML, htmlShouldBe)
cppShouldBe = 'pow(log(codeName), 2.0)'
self.assertEqual(term.CPP, cppShouldBe)
testArray = numpy.array([1.5])
valueShouldBe = numpy.power(numpy.log(1.5), 2.0)
self.assertEqual(term.value(testArray), valueShouldBe)
def test_Power_NegativeTwo_OfLog_Term(self):
term = pyeq2.PolyFunctions.PowerTerm('varName', 'codeName', powerString='-2.0', logFlag=True)
htmlShouldBe = 'ln(varName)<sup>-2.0</sup>'
self.assertEqual(term.HTML, htmlShouldBe)
cppShouldBe = 'pow(log(codeName), -2.0)'
self.assertEqual(term.CPP, cppShouldBe)
testArray = numpy.array([1.5])
valueShouldBe = numpy.power(numpy.log(1.5), -2.0)
self.assertEqual(term.value(testArray), valueShouldBe)
# Allow running this test module directly (in addition to test discovery).
if __name__ == '__main__':
    unittest.main()
| bsd-2-clause |
mheap/ansible | test/units/parsing/utils/test_addresses.py | 135 | 3742 | # -*- coding: utf-8 -*-
import unittest
from ansible.parsing.utils.addresses import parse_address
class TestParseAddress(unittest.TestCase):
    """Table-driven tests for ansible.parsing.utils.addresses.parse_address.

    Each table maps an input string to [expected_host, expected_port];
    an entry of [None, None] means parsing is expected to fail (raise).
    """

    tests = {
        # IPv4 addresses
        '192.0.2.3': ['192.0.2.3', None],
        '192.0.2.3:23': ['192.0.2.3', 23],
        # IPv6 addresses
        '::': ['::', None],
        '::1': ['::1', None],
        '[::1]:442': ['::1', 442],
        'abcd:ef98:7654:3210:abcd:ef98:7654:3210': ['abcd:ef98:7654:3210:abcd:ef98:7654:3210', None],
        '[abcd:ef98:7654:3210:abcd:ef98:7654:3210]:42': ['abcd:ef98:7654:3210:abcd:ef98:7654:3210', 42],
        '1234:5678:9abc:def0:1234:5678:9abc:def0': ['1234:5678:9abc:def0:1234:5678:9abc:def0', None],
        '1234::9abc:def0:1234:5678:9abc:def0': ['1234::9abc:def0:1234:5678:9abc:def0', None],
        '1234:5678::def0:1234:5678:9abc:def0': ['1234:5678::def0:1234:5678:9abc:def0', None],
        '1234:5678:9abc::1234:5678:9abc:def0': ['1234:5678:9abc::1234:5678:9abc:def0', None],
        '1234:5678:9abc:def0::5678:9abc:def0': ['1234:5678:9abc:def0::5678:9abc:def0', None],
        '1234:5678:9abc:def0:1234::9abc:def0': ['1234:5678:9abc:def0:1234::9abc:def0', None],
        '1234:5678:9abc:def0:1234:5678::def0': ['1234:5678:9abc:def0:1234:5678::def0', None],
        '1234:5678:9abc:def0:1234:5678::': ['1234:5678:9abc:def0:1234:5678::', None],
        '::9abc:def0:1234:5678:9abc:def0': ['::9abc:def0:1234:5678:9abc:def0', None],
        '0:0:0:0:0:ffff:1.2.3.4': ['0:0:0:0:0:ffff:1.2.3.4', None],
        '0:0:0:0:0:0:1.2.3.4': ['0:0:0:0:0:0:1.2.3.4', None],
        '::ffff:1.2.3.4': ['::ffff:1.2.3.4', None],
        '::1.2.3.4': ['::1.2.3.4', None],
        '1234::': ['1234::', None],
        # Hostnames
        'some-host': ['some-host', None],
        'some-host:80': ['some-host', 80],
        'some.host.com:492': ['some.host.com', 492],
        '[some.host.com]:493': ['some.host.com', 493],
        'a-b.3foo_bar.com:23': ['a-b.3foo_bar.com', 23],
        u'fóöbär': [u'fóöbär', None],
        u'fóöbär:32': [u'fóöbär', 32],
        u'fóöbär.éxàmplê.com:632': [u'fóöbär.éxàmplê.com', 632],
        # Various errors
        '': [None, None],
        'some..host': [None, None],
        'some.': [None, None],
        '[example.com]': [None, None],
        'some-': [None, None],
        'some-.foo.com': [None, None],
        'some.-foo.com': [None, None],
    }

    range_tests = {
        '192.0.2.[3:10]': ['192.0.2.[3:10]', None],
        '192.0.2.[3:10]:23': ['192.0.2.[3:10]', 23],
        'abcd:ef98::7654:[1:9]': ['abcd:ef98::7654:[1:9]', None],
        '[abcd:ef98::7654:[6:32]]:2222': ['abcd:ef98::7654:[6:32]', 2222],
        '[abcd:ef98::7654:[9ab3:fcb7]]:2222': ['abcd:ef98::7654:[9ab3:fcb7]', 2222],
        u'fóöb[a:c]r.éxàmplê.com:632': [u'fóöb[a:c]r.éxàmplê.com', 632],
        '[a:b]foo.com': ['[a:b]foo.com', None],
        'foo[a:b].com': ['foo[a:b].com', None],
        'foo[a:b]:42': ['foo[a:b]', 42],
        'foo[a-b]-.com': [None, None],
        'foo[a-b]:32': [None, None],
        'foo[x-y]': [None, None],
    }

    def test_without_ranges(self):
        for address, (expected_host, expected_port) in self.tests.items():
            host = port = None
            try:
                (host, port) = parse_address(address)
            except Exception:
                # A parse failure is the expected outcome for [None, None] rows.
                # (Narrowed from a bare `except:`, which also swallowed
                # SystemExit/KeyboardInterrupt.)
                pass
            self.assertEqual(host, expected_host)
            self.assertEqual(port, expected_port)

    def test_with_ranges(self):
        for address, (expected_host, expected_port) in self.range_tests.items():
            host = port = None
            try:
                (host, port) = parse_address(address, allow_ranges=True)
            except Exception:
                # Expected for the [None, None] rows; see note above.
                pass
            # assertEqual (unlike a bare assert) survives `python -O` and
            # reports the failing input values.
            self.assertEqual(host, expected_host)
            self.assertEqual(port, expected_port)
| gpl-3.0 |
alexlo03/ansible | test/units/plugins/action/test_action.py | 21 | 27119 | # -*- coding: utf-8 -*-
# (c) 2015, Florian Apolloner <florian@apolloner.eu>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import re
from ansible import constants as C
from units.compat import unittest
from units.compat.mock import patch, MagicMock, mock_open
from ansible.errors import AnsibleError
from ansible.module_utils.six import text_type
from ansible.module_utils.six.moves import shlex_quote, builtins
from ansible.module_utils._text import to_bytes
from ansible.playbook.play_context import PlayContext
from ansible.plugins.action import ActionBase
from ansible.template import Templar
from ansible.vars.clean import clean_facts
from units.mock.loader import DictDataLoader
# Minimal fake module sources fed to _configure_module() through a patched
# open().  The <<...>> tokens are the replacer markers that the module
# packaging code substitutes when building a module payload.
python_module_replacers = br"""
#!/usr/bin/python
#ANSIBLE_VERSION = "<<ANSIBLE_VERSION>>"
#MODULE_COMPLEX_ARGS = "<<INCLUDE_ANSIBLE_MODULE_COMPLEX_ARGS>>"
#SELINUX_SPECIAL_FS="<<SELINUX_SPECIAL_FILESYSTEMS>>"
test = u'Toshio \u304f\u3089\u3068\u307f'
from ansible.module_utils.basic import *
"""
# PowerShell flavour of the same fixture.
powershell_module_replacers = b"""
WINDOWS_ARGS = "<<INCLUDE_ANSIBLE_MODULE_JSON_ARGS>>"
# POWERSHELL_COMMON
"""
class DerivedActionBase(ActionBase):
    """Minimal concrete ActionBase subclass so its helper methods can be tested."""
    TRANSFERS_FILES = False

    def run(self, tmp=None, task_vars=None):
        # We're not testing the plugin run() method, just the helper
        # methods ActionBase defines
        return super(DerivedActionBase, self).run(tmp=tmp, task_vars=task_vars)
class TestActionBase(unittest.TestCase):
def test_action_base_run(self):
mock_task = MagicMock()
mock_task.action = "foo"
mock_task.args = dict(a=1, b=2, c=3)
mock_connection = MagicMock()
play_context = PlayContext()
mock_task.async_val = None
action_base = DerivedActionBase(mock_task, mock_connection, play_context, None, None, None)
results = action_base.run()
self.assertEqual(results, dict())
mock_task.async_val = 0
action_base = DerivedActionBase(mock_task, mock_connection, play_context, None, None, None)
results = action_base.run()
self.assertEqual(results, {})
    def test_action_base__configure_module(self):
        """_configure_module() detects module style/shebang for Python and
        PowerShell modules, and raises AnsibleError for unknown modules.

        open() is patched so the fixture source in *_module_replacers is read
        instead of a real module file; os.rename is patched to avoid touching
        the filesystem.  The patch ordering matters — keep the setup sequence.
        """
        fake_loader = DictDataLoader({
        })
        # create our fake task
        mock_task = MagicMock()
        mock_task.action = "copy"
        mock_task.async_val = 0
        # create a mock connection, so we don't actually try and connect to things
        mock_connection = MagicMock()
        # create a mock shared loader object
        def mock_find_plugin(name, options):
            # 'badmodule' simulates a module the loader cannot find.
            if name == 'badmodule':
                return None
            elif '.ps1' in options:
                return '/fake/path/to/%s.ps1' % name
            else:
                return '/fake/path/to/%s' % name
        mock_module_loader = MagicMock()
        mock_module_loader.find_plugin.side_effect = mock_find_plugin
        mock_shared_obj_loader = MagicMock()
        mock_shared_obj_loader.module_loader = mock_module_loader
        # we're using a real play context here
        play_context = PlayContext()
        # our test class
        action_base = DerivedActionBase(
            task=mock_task,
            connection=mock_connection,
            play_context=play_context,
            loader=fake_loader,
            templar=None,
            shared_loader_obj=mock_shared_obj_loader,
        )
        # test python module formatting
        with patch.object(builtins, 'open', mock_open(read_data=to_bytes(python_module_replacers.strip(), encoding='utf-8'))):
            with patch.object(os, 'rename'):
                # non-ASCII arg value exercises the unicode path of packaging
                mock_task.args = dict(a=1, foo='fö〩')
                mock_connection.module_implementation_preferences = ('',)
                (style, shebang, data, path) = action_base._configure_module(mock_task.action, mock_task.args)
                self.assertEqual(style, "new")
                self.assertEqual(shebang, u"#!/usr/bin/python")
                # test module not found
                self.assertRaises(AnsibleError, action_base._configure_module, 'badmodule', mock_task.args)
        # test powershell module formatting
        with patch.object(builtins, 'open', mock_open(read_data=to_bytes(powershell_module_replacers.strip(), encoding='utf-8'))):
            mock_task.action = 'win_copy'
            mock_task.args = dict(b=2)
            mock_connection.module_implementation_preferences = ('.ps1',)
            (style, shebang, data, path) = action_base._configure_module('stat', mock_task.args)
            self.assertEqual(style, "new")
            self.assertEqual(shebang, u'#!powershell')
            # test module not found
            self.assertRaises(AnsibleError, action_base._configure_module, 'badmodule', mock_task.args)
def test_action_base__compute_environment_string(self):
fake_loader = DictDataLoader({
})
# create our fake task
mock_task = MagicMock()
mock_task.action = "copy"
mock_task.args = dict(a=1)
# create a mock connection, so we don't actually try and connect to things
def env_prefix(**args):
return ' '.join(['%s=%s' % (k, shlex_quote(text_type(v))) for k, v in args.items()])
mock_connection = MagicMock()
mock_connection._shell.env_prefix.side_effect = env_prefix
# we're using a real play context here
play_context = PlayContext()
# and we're using a real templar here too
templar = Templar(loader=fake_loader)
# our test class
action_base = DerivedActionBase(
task=mock_task,
connection=mock_connection,
play_context=play_context,
loader=fake_loader,
templar=templar,
shared_loader_obj=None,
)
# test standard environment setup
mock_task.environment = [dict(FOO='foo'), None]
env_string = action_base._compute_environment_string()
self.assertEqual(env_string, "FOO=foo")
# test where environment is not a list
mock_task.environment = dict(FOO='foo')
env_string = action_base._compute_environment_string()
self.assertEqual(env_string, "FOO=foo")
# test environment with a variable in it
templar.set_available_variables(variables=dict(the_var='bar'))
mock_task.environment = [dict(FOO='{{the_var}}')]
env_string = action_base._compute_environment_string()
self.assertEqual(env_string, "FOO=bar")
# test with a bad environment set
mock_task.environment = dict(FOO='foo')
mock_task.environment = ['hi there']
self.assertRaises(AnsibleError, action_base._compute_environment_string)
def test_action_base__early_needs_tmp_path(self):
# create our fake task
mock_task = MagicMock()
# create a mock connection, so we don't actually try and connect to things
mock_connection = MagicMock()
# we're using a real play context here
play_context = PlayContext()
# our test class
action_base = DerivedActionBase(
task=mock_task,
connection=mock_connection,
play_context=play_context,
loader=None,
templar=None,
shared_loader_obj=None,
)
self.assertFalse(action_base._early_needs_tmp_path())
action_base.TRANSFERS_FILES = True
self.assertTrue(action_base._early_needs_tmp_path())
    def test_action_base__make_tmp_path(self):
        """Exercise _make_tmp_path(): the happy path plus each failure branch."""
        # create our fake task
        mock_task = MagicMock()
        # Shell-plugin options the action base queries while building the
        # mkdtemp command; anything else resolves to None.
        def get_shell_opt(opt):
            ret = None
            if opt == 'admin_users':
                ret = ['root', 'toor', 'Administrator']
            elif opt == 'remote_tmp':
                ret = '~/.ansible/tmp'
            return ret
        # create a mock connection, so we don't actually try and connect to things
        mock_connection = MagicMock()
        mock_connection.transport = 'ssh'
        mock_connection._shell.mkdtemp.return_value = 'mkdir command'
        mock_connection._shell.join_path.side_effect = os.path.join
        mock_connection._shell.get_option = get_shell_opt
        mock_connection._shell.HOMES_RE = re.compile(r'(\'|\")?(~|\$HOME)(.*)')
        # we're using a real play context here
        play_context = PlayContext()
        play_context.become = True
        play_context.become_user = 'foo'
        # our test class
        action_base = DerivedActionBase(
            task=mock_task,
            connection=mock_connection,
            play_context=play_context,
            loader=None,
            templar=None,
            shared_loader_obj=None,
        )
        # Each case below fakes one result of the remote mkdir command.
        action_base._low_level_execute_command = MagicMock()
        # rc 0 with a path on stdout: success; a trailing slash is appended.
        action_base._low_level_execute_command.return_value = dict(rc=0, stdout='/some/path')
        self.assertEqual(action_base._make_tmp_path('root'), '/some/path/')
        # empty path fails
        action_base._low_level_execute_command.return_value = dict(rc=0, stdout='')
        self.assertRaises(AnsibleError, action_base._make_tmp_path, 'root')
        # authentication failure
        action_base._low_level_execute_command.return_value = dict(rc=5, stdout='')
        self.assertRaises(AnsibleError, action_base._make_tmp_path, 'root')
        # ssh error
        action_base._low_level_execute_command.return_value = dict(rc=255, stdout='', stderr='')
        self.assertRaises(AnsibleError, action_base._make_tmp_path, 'root')
        # Higher verbosity still raises for rc=255 — presumably with a more
        # detailed message (TODO confirm against _make_tmp_path).
        play_context.verbosity = 5
        self.assertRaises(AnsibleError, action_base._make_tmp_path, 'root')
        # general error
        action_base._low_level_execute_command.return_value = dict(rc=1, stdout='some stuff here', stderr='')
        self.assertRaises(AnsibleError, action_base._make_tmp_path, 'root')
        # "No space left on device" on stderr is raised as an error as well.
        action_base._low_level_execute_command.return_value = dict(rc=1, stdout='some stuff here', stderr='No space left on device')
        self.assertRaises(AnsibleError, action_base._make_tmp_path, 'root')
def test_action_base__remove_tmp_path(self):
# create our fake task
mock_task = MagicMock()
# create a mock connection, so we don't actually try and connect to things
mock_connection = MagicMock()
mock_connection._shell.remove.return_value = 'rm some stuff'
# we're using a real play context here
play_context = PlayContext()
# our test class
action_base = DerivedActionBase(
task=mock_task,
connection=mock_connection,
play_context=play_context,
loader=None,
templar=None,
shared_loader_obj=None,
)
action_base._low_level_execute_command = MagicMock()
# these don't really return anything or raise errors, so
# we're pretty much calling these for coverage right now
action_base._remove_tmp_path('/bad/path/dont/remove')
action_base._remove_tmp_path('/good/path/to/ansible-tmp-thing')
    @patch('os.unlink')
    @patch('os.fdopen')
    @patch('tempfile.mkstemp')
    def test_action_base__transfer_data(self, mock_mkstemp, mock_fdopen, mock_unlink):
        """_transfer_data() stages data via a local tempfile and returns the remote path."""
        # create our fake task
        mock_task = MagicMock()
        # create a mock connection, so we don't actually try and connect to things
        mock_connection = MagicMock()
        mock_connection.put_file.return_value = None
        # we're using a real play context here
        play_context = PlayContext()
        # our test class
        action_base = DerivedActionBase(
            task=mock_task,
            connection=mock_connection,
            play_context=play_context,
            loader=None,
            templar=None,
            shared_loader_obj=None,
        )
        # Fake the local tempfile machinery (mkstemp/fdopen/unlink are patched
        # above, so no real files are created).
        mock_afd = MagicMock()
        mock_afile = MagicMock()
        mock_mkstemp.return_value = (mock_afd, mock_afile)
        mock_unlink.return_value = None
        mock_afo = MagicMock()
        mock_afo.write.return_value = None
        mock_afo.flush.return_value = None
        mock_afo.close.return_value = None
        mock_fdopen.return_value = mock_afo
        # Plain text, non-ASCII text, dicts, and dicts with non-ASCII values
        # must all be accepted; the remote path is returned unchanged.
        self.assertEqual(action_base._transfer_data('/path/to/remote/file', 'some data'), '/path/to/remote/file')
        self.assertEqual(action_base._transfer_data('/path/to/remote/file', 'some mixed data: fö〩'), '/path/to/remote/file')
        self.assertEqual(action_base._transfer_data('/path/to/remote/file', dict(some_key='some value')), '/path/to/remote/file')
        self.assertEqual(action_base._transfer_data('/path/to/remote/file', dict(some_key='fö〩')), '/path/to/remote/file')
        # A failure while writing the staging file surfaces as AnsibleError.
        mock_afo.write.side_effect = Exception()
        self.assertRaises(AnsibleError, action_base._transfer_data, '/path/to/remote/file', '')
    def test_action_base__execute_remote_stat(self):
        """Map faked remote stat-module results through _execute_remote_stat()."""
        # create our fake task
        mock_task = MagicMock()
        # create a mock connection, so we don't actually try and connect to things
        mock_connection = MagicMock()
        # we're using a real play context here
        play_context = PlayContext()
        # our test class
        action_base = DerivedActionBase(
            task=mock_task,
            connection=mock_connection,
            play_context=play_context,
            loader=None,
            templar=None,
            shared_loader_obj=None,
        )
        # Stub out module execution; each case below fakes one stat result.
        action_base._execute_module = MagicMock()
        # test normal case
        action_base._execute_module.return_value = dict(stat=dict(checksum='1111111111111111111111111111111111', exists=True))
        res = action_base._execute_remote_stat(path='/path/to/file', all_vars=dict(), follow=False)
        self.assertEqual(res['checksum'], '1111111111111111111111111111111111')
        # test does not exist
        action_base._execute_module.return_value = dict(stat=dict(exists=False))
        res = action_base._execute_remote_stat(path='/path/to/file', all_vars=dict(), follow=False)
        self.assertFalse(res['exists'])
        # A missing file is reported with the sentinel checksum '1'.
        self.assertEqual(res['checksum'], '1')
        # test no checksum in result from _execute_module
        action_base._execute_module.return_value = dict(stat=dict(exists=True))
        res = action_base._execute_remote_stat(path='/path/to/file', all_vars=dict(), follow=False)
        self.assertTrue(res['exists'])
        self.assertEqual(res['checksum'], '')
        # test stat call failed
        action_base._execute_module.return_value = dict(failed=True, msg="because I said so")
        self.assertRaises(AnsibleError, action_base._execute_remote_stat, path='/path/to/file', all_vars=dict(), follow=False)
    def test_action_base__execute_module(self):
        """Drive _execute_module() through module-style, become, and check-mode branches."""
        # create our fake task
        mock_task = MagicMock()
        mock_task.action = 'copy'
        mock_task.args = dict(a=1, b=2, c=3)
        # create a mock connection, so we don't actually try and connect to things
        def build_module_command(env_string, shebang, cmd, arg_path=None):
            # Mimic the shell plugin: join env prefix, command and optional args path.
            to_run = [env_string, cmd]
            if arg_path:
                to_run.append(arg_path)
            return " ".join(to_run)
        def get_option(option):
            return {'admin_users': ['root', 'toor']}.get(option)
        mock_connection = MagicMock()
        mock_connection.build_module_command.side_effect = build_module_command
        mock_connection.socket_path = None
        mock_connection._shell.get_remote_filename.return_value = 'copy.py'
        mock_connection._shell.join_path.side_effect = os.path.join
        mock_connection._shell.tmpdir = '/var/tmp/mytempdir'
        mock_connection._shell.get_option = get_option
        # we're using a real play context here
        play_context = PlayContext()
        # our test class
        action_base = DerivedActionBase(
            task=mock_task,
            connection=mock_connection,
            play_context=play_context,
            loader=None,
            templar=None,
            shared_loader_obj=None,
        )
        # fake a lot of methods as we test those elsewhere
        action_base._configure_module = MagicMock()
        action_base._supports_check_mode = MagicMock()
        action_base._is_pipelining_enabled = MagicMock()
        action_base._make_tmp_path = MagicMock()
        action_base._transfer_data = MagicMock()
        action_base._compute_environment_string = MagicMock()
        action_base._low_level_execute_command = MagicMock()
        action_base._fixup_perms2 = MagicMock()
        # Baseline: a 'new'-style module whose remote run prints valid JSON.
        action_base._configure_module.return_value = ('new', '#!/usr/bin/python', 'this is the module data', 'path')
        action_base._is_pipelining_enabled.return_value = False
        action_base._compute_environment_string.return_value = ''
        action_base._connection.has_pipelining = False
        action_base._make_tmp_path.return_value = '/the/tmp/path'
        action_base._low_level_execute_command.return_value = dict(stdout='{"rc": 0, "stdout": "ok"}')
        self.assertEqual(action_base._execute_module(module_name=None, module_args=None), dict(_ansible_parsed=True, rc=0, stdout="ok", stdout_lines=['ok']))
        # Explicit module name/args/task_vars must yield the same parsed result.
        self.assertEqual(
            action_base._execute_module(
                module_name='foo',
                module_args=dict(z=9, y=8, x=7),
                task_vars=dict(a=1)
            ),
            dict(
                _ansible_parsed=True,
                rc=0,
                stdout="ok",
                stdout_lines=['ok'],
            )
        )
        # test with needing/removing a remote tmp path
        action_base._configure_module.return_value = ('old', '#!/usr/bin/python', 'this is the module data', 'path')
        action_base._is_pipelining_enabled.return_value = False
        action_base._make_tmp_path.return_value = '/the/tmp/path'
        self.assertEqual(action_base._execute_module(), dict(_ansible_parsed=True, rc=0, stdout="ok", stdout_lines=['ok']))
        action_base._configure_module.return_value = ('non_native_want_json', '#!/usr/bin/python', 'this is the module data', 'path')
        self.assertEqual(action_base._execute_module(), dict(_ansible_parsed=True, rc=0, stdout="ok", stdout_lines=['ok']))
        # Becoming another user must not change the parsed result.
        play_context.become = True
        play_context.become_user = 'foo'
        self.assertEqual(action_base._execute_module(), dict(_ansible_parsed=True, rc=0, stdout="ok", stdout_lines=['ok']))
        # test an invalid shebang return
        action_base._configure_module.return_value = ('new', '', 'this is the module data', 'path')
        action_base._is_pipelining_enabled.return_value = False
        action_base._make_tmp_path.return_value = '/the/tmp/path'
        self.assertRaises(AnsibleError, action_base._execute_module)
        # test with check mode enabled, once with support for check
        # mode and once with support disabled to raise an error
        play_context.check_mode = True
        action_base._configure_module.return_value = ('new', '#!/usr/bin/python', 'this is the module data', 'path')
        self.assertEqual(action_base._execute_module(), dict(_ansible_parsed=True, rc=0, stdout="ok", stdout_lines=['ok']))
        action_base._supports_check_mode = False
        self.assertRaises(AnsibleError, action_base._execute_module)
    def test_action_base_sudo_only_if_user_differs(self):
        """make_become_cmd runs only when become_user differs (or the global override is set)."""
        fake_loader = MagicMock()
        fake_loader.get_basedir.return_value = os.getcwd()
        play_context = PlayContext()
        action_base = DerivedActionBase(None, None, play_context, fake_loader, None, None)
        action_base._connection = MagicMock(exec_command=MagicMock(return_value=(0, '', '')))
        action_base._connection._shell = MagicMock(append_command=MagicMock(return_value=('JOINED CMD')))
        play_context.become = True
        play_context.become_user = play_context.remote_user = 'root'
        play_context.make_become_cmd = MagicMock(return_value='CMD')
        # Same remote and become user: no become wrapping expected.
        action_base._low_level_execute_command('ECHO', sudoable=True)
        play_context.make_become_cmd.assert_not_called()
        # Different users: the command must be wrapped, honoring the executable.
        play_context.remote_user = 'apo'
        action_base._low_level_execute_command('ECHO', sudoable=True, executable='/bin/csh')
        play_context.make_become_cmd.assert_called_once_with("ECHO", executable='/bin/csh')
        play_context.make_become_cmd.reset_mock()
        # BECOME_ALLOW_SAME_USER forces wrapping even for identical users;
        # restore the module-level constant no matter what.
        become_allow_same_user = C.BECOME_ALLOW_SAME_USER
        C.BECOME_ALLOW_SAME_USER = True
        try:
            play_context.remote_user = 'root'
            action_base._low_level_execute_command('ECHO SAME', sudoable=True)
            play_context.make_become_cmd.assert_called_once_with("ECHO SAME", executable=None)
        finally:
            C.BECOME_ALLOW_SAME_USER = become_allow_same_user
class TestActionBaseCleanReturnedData(unittest.TestCase):
    """clean_facts() must strip controller-internal vars but keep host facts."""
    def test(self):
        fake_loader = DictDataLoader({
        })
        mock_module_loader = MagicMock()
        mock_shared_loader_obj = MagicMock()
        mock_shared_loader_obj.module_loader = mock_module_loader
        # Plugin paths the fake connection loader will report: a mix of noise
        # and realistic connection-plugin file paths.
        connection_loader_paths = ['/tmp/asdfadf', '/usr/lib64/whatever',
                                   'dfadfasf',
                                   'foo.py',
                                   '.*',
                                   # FIXME: a path with parans breaks the regex
                                   # '(.*)',
                                   '/path/to/ansible/lib/ansible/plugins/connection/custom_connection.py',
                                   '/path/to/ansible/lib/ansible/plugins/connection/ssh.py']
        def fake_all(path_only=None):
            # Generator mimicking PluginLoader.all(); yields the paths above.
            for path in connection_loader_paths:
                yield path
        mock_connection_loader = MagicMock()
        mock_connection_loader.all = fake_all
        mock_shared_loader_obj.connection_loader = mock_connection_loader
        mock_connection = MagicMock()
        # mock_connection._shell.env_prefix.side_effect = env_prefix
        # action_base = DerivedActionBase(mock_task, mock_connection, play_context, None, None, None)
        action_base = DerivedActionBase(task=None,
                                        connection=mock_connection,
                                        play_context=None,
                                        loader=fake_loader,
                                        templar=None,
                                        shared_loader_obj=mock_shared_loader_obj)
        data = {'ansible_playbook_python': '/usr/bin/python',
                # 'ansible_rsync_path': '/usr/bin/rsync',
                'ansible_python_interpreter': '/usr/bin/python',
                'ansible_ssh_some_var': 'whatever',
                'ansible_ssh_host_key_somehost': 'some key here',
                'some_other_var': 'foo bar'}
        data = clean_facts(data)
        # Controller-side details must be removed ...
        self.assertNotIn('ansible_playbook_python', data)
        self.assertNotIn('ansible_python_interpreter', data)
        # ... while ssh host keys and ordinary facts survive.
        self.assertIn('ansible_ssh_host_key_somehost', data)
        self.assertIn('some_other_var', data)
class TestActionBaseParseReturnedData(unittest.TestCase):
    """Cover _parse_returned_data(): junk output, empty JSON, facts, add_host."""
    def _action_base(self):
        # Helper: build a DerivedActionBase wired with throwaway mocks/loaders.
        fake_loader = DictDataLoader({
        })
        mock_module_loader = MagicMock()
        mock_shared_loader_obj = MagicMock()
        mock_shared_loader_obj.module_loader = mock_module_loader
        mock_connection_loader = MagicMock()
        mock_shared_loader_obj.connection_loader = mock_connection_loader
        mock_connection = MagicMock()
        action_base = DerivedActionBase(task=None,
                                        connection=mock_connection,
                                        play_context=None,
                                        loader=fake_loader,
                                        templar=None,
                                        shared_loader_obj=mock_shared_loader_obj)
        return action_base
    def test_fail_no_json(self):
        # Non-JSON stdout is a parse failure: failed=True, stderr preserved.
        action_base = self._action_base()
        rc = 0
        stdout = 'foo\nbar\n'
        err = 'oopsy'
        returned_data = {'rc': rc,
                         'stdout': stdout,
                         'stdout_lines': stdout.splitlines(),
                         'stderr': err}
        res = action_base._parse_returned_data(returned_data)
        self.assertFalse(res['_ansible_parsed'])
        self.assertTrue(res['failed'])
        self.assertEqual(res['module_stderr'], err)
    def test_json_empty(self):
        # An empty JSON object parses to an empty result (plus _ansible_parsed).
        action_base = self._action_base()
        rc = 0
        stdout = '{}\n'
        err = ''
        returned_data = {'rc': rc,
                         'stdout': stdout,
                         'stdout_lines': stdout.splitlines(),
                         'stderr': err}
        res = action_base._parse_returned_data(returned_data)
        del res['_ansible_parsed']  # we always have _ansible_parsed
        self.assertEqual(len(res), 0)
        self.assertFalse(res)
    def test_json_facts(self):
        # ansible_facts in module output must be carried through intact.
        action_base = self._action_base()
        rc = 0
        stdout = '{"ansible_facts": {"foo": "bar", "ansible_blip": "blip_value"}}\n'
        err = ''
        returned_data = {'rc': rc,
                         'stdout': stdout,
                         'stdout_lines': stdout.splitlines(),
                         'stderr': err}
        res = action_base._parse_returned_data(returned_data)
        self.assertTrue(res['ansible_facts'])
        self.assertIn('ansible_blip', res['ansible_facts'])
        # TODO: Should this be an AnsibleUnsafe?
        # self.assertIsInstance(res['ansible_facts'], AnsibleUnsafe)
    def test_json_facts_add_host(self):
        # add_host data must survive alongside ansible_facts.
        action_base = self._action_base()
        rc = 0
        stdout = '''{"ansible_facts": {"foo": "bar", "ansible_blip": "blip_value"},
                     "add_host": {"host_vars": {"some_key": ["whatever the add_host object is"]}
                     }
                     }\n'''
        err = ''
        returned_data = {'rc': rc,
                         'stdout': stdout,
                         'stdout_lines': stdout.splitlines(),
                         'stderr': err}
        res = action_base._parse_returned_data(returned_data)
        self.assertTrue(res['ansible_facts'])
        self.assertIn('ansible_blip', res['ansible_facts'])
        self.assertIn('add_host', res)
        # TODO: Should this be an AnsibleUnsafe?
        # self.assertIsInstance(res['ansible_facts'], AnsibleUnsafe)
| gpl-3.0 |
ffsdmad/af-web | cgi-bin/plugins2/volontirs/delo_list_stat.py | 1 | 1104 | # -*- coding: utf8 -*-
import libs
# Named SQL statements run in order by the host framework; the MySQL session
# variables (@FOND_ID, @OPIS_ID) carry values between the statements.
# NOTE(review): semantics inferred from names/usage — confirm against the
# plugin framework that consumes this module.
SQL = (
    ("achiv", "SELECT SQL_CALC_FOUND_ROWS ANAME,L1,L2,L3 FROM `af3_arhiv` limit 1;"),
    ("list_opis","""select @FOND_ID:=FOND_ID as FOND_ID, @OPIS_ID:=OPIS_ID as OPIS_ID, O.OKOD, O.ONAME, G3, G5, O.G7, G46 FROM `af3_opis` O LEFT JOIN `afweb_opis` o on(o.OPIS_ID=O.KOD) %(where)s ;"""),
    ("fond","""select FOND_ID, F.FKOD,F.FNAME,A1,A5,A7,A9 FROM `afweb_fond_sys` f LEFT JOIN `af3_fond` F on(F.KOD=f.FOND_ID) WHERE KOD=@FOND_ID limit 1;"""),
    ("rename", """select FOND3.*,C2 as old_name,fond.FKOD,FOND_ID FROM `af3_fond3` LEFT JOIN `afweb_fond` on (FOND_ID=FOND) WHERE FOND=@FOND_ID ORDER BY FKOD,C1 DESC; """),
    ("stat_volontirs","""select count(1) cnt,FIO from v_sys WHERE OPIS=@OPIS_ID group by uid;"""),
    )
# Request the total row count (pairs with SQL_CALC_FOUND_ROWS above).
FOUND_ROWS = True
# XML root element name and wrapper markup for the rendered page.
ROOT = "show_fond"
ROOT_PREFIX = "<show_list_delo />"
ROOT_POSTFIX= None
# XSL stylesheet applied to the generated XML.
XSL_TEMPLATE = "data/af-web-volontirs.xsl"
# Per-column post-processing hooks: wrap L4 values in a CDATA section.
EVENT = {"L4":libs.make_str_cdata, }
# Request parameters substituted into the %(where)s clause of "list_opis".
WHERE = ("OPIS_ID", )
PARAM = None
TITLE="статистика ввода дел волонтёрами"
MESSAGE="опись не содержит дел"
ORDER = None
| gpl-3.0 |
gkoelln/youtube-dl | youtube_dl/extractor/crackle.py | 4 | 6114 | # coding: utf-8
from __future__ import unicode_literals, division
import re
from .common import InfoExtractor
from ..compat import (
compat_str,
compat_HTTPError,
)
from ..utils import (
determine_ext,
float_or_none,
int_or_none,
parse_age_limit,
parse_duration,
ExtractorError
)
class CrackleIE(InfoExtractor):
    """Extractor for Crackle videos (crackle.com and crackle: scheme URLs)."""
    # Accepts plain "crackle:<id>" URLs as well as www/m site URLs,
    # including playlist paths; the numeric video id is captured.
    _VALID_URL = r'(?:crackle:|https?://(?:(?:www|m)\.)?crackle\.com/(?:playlist/\d+/|(?:[^/]+/)+))(?P<id>\d+)'
    _TEST = {
        # geo restricted to CA
        'url': 'https://www.crackle.com/andromeda/2502343',
        'info_dict': {
            'id': '2502343',
            'ext': 'mp4',
            'title': 'Under The Night',
            'description': 'md5:d2b8ca816579ae8a7bf28bfff8cefc8a',
            'duration': 2583,
            'view_count': int,
            'average_rating': 0,
            'age_limit': 14,
            'genre': 'Action, Sci-Fi',
            'creator': 'Allan Kroeker',
            'artist': 'Keith Hamilton Cobb, Kevin Sorbo, Lisa Ryder, Lexa Doig, Robert Hewitt Wolfe',
            'release_year': 2000,
            'series': 'Andromeda',
            'episode': 'Under The Night',
            'season_number': 1,
            'episode_number': 1,
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        }
    }

    def _real_extract(self, url):
        """Try the media API once per candidate country until one succeeds."""
        video_id = self._match_id(url)

        country_code = self._downloader.params.get('geo_bypass_country', None)
        countries = [country_code] if country_code else (
            'US', 'AU', 'CA', 'AS', 'FM', 'GU', 'MP', 'PR', 'PW', 'MH', 'VI')

        last_e = None

        for country in countries:
            try:
                media = self._download_json(
                    'https://web-api-us.crackle.com/Service.svc/details/media/%s/%s'
                    % (video_id, country), video_id,
                    'Downloading media JSON as %s' % country,
                    'Unable to download media JSON', query={
                        'disableProtocols': 'true',
                        'format': 'json'
                    })
            except ExtractorError as e:
                # 401 means geo restriction, trying next country
                if isinstance(e.cause, compat_HTTPError) and e.cause.code == 401:
                    last_e = e
                    continue
                raise

            media_urls = media.get('MediaURLs')
            if not media_urls or not isinstance(media_urls, list):
                continue

            title = media['Title']

            formats = []
            for e in media_urls:
                # Skip DRM-protected streams; only free HLS/DASH manifests are usable.
                if e.get('UseDRM') is True:
                    continue
                format_url = e.get('Path')
                if not format_url or not isinstance(format_url, compat_str):
                    continue
                ext = determine_ext(format_url)
                if ext == 'm3u8':
                    formats.extend(self._extract_m3u8_formats(
                        format_url, video_id, 'mp4', entry_protocol='m3u8_native',
                        m3u8_id='hls', fatal=False))
                elif ext == 'mpd':
                    formats.extend(self._extract_mpd_formats(
                        format_url, video_id, mpd_id='dash', fatal=False))
            self._sort_formats(formats)

            description = media.get('Description')
            duration = int_or_none(media.get(
                'DurationInSeconds')) or parse_duration(media.get('Duration'))
            view_count = int_or_none(media.get('CountViews'))
            average_rating = float_or_none(media.get('UserRating'))
            age_limit = parse_age_limit(media.get('Rating'))
            genre = media.get('Genre')
            release_year = int_or_none(media.get('ReleaseYear'))
            creator = media.get('Directors')
            artist = media.get('Cast')

            if media.get('MediaTypeDisplayValue') == 'Full Episode':
                series = media.get('ShowName')
                episode = title
                season_number = int_or_none(media.get('Season'))
                episode_number = int_or_none(media.get('Episode'))
            else:
                series = episode = season_number = episode_number = None

            subtitles = {}
            cc_files = media.get('ClosedCaptionFiles')
            if isinstance(cc_files, list):
                for cc_file in cc_files:
                    if not isinstance(cc_file, dict):
                        continue
                    cc_url = cc_file.get('Path')
                    if not cc_url or not isinstance(cc_url, compat_str):
                        continue
                    lang = cc_file.get('Locale') or 'en'
                    subtitles.setdefault(lang, []).append({'url': cc_url})

            thumbnails = []
            images = media.get('Images')
            # Bug fix: 'Images' is a mapping of "Img_WxH" keys to URLs, so the
            # guard must check for dict — the previous isinstance(images, list)
            # guaranteed an AttributeError on images.items() whenever it passed.
            if isinstance(images, dict):
                for image_key, image_url in images.items():
                    mobj = re.search(r'Img_(\d+)[xX](\d+)', image_key)
                    if not mobj:
                        continue
                    thumbnails.append({
                        'url': image_url,
                        'width': int(mobj.group(1)),
                        'height': int(mobj.group(2)),
                    })

            return {
                'id': video_id,
                'title': title,
                'description': description,
                'duration': duration,
                'view_count': view_count,
                'average_rating': average_rating,
                'age_limit': age_limit,
                'genre': genre,
                'creator': creator,
                'artist': artist,
                'release_year': release_year,
                'series': series,
                'episode': episode,
                'season_number': season_number,
                'episode_number': episode_number,
                'thumbnails': thumbnails,
                'subtitles': subtitles,
                'formats': formats,
            }

        # Every country failed with a geo-restriction error: re-raise the last one.
        raise last_e
| unlicense |
noslenfa/tdjangorest | uw/lib/python2.7/site-packages/IPython/core/magics/logging.py | 5 | 6458 | """Implementation of magic functions for IPython's own logging.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 The IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Stdlib
import os
import sys
# Our own packages
from IPython.core.magic import Magics, magics_class, line_magic
from IPython.utils.warn import warn
from IPython.utils.py3compat import str_to_unicode
#-----------------------------------------------------------------------------
# Magic implementation classes
#-----------------------------------------------------------------------------
@magics_class
class LoggingMagics(Magics):
    """Magics related to all logging machinery."""

    @line_magic
    def logstart(self, parameter_s=''):
        """Start logging anywhere in a session.

        %logstart [-o|-r|-t] [log_name [log_mode]]

        If no name is given, it defaults to a file named 'ipython_log.py' in your
        current directory, in 'rotate' mode (see below).

        '%logstart name' saves to file 'name' in 'backup' mode. It saves your
        history up to that point and then continues logging.

        %logstart takes a second optional parameter: logging mode. This can be one
        of (note that the modes are given unquoted):\\
          append: well, that says it.\\
          backup: rename (if exists) to name~ and start name.\\
          global: single logfile in your home dir, appended to.\\
          over  : overwrite existing log.\\
          rotate: create rotating logs name.1~, name.2~, etc.

        Options:

          -o: log also IPython's output. In this mode, all commands which
          generate an Out[NN] prompt are recorded to the logfile, right after
          their corresponding input line. The output lines are always
          prepended with a '#[Out]# ' marker, so that the log remains valid
          Python code.

          Since this marker is always the same, filtering only the output from
          a log is very easy, using for example a simple awk call::

            awk -F'#\\[Out\\]# ' '{if($2) {print $2}}' ipython_log.py

          -r: log 'raw' input. Normally, IPython's logs contain the processed
          input, so that user lines are logged in their final form, converted
          into valid Python. For example, %Exit is logged as
          _ip.magic("Exit"). If the -r flag is given, all input is logged
          exactly as typed, with no transformations applied.

          -t: put timestamps before each input line logged (these are put in
          comments)."""

        opts,par = self.parse_options(parameter_s,'ort')
        log_output = 'o' in opts
        log_raw_input = 'r' in opts
        timestamp = 't' in opts

        logger = self.shell.logger

        # if no args are given, the defaults set in the logger constructor by
        # ipython remain valid
        if par:
            try:
                logfname,logmode = par.split()
            except ValueError:
                # A single argument: treat it as the filename, default mode.
                # (ValueError is what the two-element unpack raises; the old
                # bare `except:` also swallowed KeyboardInterrupt etc.)
                logfname = par
                logmode = 'backup'
        else:
            logfname = logger.logfname
            logmode = logger.logmode
        # put logfname into rc struct as if it had been called on the command
        # line, so it ends up saved in the log header Save it in case we need
        # to restore it...
        old_logfile = self.shell.logfile
        if logfname:
            logfname = os.path.expanduser(logfname)
        self.shell.logfile = logfname

        loghead = u'# IPython log file\n\n'
        try:
            logger.logstart(logfname, loghead, logmode, log_output, timestamp,
                            log_raw_input)
        except Exception:
            # Restore the previous logfile setting and report; narrowed from a
            # bare `except:` so Ctrl-C/SystemExit still propagate.
            self.shell.logfile = old_logfile
            warn("Couldn't start log: %s" % sys.exc_info()[1])
        else:
            # log input history up to this point, optionally interleaving
            # output if requested

            if timestamp:
                # disable timestamping for the previous history, since we've
                # lost those already (no time machine here).
                logger.timestamp = False

            if log_raw_input:
                input_hist = self.shell.history_manager.input_hist_raw
            else:
                input_hist = self.shell.history_manager.input_hist_parsed

            if log_output:
                log_write = logger.log_write
                output_hist = self.shell.history_manager.output_hist
                for n in range(1,len(input_hist)-1):
                    log_write(input_hist[n].rstrip() + u'\n')
                    if n in output_hist:
                        log_write(str_to_unicode(repr(output_hist[n])),'output')
            else:
                logger.log_write(u'\n'.join(input_hist[1:]))
                logger.log_write(u'\n')
            if timestamp:
                # re-enable timestamping
                logger.timestamp = True

            print ('Activating auto-logging. '
                   'Current session state plus future input saved.')
            logger.logstate()

    @line_magic
    def logstop(self, parameter_s=''):
        """Fully stop logging and close log file.

        In order to start logging again, a new %logstart call needs to be made,
        possibly (though not necessarily) with a new filename, mode and other
        options."""
        self.shell.logger.logstop()

    @line_magic
    def logoff(self, parameter_s=''):
        """Temporarily stop logging.

        You must have previously started logging."""
        self.shell.logger.switch_log(0)

    @line_magic
    def logon(self, parameter_s=''):
        """Restart logging.

        This function is for restarting logging which you've temporarily
        stopped with %logoff. For starting logging for the first time, you
        must use the %logstart function, which allows you to specify an
        optional log filename."""
        self.shell.logger.switch_log(1)

    @line_magic
    def logstate(self, parameter_s=''):
        """Print the status of the logging system."""
        self.shell.logger.logstate()
| apache-2.0 |
drammock/mne-python | mne/viz/_brain/_linkviewer.py | 8 | 5503 | # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Eric Larson <larson.eric.d@gmail.com>
# Guillaume Favelier <guillaume.favelier@gmail.com>
#
# License: Simplified BSD
import numpy as np
from ...utils import warn
class _LinkViewer(object):
    """Class to link multiple Brain objects.

    The first brain in ``brains`` acts as the leader: its widgets and camera
    are the reference the other viewers are synchronized to.
    """

    def __init__(self, brains, time=True, camera=False, colorbar=True,
                 picking=False):
        self.brains = brains
        self.leader = self.brains[0]  # select a brain as leader

        # check time infos
        times = [brain._times for brain in brains]
        if time and not all(np.allclose(x, times[0]) for x in times):
            warn('stc.times do not match, not linking time')
            time = False

        if camera:
            self.link_cameras()

        if time:
            # link time sliders
            self.link_widgets(
                name="time",
                callback=self.set_time_point,
                signal_type="valueChanged",
            )

            # link playback speed sliders
            self.link_widgets(
                name="playback_speed",
                callback=self.set_playback_speed,
                signal_type="valueChanged",
            )

            # link toggle to start/pause playback
            self.link_widgets(
                name="play",
                callback=self.toggle_playback,
                signal_type="triggered",
                actions=True,
            )

            # link time course canvas
            def _time_func(*args, **kwargs):
                # Broadcast a time change to every linked brain.
                for brain in self.brains:
                    brain.callbacks["time"](*args, **kwargs)

            for brain in self.brains:
                if brain.show_traces:
                    brain.mpl_canvas.time_func = _time_func

        if picking:
            # Replacement glyph handlers that fan out add/remove to all brains.
            def _func_add(*args, **kwargs):
                for brain in self.brains:
                    brain._add_vertex_glyph2(*args, **kwargs)
                    brain.plotter.update()

            def _func_remove(*args, **kwargs):
                for brain in self.brains:
                    brain._remove_vertex_glyph2(*args, **kwargs)

            # save initial picked points
            initial_points = dict()
            for hemi in ('lh', 'rh'):
                initial_points[hemi] = set()
                for brain in self.brains:
                    initial_points[hemi] |= \
                        set(brain.picked_points[hemi])

            # link the viewers
            # The original per-brain methods are kept under the *2 names and
            # wrapped by the broadcasting versions above.
            for brain in self.brains:
                brain.clear_glyphs()
                brain._add_vertex_glyph2 = brain._add_vertex_glyph
                brain._add_vertex_glyph = _func_add
                brain._remove_vertex_glyph2 = brain._remove_vertex_glyph
                brain._remove_vertex_glyph = _func_remove

            # link the initial points
            # Re-adding via the leader broadcasts them to every viewer.
            for hemi in initial_points.keys():
                if hemi in brain._layered_meshes:
                    mesh = brain._layered_meshes[hemi]._polydata
                    for vertex_id in initial_points[hemi]:
                        self.leader._add_vertex_glyph(hemi, mesh, vertex_id)

        if colorbar:
            # Propagate the leader's colormap limits, then link the widgets.
            fmin = self.leader._data["fmin"]
            fmid = self.leader._data["fmid"]
            fmax = self.leader._data["fmax"]
            for brain in self.brains:
                brain.callbacks["fmin"](fmin)
                brain.callbacks["fmid"](fmid)
                brain.callbacks["fmax"](fmax)

            for name in ('fmin', 'fmid', 'fmax'):
                func = getattr(self, "set_" + name)
                self.link_widgets(
                    name=name,
                    callback=func,
                    signal_type="valueChanged"
                )

    def set_fmin(self, value):
        # Broadcast a new colormap minimum to all brains.
        for brain in self.brains:
            brain.callbacks["fmin"](value)

    def set_fmid(self, value):
        # Broadcast a new colormap midpoint to all brains.
        for brain in self.brains:
            brain.callbacks["fmid"](value)

    def set_fmax(self, value):
        # Broadcast a new colormap maximum to all brains.
        for brain in self.brains:
            brain.callbacks["fmax"](value)

    def set_time_point(self, value):
        # Broadcast a time-point change, keeping each brain's widget in sync.
        for brain in self.brains:
            brain.callbacks["time"](value, update_widget=True)

    def set_playback_speed(self, value):
        # Broadcast a playback-speed change, keeping widgets in sync.
        for brain in self.brains:
            brain.callbacks["playback_speed"](value, update_widget=True)

    def toggle_playback(self):
        value = self.leader.callbacks["time"].widget.get_value()
        # synchronize starting points before playback
        self.set_time_point(value)
        for brain in self.brains:
            brain.toggle_playback()

    def link_widgets(self, name, callback, signal_type, actions=False):
        """Rewire the named widget/action of every brain to *callback*."""
        for brain in self.brains:
            if actions:
                widget = brain._renderer.actions[name]
            else:
                widget = brain.widgets[name].widget
            if widget is not None:
                # Drop the widget's own handler and install the shared one.
                signal = getattr(widget, signal_type)
                signal.disconnect()
                signal.connect(callback)

    def link_cameras(self):
        """Share the leader's camera with every renderer and refresh on motion."""
        from ..backends._pyvista import _add_camera_callback

        def _update_camera(vtk_picker, event):
            for brain in self.brains:
                brain.plotter.update()

        camera = self.leader.plotter.camera
        _add_camera_callback(camera, _update_camera)
        for brain in self.brains:
            for renderer in brain.plotter.renderers:
                renderer.camera = camera
| bsd-3-clause |
xianggong/m2c_unit_test | test/integer/mad_hi_ulong4ulong4ulong4/compile.py | 1861 | 4430 | #!/usr/bin/python
import os
import subprocess
import re
def runCommand(command):
    """Run *command* (an argv list), wait for it, and return an iterator
    over its output lines (bytes); stderr is folded into stdout."""
    proc = subprocess.Popen(
        command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    proc.wait()
    # The b'' sentinel ends iteration once the captured output is exhausted.
    return iter(proc.stdout.readline, b'')
def dumpRunCommand(command, dump_file_name, postfix):
    """Run *command* (one shell-style string) and log it to a dump file.

    The command line itself is written first, then every line of its
    combined stdout/stderr, to ``dump_file_name + postfix``.
    """
    # 'with' guarantees the log is flushed and closed even if iterating the
    # subprocess output raises (the original leaked the file handle).
    with open(dump_file_name + postfix, "w+") as dumpFile:
        dumpFile.write(command + "\n")
        for line in runCommand(command.split()):
            # NOTE(review): runCommand yields bytes; writing them to a text
            # file only works on Python 2 — confirm target interpreter.
            dumpFile.write(line)
def rmFile(file_name):
    """Best-effort delete of *file_name* via an external ``rm -rf``."""
    # The command string is whitespace-split, matching this module's other
    # helpers (so paths containing spaces are not supported).
    removal = "rm -rf " + file_name
    runCommand(removal.split())
def rnm_ir(file_name):
    """Rewrite <file_name>.ll in place, naming LLVM's anonymous values.

    Unnamed SSA values (%1, %2, ...) become %tmp_1, %tmp_2, ..., basic-block
    label comments ("; <label>:N") become explicit "tmp_N:" labels, and an
    "entry:" label is inserted after each function definition line.
    """
    ir_file_name = file_name + ".ll"
    if os.path.isfile(ir_file_name):
        # "r+" (read/update) replaces the bogus "rw+" mode, which raises
        # ValueError on Python 3 and only worked by accident (via glibc's
        # fopen mode parsing) on Python 2; "with" also closes the handle,
        # which previously leaked.
        with open(ir_file_name, "r+") as fo:
            lines = fo.readlines()
            fo.seek(0)
            fo.truncate()
            for line in lines:
                # Add entry block identifier after a function header
                if "define" in line:
                    line += "entry:\n"
                # Rename all unnamed variables
                line = re.sub(r'\%([0-9]+)',
                              r'%tmp_\1',
                              line.rstrip())
                # Also rename branch labels
                line = re.sub(r'(\;\ \<label\>\:)([0-9]+)',
                              r'tmp_\2:',
                              line.rstrip())
                fo.write(line + '\n')
def gen_ir(file_name):
    """Compile <file_name>.cl to LLVM IR via clang for the r600/verde target,
    logging the compiler output to <file_name>.clang.log."""
    # Project layout: headers live three levels up, under inc/.
    root_dir = '../../../'
    header_dir = root_dir + "inc/"
    # Force-include the build fix-ups and the OpenCL C declarations.
    include_flags = " -I " + header_dir
    include_flags += " -include " + header_dir + "m2c_buildin_fix.h "
    include_flags += " -include " + header_dir + "clc/clc.h "
    include_flags += " -D cl_clang_storage_class_specifiers "
    compile_cmd = ("clang -S -emit-llvm -O0 -target r600-- -mcpu=verde "
                   + include_flags + file_name + ".cl")
    dumpRunCommand(compile_cmd, file_name, ".clang.log")
def asm_ir(file_name):
    """Assemble <file_name>.ll into bitcode <file_name>.bc with llvm-as."""
    src = file_name + ".ll"
    if os.path.isfile(src):
        dst = file_name + ".bc"
        runCommand(("llvm-as " + src + " -o " + dst).split())
def opt_bc(file_name):
    """Run the mem2reg pass on <file_name>.bc, writing <file_name>.opt.bc."""
    src = file_name + ".bc"
    if os.path.isfile(src):
        dst = file_name + ".opt.bc"
        runCommand(("opt --mem2reg " + src + " -o " + dst).split())
def dis_bc(file_name):
    """Disassemble <file_name>.opt.bc back to readable IR <file_name>.opt.ll."""
    # NOTE(review): the guard checks the plain .bc file although the input is
    # .opt.bc — preserved as-is since .opt.bc only exists when .bc does.
    if os.path.isfile(file_name + ".bc"):
        src = file_name + ".opt.bc"
        dst = file_name + ".opt.ll"
        runCommand(("llvm-dis " + src + " -o " + dst).split())
def m2c_gen(file_name):
    """Translate optimized bitcode to SI assembly with ``m2c --llvm2si``."""
    source = file_name + ".opt.bc"
    if not os.path.isfile(source):
        return
    dumpRunCommand("m2c --llvm2si " + source, file_name, ".m2c.llvm2si.log")
    # m2c can leave an empty .opt.s behind on failure; drop it so the
    # later --si2bin stage is skipped.
    asm = file_name + ".opt.s"
    if os.path.isfile(asm) and os.path.getsize(asm) == 0:
        rmFile(asm)
def m2c_bin(file_name):
    """Assemble SI assembly into a GPU binary with ``m2c --si2bin``."""
    source = file_name + ".opt.s"
    if os.path.isfile(source):
        dumpRunCommand("m2c --si2bin " + source, file_name,
                       ".m2c.si2bin.log")
def main():
    """Run the .cl -> IR -> bitcode -> SI pipeline on every kernel here."""
    for entry in os.listdir("./"):
        if not entry.endswith(".cl"):
            continue
        file_name = os.path.splitext(entry)[0]
        # Pipeline stages; each is a no-op when its input file is missing.
        gen_ir(file_name)
        rnm_ir(file_name)
        asm_ir(file_name)
        opt_bc(file_name)
        dis_bc(file_name)
        m2c_gen(file_name)
        m2c_bin(file_name)


if __name__ == "__main__":
    main()
| gpl-2.0 |
rzambre/servo | python/servo/post_build_commands.py | 7 | 9270 | # Copyright 2013 The Servo Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.
from __future__ import print_function, unicode_literals
import os
import os.path as path
import subprocess
from shutil import copytree, rmtree, copy2
from mach.registrar import Registrar
from mach.decorators import (
CommandArgument,
CommandProvider,
Command,
)
from servo.command_base import (
CommandBase,
call, check_call,
is_windows, is_macosx, set_osmesa_env,
get_browserhtml_path,
)
def read_file(filename, if_exists=False):
    """Return the text of *filename*.

    With ``if_exists=True`` a missing file yields ``None`` instead of
    raising.
    """
    missing_is_ok = if_exists and not path.exists(filename)
    if missing_is_ok:
        return None
    with open(filename) as handle:
        return handle.read()
@CommandProvider
class PostBuildCommands(CommandBase):
    """mach commands that operate on an already-built Servo binary:
    run (optionally under a debugger or on Android), rr record/replay,
    and documentation generation."""

    @Command('run',
             description='Run Servo',
             category='post-build')
    @CommandArgument('--release', '-r', action='store_true',
                     help='Run the release build')
    @CommandArgument('--dev', '-d', action='store_true',
                     help='Run the dev build')
    @CommandArgument('--android', action='store_true', default=None,
                     help='Run on an Android device through `adb shell`')
    @CommandArgument('--debug', action='store_true',
                     help='Enable the debugger. Not specifying a '
                          '--debugger option will result in the default '
                          'debugger being used. The following arguments '
                          'have no effect without this.')
    @CommandArgument('--debugger', default=None, type=str,
                     help='Name of debugger to use.')
    @CommandArgument('--browserhtml', '-b', action='store_true',
                     help='Launch with Browser.html')
    @CommandArgument('--headless', '-z', action='store_true',
                     help='Launch in headless mode')
    @CommandArgument(
        'params', nargs='...',
        help="Command-line arguments to be passed through to Servo")
    def run(self, params, release=False, dev=False, android=None, debug=False, debugger=None, browserhtml=False,
            headless=False):
        """Launch the Servo binary with *params*, either on the desktop
        (optionally under a debugger / with Browser.html / headless) or on
        an Android device through `adb shell`."""
        env = self.build_env()
        env["RUST_BACKTRACE"] = "1"

        # Make --debugger imply --debug
        if debugger:
            debug = True

        if android is None:
            android = self.config["build"]["android"]

        if android:
            if debug:
                print("Android on-device debugging is not supported by mach yet. See")
                print("https://github.com/servo/servo/wiki/Building-for-Android#debugging-on-device")
                return
            # Drive the app through an adb shell script: stop any running
            # instance, write the params file, then start the activity.
            script = [
                "am force-stop com.mozilla.servo",
                "echo servo >/sdcard/servo/android_params"
            ]
            for param in params:
                script += [
                    "echo '%s' >>/sdcard/servo/android_params" % param.replace("'", "\\'")
                ]
            script += [
                "am start com.mozilla.servo/com.mozilla.servo.MainActivity",
                "exit"
            ]
            shell = subprocess.Popen(["adb", "shell"], stdin=subprocess.PIPE)
            shell.communicate("\n".join(script) + "\n")
            return shell.wait()

        args = [self.get_binary_path(release, dev)]

        if browserhtml:
            browserhtml_path = get_browserhtml_path(args[0])
            if is_macosx():
                # Enable borderless on OSX
                args = args + ['-b']
            elif is_windows():
                # Convert to a relative path to avoid mingw -> Windows path conversions
                browserhtml_path = path.relpath(browserhtml_path, os.getcwd())

            args = args + ['--pref', 'dom.mozbrowser.enabled',
                           '--pref', 'dom.forcetouch.enabled',
                           '--pref', 'shell.builtin-key-shortcuts.enabled=false',
                           path.join(browserhtml_path, 'index.html')]

        if headless:
            set_osmesa_env(args[0], env)
            args.append('-z')

        # Borrowed and modified from:
        # http://hg.mozilla.org/mozilla-central/file/c9cfa9b91dea/python/mozbuild/mozbuild/mach_commands.py#l883
        if debug:
            import mozdebug
            if not debugger:
                # No debugger name was provided. Look for the default ones on
                # current OS.
                debugger = mozdebug.get_default_debugger_name(
                    mozdebug.DebuggerSearch.KeepLooking)

            self.debuggerInfo = mozdebug.get_debugger_info(debugger)
            if not self.debuggerInfo:
                print("Could not find a suitable debugger in your PATH.")
                return 1

            command = self.debuggerInfo.path
            if debugger == 'gdb' or debugger == 'lldb':
                # Prefer the rust-aware wrapper (rust-gdb / rust-lldb)
                # when it is available on PATH.
                rustCommand = 'rust-' + debugger
                try:
                    subprocess.check_call([rustCommand, '--version'], env=env, stdout=open(os.devnull, 'w'))
                except (OSError, subprocess.CalledProcessError):
                    pass
                else:
                    command = rustCommand

            # Prepend the debugger args.
            args = ([command] + self.debuggerInfo.args +
                    args + params)
        else:
            args = args + params

        try:
            check_call(args, env=env)
        except subprocess.CalledProcessError as e:
            print("Servo exited with return value %d" % e.returncode)
            return e.returncode
        except OSError as e:
            # errno 2 == ENOENT: the binary itself is missing.
            if e.errno == 2:
                print("Servo Binary can't be found! Run './mach build'"
                      " and try again!")
            else:
                raise e

    @Command('rr-record',
             description='Run Servo whilst recording execution with rr',
             category='post-build')
    @CommandArgument('--release', '-r', action='store_true',
                     help='Use release build')
    @CommandArgument('--dev', '-d', action='store_true',
                     help='Use dev build')
    @CommandArgument(
        'params', nargs='...',
        help="Command-line arguments to be passed through to Servo")
    def rr_record(self, release=False, dev=False, params=[]):
        """Record a Servo run under rr for later deterministic replay."""
        env = self.build_env()
        env["RUST_BACKTRACE"] = "1"

        servo_cmd = [self.get_binary_path(release, dev)] + params
        rr_cmd = ['rr', '--fatal-errors', 'record']
        try:
            check_call(rr_cmd + servo_cmd)
        except OSError as e:
            if e.errno == 2:
                print("rr binary can't be found!")
            else:
                raise e

    @Command('rr-replay',
             description='Replay the most recent execution of Servo that was recorded with rr',
             category='post-build')
    def rr_replay(self):
        """Replay the most recent rr recording."""
        try:
            check_call(['rr', '--fatal-errors', 'replay'])
        except OSError as e:
            if e.errno == 2:
                print("rr binary can't be found!")
            else:
                raise e

    @Command('doc',
             description='Generate documentation',
             category='post-build')
    @CommandArgument(
        'params', nargs='...',
        help="Command-line arguments to be passed through to cargo doc")
    def doc(self, params):
        """Run `cargo doc`, first syncing the prebuilt Rust std docs into
        the target doc directory when their version differs."""
        self.ensure_bootstrapped()
        if not path.exists(path.join(self.config["tools"]["rust-root"], "doc")):
            Registrar.dispatch("bootstrap-rust-docs", context=self.context)
        rust_docs = path.join(self.config["tools"]["rust-root"], "doc")
        docs = path.join(self.get_target_dir(), "doc")
        if not path.exists(docs):
            os.makedirs(docs)

        # version_info.html acts as a cache key: copy only when it changed.
        if read_file(path.join(docs, "version_info.html"), if_exists=True) != \
                read_file(path.join(rust_docs, "version_info.html")):
            print("Copying Rust documentation.")
            # copytree doesn't like the destination already existing.
            for name in os.listdir(rust_docs):
                if not name.startswith('.'):
                    full_name = path.join(rust_docs, name)
                    destination = path.join(docs, name)
                    if path.isdir(full_name):
                        if path.exists(destination):
                            rmtree(destination)
                        copytree(full_name, destination)
                    else:
                        copy2(full_name, destination)

        return call(["cargo", "doc"] + params,
                    env=self.build_env(), cwd=self.servo_crate())

    @Command('browse-doc',
             description='Generate documentation and open it in a web browser',
             category='post-build')
    def serve_docs(self):
        """Generate the docs, then open servo's index page in a browser."""
        self.doc([])
        import webbrowser
        webbrowser.open("file://" + path.abspath(path.join(
            self.get_target_dir(), "doc", "servo", "index.html")))
| mpl-2.0 |
ohjay/sysadmin-scripts | video2images.py | 1 | 1558 | #!/usr/bin/env python
import os
import cv2
import numpy as np
from scipy.misc import imresize
from scipy.signal import medfilt
### Parameters (you should edit these)
output_folder = '.'  # directory that receives the frame/mask JPEGs
video_path = 'angrywalk.mp4'  # input video to split into frames
### ----------------------------------
def get_fps(capture):
    """Return the frame rate of *capture*, handling OpenCV 2.x and 3.x APIs."""
    major_version = int(cv2.__version__.split('.')[0])
    if major_version < 3:
        # OpenCV 2.x kept capture properties under the legacy cv namespace.
        return capture.get(cv2.cv.CV_CAP_PROP_FPS)
    return capture.get(cv2.CAP_PROP_FPS)
def output_path(basename):
    """Return *basename* joined onto the configured output_folder."""
    return os.path.join(output_folder, basename)
def segment(im):
    """(Simple) segmentation mask extraction.

    Subtract the median, binarize, median-filter, then keep only pixels
    that stayed fully "on" across all channels.  Might do the trick if
    the background is all one color.

    Returns a float mask of 0.0/255.0 values with the channel axis
    reduced away (assumes *im* is H x W x 3 -- TODO confirm callers).
    """
    # np.bool and np.float were removed in NumPy 1.24; the builtins are
    # the supported spellings and behave identically here.
    mask = np.abs(im - np.median(im)).astype(bool)
    mask = mask.astype(float) * 255.0
    mask = medfilt(mask, kernel_size=5)
    mask = np.all(mask == [255, 255, 255], axis=-1)
    return mask.astype(float) * 255.0
# Extract frames from video
capture = cv2.VideoCapture(video_path)

# Determine the frame rate
fps = get_fps(capture)
print('[o] Processing frames from %r FPS video...' % fps)

num_frames = 0
while True:
    success, frame = capture.read()
    if not success:
        # read() reports failure at end-of-stream; stop there.
        break
    # Per-frame processing (resize / crop / segment / ...).
    smask = segment(frame)
    # Write the raw frame and its segmentation mask side by side.
    cv2.imwrite(output_path('frame%d.jpg' % num_frames), frame)
    cv2.imwrite(output_path('smask%d.jpg' % num_frames), smask)
    num_frames += 1

print('[+] Processed %r frames from `%s`.' % (num_frames, video_path))
| mit |
winnukem/racehound | examples/events.py | 1 | 8304 | #!/usr/bin/env python3
# This example demonstrates how to control the set of the locations in the
# binary code of the kernel monitored by RaceHound.
#
# This can be used, for example, to create a system that dynamically adds
# and removes the breakpoints according to some policy to sweep through
# the given area of the kernel and find data races there. Might be similar
# to what DataCollider does for MS Windows. This example does not do this
# yet, however.
#
# The point is, one no longer needs to hack the kernel-mode components to do
# such sweeping with RaceHound. The policies can now be implemented in the
# user space using the interface provided by the kernel-mode part of
# RaceHound via the following files in debugfs:
#
# * racehound/breakpoints - write data here to add or remove the
# breakpoints on the code locations to be monitored. Reading from this
# file lists the currently set breakpoints.
#
# * racehound/events - poll this file to be notified when the breakpoints
# hit or races are found, then read from it to find which events have
# happened. Reading from this file removes the events from the memory
# buffer associated with this file. If the breakpoints hit very often
# and the reader does not keep up, the buffer may become full and the
# new events will be discarded.
#
# That is it.
#
# Note that Python 3.4 or newer is needed here.
#
# Usage (run the script as root):
# events.py [--max-hits=N] [-q]
#
# This script waits on /sys/kernel/debug/racehound/events file and outputs
# information about the events as soon as it is available in that file.
#
# Additionally, if '--max-hits=N' is specified, the breakpoints that hit N
# times or more will be removed.
#
# If -q (--quiet) is present, the script will only output a summary of the
# found races at the exit. It will not output the current events when it
# reads them.
#
# The script assumes debugfs is mounted to /sys/kernel/debug/.
import sys
import os.path
import selectors
import argparse
import re
# debugfs control files exposed by the RaceHound kernel module.
BPS_FILE = '/sys/kernel/debug/racehound/breakpoints'
EVENTS_FILE = '/sys/kernel/debug/racehound/events'

# Shown when the control files above are missing.
ERR_NO_FILES = ''.join([
    'Please check that debugfs is mounted to /sys/kernel/debug ',
    'and kernel module \"racehound\" is loaded.'])

# Full race report: both racing instructions are known.
RE_RACE = re.compile(' '.join([
    r'Detected a data race on the memory block at (0x)?[0-9a-f]+',
    r'between the instruction at ([^ \t]+) \(comm: \"(.+)\"\)',
    r'and the instruction right before (.*) \(comm: \"(.+)\"\)']))

# "Repeated read" report: only the watched instruction is known.
RE_RREAD = re.compile(' '.join([
    r'Detected a data race on the memory block at (0x)?[0-9a-f]+',
    r'that is about to be accessed by the instruction at ([^ \t]+)',
    r'\(comm: \"(.+)\"\):',
    r'the memory block was modified during the delay']))
def positive_int(string):
    """argparse type callback: parse *string* as a strictly positive int."""
    value = int(string)
    if value > 0:
        return value
    raise argparse.ArgumentTypeError(
        "%r is not a positive integer" % string)
class RaceGroup(object):
    """A group of race reports between one pair of instructions.

    'insn' - address of the instruction under watch,
    'insn_comms' - 'comm' values of the processes that executed 'insn',
    'conflict_insn' - the address right after the instruction that made
        the conflicting access (None if unknown),
    'conflict_comms' - 'comm' values for 'conflict_insn' (empty if unknown).
    """

    def __init__(self, insn, insn_comm, conflict_insn=None,
                 conflict_comm=None):
        self.insn = insn
        self.insn_comms = [insn_comm]
        self.conflict_insn = conflict_insn
        self.conflict_comms = [conflict_comm] if conflict_comm else []
        # Number of times this race was reported.
        self.count = 1

    def print_races(self):
        """Print a human-readable summary of this race group."""
        if self.conflict_insn:
            print('Race between %s and the insn right before %s.' %
                  (self.insn, self.conflict_insn))
            print('The first insn was executed by:',
                  ', '.join(list(set(self.insn_comms))))
            print('The second insn was executed by:',
                  ', '.join(list(set(self.conflict_comms))))
        else:
            print('Race between %s and some other code.' % self.insn)
            print('The insn was executed by:',
                  ', '.join(list(set(self.insn_comms))))
        print('The race was reported %d time(s).' % self.count)
def store_race_info(races, str_race):
    """Parse one race report line and merge it into the *races* mapping.

    *races* maps a key built from the racing addresses to a RaceGroup.
    Unparseable lines are reported on stderr and skipped.
    """
    conflict_insn = None
    conflict_comm = None
    matched = RE_RACE.search(str_race)
    if matched:
        _, insn, insn_comm, conflict_insn, conflict_comm = matched.groups()
        key = insn + '#' + conflict_insn
    else:
        matched = RE_RREAD.search(str_race)
        if not matched:
            sys.stderr.write(
                'Unknown format of a race report: "%s".\n' % str_race)
            return
        _, insn, insn_comm = matched.groups()
        key = insn

    group = races.get(key)
    if group is None:
        races[key] = RaceGroup(
            insn, insn_comm, conflict_insn, conflict_comm)
    else:
        group.count += 1
        group.insn_comms.append(insn_comm)
        if conflict_comm:
            group.conflict_comms.append(conflict_comm)
def print_summary(races):
    """Print all collected race groups, or a note when none were found."""
    if not races:
        print('No races found.')
        return
    for grp in races.values():
        grp.print_races()
        print('')
def remove_bp(bp):
    """Tell RaceHound to remove the given breakpoint.

    A line starting with '-' written to the control file removes that
    breakpoint; see the RaceHound Readme for the format and details.
    """
    with open(BPS_FILE, 'w') as control:
        control.write('-%s\n' % bp)
if __name__ == '__main__':
desc = 'Demo for the API to control RaceHound from user space.'
parser = argparse.ArgumentParser(description=desc)
parser.add_argument(
'--max-hits', metavar='N', nargs='?', type=positive_int, default=0,
help='disable the breakpoint if it hits N times or more')
parser.add_argument(
'-q', '--quiet', action='store_true',
help='do not output the events, just print a summary at exit')
args = parser.parse_args()
# Mapping: {racing_insns => race_info}
races = {}
for fl in [BPS_FILE, EVENTS_FILE]:
if not os.path.exists(fl):
sys.stderr.write('File not found: %s.\n' % fl)
sys.stderr.write(ERR_NO_FILES)
sys.stderr.write('\n')
sys.exit(1)
sel = selectors.DefaultSelector()
with open(EVENTS_FILE, 'r') as f:
sel.register(f, selectors.EVENT_READ)
bp_hits = {} # The mapping {BP_string, number_of_hits}
# Poll the "events" file and read the lines from it as they become
# available.
# If the user presses Ctrl-C, just exit.
try:
while True:
events = sel.select()
for key, mask in events:
for line in f:
if line.startswith('[race]'):
line = line.rstrip()
store_race_info(races, line)
if not args.quiet:
print(line)
continue
bp = line.rstrip()
if not args.quiet:
print('BP hit:', bp)
# Count the number of hits.
# If --max-hits=N is specified and the BP was hit
# N times, remove it. Note that the BP may be hit
# a few more times after this before it is actually
# removed.
if not bp in bp_hits:
bp_hits[bp] = 1
else:
bp_hits[bp] = bp_hits[bp] + 1
if bp_hits[bp] == args.max_hits:
if not args.quiet:
print(
'BP %s was hit %d time(s), removing it' %
(bp, args.max_hits))
remove_bp(bp)
except KeyboardInterrupt:
print_summary(races)
sys.exit(1)
| gpl-2.0 |
arruda/rmr | rmr/libs/utils/decorators.py | 1 | 2387 | # -*- coding: utf-8 -*-
"""
libs.utils.decorators
~~~~~~~~~~~~~~
here are the generic decorators for RMR
:copyright: (c) 2012 by arruda.
"""
from functools import wraps
from django.utils import simplejson
from django.core.serializers.json import DjangoJSONEncoder
from django.http import HttpResponse
def ajax_login_required(view_func):
    """Like login_required, but answers AJAX callers with a JSON flag.

    Anonymous requests receive ``{"not_authenticated": true}`` instead of
    a redirect, so client-side code can detect an expired session.
    """
    @wraps(view_func)
    def wrap(request, *args, **kwargs):
        if not request.user.is_authenticated():
            payload = simplejson.dumps({'not_authenticated': True})
            return HttpResponse(payload, mimetype='application/json')
        return view_func(request, *args, **kwargs)
    return wrap
class JsonResponse(HttpResponse):
    """
    HttpResponse subclass that returns its payload serialized as JSON
    with the ``application/json`` mimetype.
    """
    def __init__(self, data):
        # DjangoJSONEncoder also handles dates/decimals that plain
        # simplejson cannot serialize.
        super(JsonResponse, self).__init__(
            content=simplejson.dumps(data, cls=DjangoJSONEncoder),
            mimetype='application/json')
def ajax_request(func):
    """
    Based on django-annoying.
    If the view returns a serializable dict, it is wrapped in a
    JsonResponse; any other return value is passed through unchanged.

    example:

        @ajax_request
        def my_view(request):
            news = News.objects.all()
            news_titles = [entry.title for entry in news]
            return {'news_titles': news_titles}
    """
    @wraps(func)
    def wrapper(request, *args, **kwargs):
        response = func(request, *args, **kwargs)
        if isinstance(response, dict):
            return JsonResponse(response)
        else:
            return response
    # BUG FIX: the decorator previously fell off the end and returned
    # None, which replaced every decorated view with None.
    return wrapper
#def requirement_check(function):
# """
# Check if the user logged in is a owner of the given enterprise(enterprise_id kwarg)
# """
# from enterprises.models import Enterprise, EnterpriseMember
# @wraps(function)
# def wrapper(request, *args, **kwargs):
# enterprise = Enterprise.get_from_user_or_404(request.user)
#
# get_kwargs = {
# 'enterprise': enterprise,
# 'user':request.user,
# }
# if owner:
# get_kwargs['member_type'] = EnterpriseMember.MEMBER_TYPE.owner
#
# member = get_object_or_404(EnterpriseMember, **get_kwargs)
#
# return function(request, *args, **kwargs)
#
# return wrapper | mit |
alxgu/ansible | lib/ansible/module_utils/network/frr/providers/cli/config/base.py | 76 | 2532 | #
# (c) 2019, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
from ansible.module_utils.six import iteritems
from ansible.module_utils.network.common.utils import to_list
from ansible.module_utils.network.common.config import NetworkConfig
class ConfigBase(object):
    """Base class for declarative CLI configuration resources.

    Subclasses declare ``argument_spec`` (the allowed attributes and
    their defaults), ``identifier`` (the keys that name an instance and
    become read-only) and implement ``render()`` to emit CLI commands.
    Attribute reads/writes for spec'd keys are redirected into
    ``self.values`` via __getattr__/__setattr__.
    """

    argument_spec = {}
    mutually_exclusive = []
    # Keys that identify an instance; required in __init__, immutable after.
    identifier = ()

    def __init__(self, **kwargs):
        """Populate self.values from *kwargs* per argument_spec."""
        # 'values' itself is not in argument_spec, so these assignments go
        # through the normal object.__setattr__ path (see __setattr__).
        self.values = {}
        # Commands rendered so far, keyed by section ('context'/'global').
        self._rendered_configuration = {}
        self.active_configuration = None

        # Identifier keys are mandatory (KeyError if absent) and bypass
        # __setattr__ so the read-only check does not reject them.
        for item in self.identifier:
            self.values[item] = kwargs.pop(item)

        for key, value in iteritems(kwargs):
            if key in self.argument_spec:
                setattr(self, key, value)

        # Apply declared defaults for anything still unset/falsy.
        for key, value in iteritems(self.argument_spec):
            if value.get('default'):
                if not getattr(self, key, None):
                    setattr(self, key, value.get('default'))

    def __getattr__(self, key):
        # Only reached for names missing from __dict__; spec'd keys are
        # served from self.values (None when unset), others yield None too.
        if key in self.argument_spec:
            return self.values.get(key)

    def __setattr__(self, key, value):
        # Spec'd keys live in self.values; identifier keys reject rebinding
        # and None values are silently dropped (keeps defaults intact).
        if key in self.argument_spec:
            if key in self.identifier:
                raise TypeError('cannot set value')
            elif value is not None:
                self.values[key] = value
        else:
            super(ConfigBase, self).__setattr__(key, value)

    def context_config(self, cmd):
        """Queue *cmd* (str or list) into the context-scoped section."""
        if 'context' not in self._rendered_configuration:
            self._rendered_configuration['context'] = list()
        self._rendered_configuration['context'].extend(to_list(cmd))

    def global_config(self, cmd):
        """Queue *cmd* (str or list) into the global section."""
        if 'global' not in self._rendered_configuration:
            self._rendered_configuration['global'] = list()
        self._rendered_configuration['global'].extend(to_list(cmd))

    def get_rendered_configuration(self):
        """Return all queued commands, context section first."""
        config = list()
        for section in ('context', 'global'):
            config.extend(self._rendered_configuration.get(section, []))
        return config

    def set_active_configuration(self, config):
        """Remember the device's current configuration text."""
        self.active_configuration = config

    def render(self, config=None):
        """Subclasses must emit the CLI commands for this resource."""
        raise NotImplementedError

    def get_section(self, config, section):
        """Return the block for *section* from *config* text, or None."""
        if config is not None:
            netcfg = NetworkConfig(indent=1, contents=config)
            try:
                config = netcfg.get_block_config(to_list(section))
            except ValueError:
                config = None
        return config
| gpl-3.0 |
keisuke-umezawa/chainer | tests/chainer_tests/links_tests/connection_tests/test_zoneoutlstm.py | 3 | 7915 | import unittest
import numpy
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer import gradient_check
from chainer import links
from chainer import testing
from chainer.testing import attr
def _sigmoid(x):
    """Reference sigmoid computed as 0.5 * tanh(0.5 * x) + 0.5.

    The 0.5 constant is cast to x's dtype so the result keeps the
    input's precision.
    """
    xp = backend.get_array_module(x)
    half = x.dtype.type(0.5)
    return xp.tanh(x * half) * half + half
def _zoneoutlstm(func, c, h, x, c_creator, h_creator):
    """Reference implementation of one zoneout-LSTM step.

    *func* supplies the link's upward/lateral weights; *c_creator* and
    *h_creator* carry the zoneout masks (``flag_h`` keeps the previous
    state, ``flag_x`` takes the freshly computed one) recorded by the
    link's forward pass.  Returns ``(c_next, h_next)``.
    """
    device = backend.get_device_from_array(x)
    with chainer.using_device(device):
        xp = device.xp
        # Fused input/recurrent projection for all four gates.
        lstm_in = x.dot(func.upward.W.data.T)
        lstm_in += h.dot(func.lateral.W.data.T)
        # Split the projection into the a/i/f/o gate pre-activations.
        lstm_in = xp.reshape(lstm_in, (len(lstm_in),
                                       lstm_in.shape[1] // 4,
                                       4))
        a, i, f, o = xp.split(lstm_in, 4, 2)
        a = xp.reshape(a, (len(a), a.shape[1]))
        i = xp.reshape(i, (len(i), i.shape[1]))
        f = xp.reshape(f, (len(f), f.shape[1]))
        o = xp.reshape(o, (len(o), o.shape[1]))
        c_tmp = xp.tanh(a) * _sigmoid(i) + _sigmoid(f) * c
        # Zoneout: keep the old state where flag_h is set, new elsewhere.
        c_next = c * c_creator.flag_h + c_tmp * c_creator.flag_x
        h_next = h * h_creator.flag_h + \
            (_sigmoid(o) * xp.tanh(c_tmp)) * h_creator.flag_x
    return c_next, h_next
@testing.parameterize(
    {'in_size': 10, 'out_size': 10, 'c_ratio': 0.5, 'h_ratio': 0.25},
    {'in_size': 10, 'out_size': 40, 'c_ratio': 0.25, 'h_ratio': 0.5},
    {'in_size': 10, 'out_size': 10, 'c_ratio': 0.3, 'h_ratio': 0.3},
    {'in_size': 10, 'out_size': 10, 'c_ratio': 1.0, 'h_ratio': 1.0},
    {'in_size': 10, 'out_size': 40, 'c_ratio': 0.0, 'h_ratio': 0.0},
)
class TestZoneoutlstm(unittest.TestCase):
    """Checks links.StatefulZoneoutLSTM against the _zoneoutlstm reference."""

    def setUp(self):
        # Link under test with random weights; batch size is 4 throughout.
        self.link = links.StatefulZoneoutLSTM(self.in_size, self.out_size,
                                              c_ratio=self.c_ratio,
                                              h_ratio=self.h_ratio)
        upward = self.link.upward.W.data
        upward[...] = numpy.random.uniform(-1, 1, upward.shape)
        lateral = self.link.lateral.W.data
        lateral[...] = numpy.random.uniform(-1, 1, lateral.shape)

        c_shape = (4, self.out_size)
        h_shape = (4, self.out_size)
        x_shape = (4, self.in_size)
        gy_shape = (4, self.out_size)
        self.c = numpy.zeros(c_shape).astype(numpy.float32)
        self.h = numpy.zeros(h_shape).astype(numpy.float32)
        self.x = numpy.random.uniform(-1, 1, x_shape).astype(numpy.float32)
        self.gy = numpy.random.uniform(-1, 1, gy_shape).astype(numpy.float32)

    def _forward(self, link, x):
        return link(x)

    def check_forward(self, c_data, h_data, x_data):
        """Two consecutive steps must match the reference implementation."""
        x = chainer.Variable(x_data)
        h1 = self.link(x)
        c1 = self.link.c
        # The creators hold the zoneout masks sampled during forward, so
        # the reference uses exactly the same masks.
        c1_expect, h1_expect = _zoneoutlstm(self.link, c_data, h_data,
                                            x_data, c1.creator, h1.creator)
        testing.assert_allclose(h1.data, h1_expect)
        testing.assert_allclose(self.link.c.data, c1_expect)
        testing.assert_allclose(self.link.h.data, h1_expect)

        h2 = self.link(x)
        c2 = self.link.c
        c2_expect, h2_expect = _zoneoutlstm(self.link, c1_expect, h1_expect,
                                            x_data, c2.creator, h2.creator)
        testing.assert_allclose(h2.data, h2_expect)
        testing.assert_allclose(self.link.c.data, c2_expect)
        testing.assert_allclose(self.link.h.data, h2_expect)

    def test_forward_cpu(self):
        self.check_forward(self.c, self.h, self.x)

    @attr.gpu
    def test_forward_gpu(self):
        self.link.to_gpu()
        self.check_forward(cuda.to_gpu(self.c),
                           cuda.to_gpu(self.h),
                           cuda.to_gpu(self.x))

    @attr.multi_gpu(2)
    def test_forward_gpu_multi(self):
        # Link and data live on GPU 0; forward runs with GPU 1 current.
        with cuda.get_device_from_id(0):
            self.link.to_gpu()
            c = cuda.to_gpu(self.c)
            h = cuda.to_gpu(self.h)
            x = cuda.to_gpu(self.x)
        with cuda.get_device_from_id(1):
            self.check_forward(c, h, x)

    def check_backward(self, c_data, h_data, x_data, y_grad):
        """Gradient wrt x must match numerical differentiation."""
        x = chainer.Variable(x_data)
        y = self._forward(self.link, x)
        c = self.link.c
        # Remember the creators so the numerical pass reuses the same
        # zoneout masks as the analytic one.
        d = {'c_creator': c.creator, 'y_creator': y.creator}
        y.grad = y_grad
        y.backward()

        def f():
            c_creator = d['c_creator']
            y_creator = d['y_creator']
            c, y = _zoneoutlstm(self.link, c_data, h_data,
                                x_data, c_creator, y_creator)
            return y,

        gx, = gradient_check.numerical_grad(f, (x.data,), (y_grad,))
        testing.assert_allclose(gx, x.grad, atol=1e-3)

    def test_backward_cpu(self):
        self.check_backward(self.c, self.h, self.x, self.gy)

    @attr.gpu
    def test_backward_gpu(self):
        self.link.to_gpu()
        self.check_backward(cuda.to_gpu(self.c),
                            cuda.to_gpu(self.h),
                            cuda.to_gpu(self.x),
                            cuda.to_gpu(self.gy))
class TestZoneoutState(unittest.TestCase):
    """reset_state() must drop both recurrent states (c and h)."""

    def setUp(self):
        in_size, out_size = 10, 8
        self.link = links.StatefulZoneoutLSTM(in_size, out_size)

    def check_reset_state(self):
        self.link.reset_state()
        self.assertIsNone(self.link.c)
        self.assertIsNone(self.link.h)

    def test_reset_state_cpu(self):
        self.check_reset_state()

    @attr.gpu
    def test_reset_state_gpu(self):
        self.link.to_gpu()
        self.check_reset_state()
class TestZoneoutToCPUToGPU(unittest.TestCase):
    """Device transfers must move the recurrent state along with the link
    and be idempotent (repeated to_cpu/to_gpu calls stay consistent)."""

    def setUp(self):
        in_size, out_size = 10, 8
        self.link = links.StatefulZoneoutLSTM(in_size, out_size)
        self.c = chainer.Variable(
            numpy.random.uniform(-1, 1, (1, out_size)).astype(numpy.float32))
        self.h = chainer.Variable(
            numpy.random.uniform(-1, 1, (1, out_size)).astype(numpy.float32))

    def check_to_cpu(self, c, h):
        self.link.c = c
        self.link.h = h
        self.link.to_cpu()
        self.assertIs(self.link.xp, numpy)
        self.assertIsInstance(self.link.c.data, self.link.xp.ndarray)
        self.assertIsInstance(self.link.h.data, self.link.xp.ndarray)
        # Calling to_cpu() a second time must be a harmless no-op.
        self.link.to_cpu()
        self.assertIs(self.link.xp, numpy)
        self.assertIsInstance(self.link.c.data, self.link.xp.ndarray)
        self.assertIsInstance(self.link.h.data, self.link.xp.ndarray)

    def test_to_cpu_cpu(self):
        self.check_to_cpu(self.c, self.h)

    @attr.gpu
    def test_to_cpu_gpu(self):
        self.c.to_gpu()
        self.h.to_gpu()
        self.check_to_cpu(self.c, self.h)

    def check_to_cpu_to_gpu(self, c, h):
        # Round-trip CPU <-> GPU, checking xp and array types each step.
        self.link.c = c
        self.link.h = h
        self.link.to_gpu()
        self.assertIs(self.link.xp, cuda.cupy)
        self.assertIsInstance(self.link.c.data, self.link.xp.ndarray)
        self.assertIsInstance(self.link.h.data, self.link.xp.ndarray)
        self.link.to_gpu()
        self.assertIs(self.link.xp, cuda.cupy)
        self.assertIsInstance(self.link.c.data, self.link.xp.ndarray)
        self.assertIsInstance(self.link.h.data, self.link.xp.ndarray)
        self.link.to_cpu()
        self.assertIs(self.link.xp, numpy)
        self.assertIsInstance(self.link.c.data, self.link.xp.ndarray)
        self.assertIsInstance(self.link.h.data, self.link.xp.ndarray)
        self.link.to_gpu()
        self.assertIs(self.link.xp, cuda.cupy)
        self.assertIsInstance(self.link.c.data, self.link.xp.ndarray)
        self.assertIsInstance(self.link.h.data, self.link.xp.ndarray)

    @attr.gpu
    def test_to_cpu_to_gpu_cpu(self):
        self.check_to_cpu_to_gpu(self.c, self.h)

    @attr.gpu
    def test_to_cpu_to_gpu_gpu(self):
        self.c.to_gpu()
        self.h.to_gpu()
        self.check_to_cpu_to_gpu(self.c, self.h)
testing.run_module(__name__, __file__)
| mit |
happyleavesaoc/home-assistant | homeassistant/components/sensor/rflink.py | 7 | 4096 | """
Support for Rflink sensors.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/light.rflink/
"""
import asyncio
from functools import partial
import logging
from homeassistant.components.rflink import (
CONF_ALIASSES, CONF_AUTOMATIC_ADD, CONF_DEVICES, DATA_DEVICE_REGISTER,
DATA_ENTITY_LOOKUP, DOMAIN, EVENT_KEY_ID, EVENT_KEY_SENSOR, EVENT_KEY_UNIT,
RflinkDevice, cv, vol)
from homeassistant.const import (
ATTR_UNIT_OF_MEASUREMENT, CONF_NAME, CONF_PLATFORM,
CONF_UNIT_OF_MEASUREMENT)
DEPENDENCIES = ['rflink']

_LOGGER = logging.getLogger(__name__)

# Frontend icons per known sensor type.
SENSOR_ICONS = {
    'humidity': 'mdi:water-percent',
    'battery': 'mdi:battery',
    'temperature': 'mdi:thermometer',
}

CONF_SENSOR_TYPE = 'sensor_type'

# Platform config: per-device name, sensor type, optional unit override
# and extra rflink id aliasses.
PLATFORM_SCHEMA = vol.Schema({
    vol.Required(CONF_PLATFORM): DOMAIN,
    vol.Optional(CONF_AUTOMATIC_ADD, default=True): cv.boolean,
    vol.Optional(CONF_DEVICES, default={}): vol.Schema({
        cv.string: {
            vol.Optional(CONF_NAME): cv.string,
            vol.Required(CONF_SENSOR_TYPE): cv.string,
            vol.Optional(CONF_UNIT_OF_MEASUREMENT, default=None): cv.string,
            vol.Optional(CONF_ALIASSES, default=[]):
                vol.All(cv.ensure_list, [cv.string]),
        },
    }),
})
def lookup_unit_for_sensor_type(sensor_type):
    """Get unit for sensor type.

    Async friendly.
    """
    from rflink.parser import UNITS, PACKET_FIELDS
    # PACKET_FIELDS maps abbreviation -> field name; invert it so the
    # human-readable sensor type can be looked up.
    abbrev_by_name = {name: abbrev for abbrev, name in PACKET_FIELDS.items()}
    return UNITS.get(abbrev_by_name.get(sensor_type))
def devices_from_config(domain_config, hass=None):
    """Build RflinkSensor entities for every configured device id."""
    devices = []
    for device_id, config in domain_config[CONF_DEVICES].items():
        # Fall back to the unit rflink's protocol tables define for this
        # sensor type when none was configured explicitly.
        if not config[ATTR_UNIT_OF_MEASUREMENT]:
            config[ATTR_UNIT_OF_MEASUREMENT] = lookup_unit_for_sensor_type(
                config[CONF_SENSOR_TYPE])
        device = RflinkSensor(device_id, hass, **config)
        # Register entity to listen to incoming rflink events.
        hass.data[DATA_ENTITY_LOOKUP][EVENT_KEY_SENSOR][device_id].append(
            device)
        devices.append(device)
    return devices
@asyncio.coroutine
def async_setup_platform(hass, config, async_add_devices, discovery_info=None):
    """Set up the Rflink platform: add configured sensors and, when
    automatic_add is enabled, register a factory for unknown devices."""
    async_add_devices(devices_from_config(config, hass))

    @asyncio.coroutine
    def add_new_device(event):
        """Check if device is known, otherwise create device entity."""
        device_id = event[EVENT_KEY_ID]

        rflinksensor = partial(RflinkSensor, device_id, hass)
        device = rflinksensor(event[EVENT_KEY_SENSOR], event[EVENT_KEY_UNIT])
        # Add device entity
        async_add_devices([device])
        # Register entity to listen to incoming rflink events
        hass.data[DATA_ENTITY_LOOKUP][
            EVENT_KEY_SENSOR][device_id].append(device)

        # Schedule task to process event after entity is created
        hass.async_add_job(device.handle_event, event)

    if config[CONF_AUTOMATIC_ADD]:
        hass.data[DATA_DEVICE_REGISTER][EVENT_KEY_SENSOR] = add_new_device
class RflinkSensor(RflinkDevice):
    """Representation of a Rflink sensor."""

    def __init__(self, device_id, hass, sensor_type, unit_of_measurement,
                 **kwargs):
        """Handle sensor specific args (type and unit) and super init."""
        self._sensor_type = sensor_type
        self._unit_of_measurement = unit_of_measurement
        super().__init__(device_id, hass, **kwargs)

    def _handle_event(self, event):
        """Domain specific event handler: store the new reading."""
        self._state = event['value']

    @property
    def unit_of_measurement(self):
        """Return measurement unit."""
        return self._unit_of_measurement

    @property
    def state(self):
        """Return value."""
        return self._state

    @property
    def icon(self):
        """Return possible sensor specific icon (None for unknown types)."""
        if self._sensor_type in SENSOR_ICONS:
            return SENSOR_ICONS[self._sensor_type]
| apache-2.0 |
hhclam/bazel | third_party/py/mock/tests/testmagicmethods.py | 109 | 14863 | # Copyright (C) 2007-2012 Michael Foord & the mock team
# E-mail: fuzzyman AT voidspace DOT org DOT uk
# http://www.voidspace.org.uk/python/mock/
from tests.support import unittest2, inPy3k
try:
    # Probe for the Python 2 builtin; NameError means Python 3.
    unicode
except NameError:
    # Python 3: unicode/long were folded into str/int, so alias the old
    # names to keep the 2-and-3 compatible test code below uniform.
    unicode = str
    long = int
import inspect
import sys
from mock import Mock, MagicMock, _magics
class TestMockingMagicMethods(unittest2.TestCase):
    """Behavioral tests for attaching, deleting and invoking magic methods
    on ``Mock`` and ``MagicMock`` instances, including spec/spec_set
    restrictions and Python 2/3 differences (guarded with ``inPy3k``)."""

    def test_deleting_magic_methods(self):
        """Magic methods can be added and removed from a plain Mock."""
        mock = Mock()
        self.assertFalse(hasattr(mock, '__getitem__'))
        mock.__getitem__ = Mock()
        self.assertTrue(hasattr(mock, '__getitem__'))
        del mock.__getitem__
        self.assertFalse(hasattr(mock, '__getitem__'))

    def test_magicmock_del(self):
        """Deleting a MagicMock magic method disables it, used or not."""
        mock = MagicMock()
        # before using getitem
        del mock.__getitem__
        self.assertRaises(TypeError, lambda: mock['foo'])
        mock = MagicMock()
        # this time use it first
        mock['foo']
        del mock.__getitem__
        self.assertRaises(TypeError, lambda: mock['foo'])

    def test_magic_method_wrapping(self):
        """Assigned functions are wrapped, not stored as-is."""
        mock = Mock()
        def f(self, name):
            return self, 'fish'
        mock.__getitem__ = f
        # the mock wraps the function rather than storing it directly
        self.assertFalse(mock.__getitem__ is f)
        self.assertEqual(mock['foo'], (mock, 'fish'))
        self.assertEqual(mock.__getitem__('foo'), (mock, 'fish'))
        mock.__getitem__ = mock
        self.assertTrue(mock.__getitem__ is mock)

    def test_magic_methods_isolated_between_mocks(self):
        """Configuring one mock's magic method must not leak to others."""
        mock1 = Mock()
        mock2 = Mock()
        mock1.__iter__ = Mock(return_value=iter([]))
        self.assertEqual(list(mock1), [])
        self.assertRaises(TypeError, lambda: list(mock2))

    def test_repr(self):
        mock = Mock()
        self.assertEqual(repr(mock), "<Mock id='%s'>" % id(mock))
        mock.__repr__ = lambda s: 'foo'
        self.assertEqual(repr(mock), 'foo')

    def test_str(self):
        mock = Mock()
        self.assertEqual(str(mock), object.__str__(mock))
        mock.__str__ = lambda s: 'foo'
        self.assertEqual(str(mock), 'foo')

    @unittest2.skipIf(inPy3k, "no unicode in Python 3")
    def test_unicode(self):
        mock = Mock()
        self.assertEqual(unicode(mock), unicode(str(mock)))
        mock.__unicode__ = lambda s: unicode('foo')
        self.assertEqual(unicode(mock), unicode('foo'))

    def test_dict_methods(self):
        """__getitem__/__setitem__/__delitem__ delegate to the handlers."""
        mock = Mock()
        self.assertRaises(TypeError, lambda: mock['foo'])
        def _del():
            del mock['foo']
        def _set():
            mock['foo'] = 3
        self.assertRaises(TypeError, _del)
        self.assertRaises(TypeError, _set)
        _dict = {}
        def getitem(s, name):
            return _dict[name]
        def setitem(s, name, value):
            _dict[name] = value
        def delitem(s, name):
            del _dict[name]
        mock.__setitem__ = setitem
        mock.__getitem__ = getitem
        mock.__delitem__ = delitem
        self.assertRaises(KeyError, lambda: mock['foo'])
        mock['foo'] = 'bar'
        self.assertEqual(_dict, {'foo': 'bar'})
        self.assertEqual(mock['foo'], 'bar')
        del mock['foo']
        self.assertEqual(_dict, {})

    def test_numeric(self):
        """__add__/__iadd__/__radd__ can be configured independently."""
        original = mock = Mock()
        mock.value = 0
        self.assertRaises(TypeError, lambda: mock + 3)
        def add(self, other):
            mock.value += other
            return self
        mock.__add__ = add
        self.assertEqual(mock + 3, mock)
        self.assertEqual(mock.value, 3)
        del mock.__add__
        def iadd(mock):
            mock += 3
        self.assertRaises(TypeError, iadd, mock)
        mock.__iadd__ = add
        mock += 6
        self.assertEqual(mock, original)
        self.assertEqual(mock.value, 9)
        self.assertRaises(TypeError, lambda: 3 + mock)
        mock.__radd__ = add
        self.assertEqual(7 + mock, mock)
        self.assertEqual(mock.value, 16)

    @unittest2.skipIf(inPy3k, 'no truediv in Python 3')
    def test_truediv(self):
        mock = MagicMock()
        mock.__truediv__.return_value = 6
        context = {'mock': mock}
        # exec with the future import so '/' maps to __truediv__ on Py2
        code = 'from __future__ import division\nresult = mock / 7\n'
        exec(code, context)
        self.assertEqual(context['result'], 6)
        mock.__rtruediv__.return_value = 3
        code = 'from __future__ import division\nresult = 2 / mock\n'
        exec(code, context)
        self.assertEqual(context['result'], 3)

    @unittest2.skipIf(not inPy3k, 'truediv is available in Python 2')
    def test_no_truediv(self):
        self.assertRaises(
            AttributeError, getattr, MagicMock(), '__truediv__'
        )
        self.assertRaises(
            AttributeError, getattr, MagicMock(), '__rtruediv__'
        )

    def test_hash(self):
        mock = Mock()
        # test delegation
        self.assertEqual(hash(mock), Mock.__hash__(mock))
        def _hash(s):
            return 3
        mock.__hash__ = _hash
        self.assertEqual(hash(mock), 3)

    def test_nonzero(self):
        """Truthiness hook is __nonzero__ on Py2 and __bool__ on Py3."""
        m = Mock()
        self.assertTrue(bool(m))
        nonzero = lambda s: False
        if not inPy3k:
            m.__nonzero__ = nonzero
        else:
            m.__bool__ = nonzero
        self.assertFalse(bool(m))

    def test_comparison(self):
        # note: this test fails with Jython 2.5.1 due to a Jython bug
        # it is fixed in jython 2.5.2
        if not inPy3k:
            # incomparable in Python 3
            self.assertEqual(Mock() < 3, object() < 3)
            self.assertEqual(Mock() > 3, object() > 3)
            self.assertEqual(Mock() <= 3, object() <= 3)
            self.assertEqual(Mock() >= 3, object() >= 3)
        else:
            self.assertRaises(TypeError, lambda: MagicMock() < object())
            self.assertRaises(TypeError, lambda: object() < MagicMock())
            self.assertRaises(TypeError, lambda: MagicMock() < MagicMock())
            self.assertRaises(TypeError, lambda: MagicMock() > object())
            self.assertRaises(TypeError, lambda: object() > MagicMock())
            self.assertRaises(TypeError, lambda: MagicMock() > MagicMock())
            self.assertRaises(TypeError, lambda: MagicMock() <= object())
            self.assertRaises(TypeError, lambda: object() <= MagicMock())
            self.assertRaises(TypeError, lambda: MagicMock() <= MagicMock())
            self.assertRaises(TypeError, lambda: MagicMock() >= object())
            self.assertRaises(TypeError, lambda: object() >= MagicMock())
            self.assertRaises(TypeError, lambda: MagicMock() >= MagicMock())
        mock = Mock()
        def comp(s, o):
            return True
        mock.__lt__ = mock.__gt__ = mock.__le__ = mock.__ge__ = comp
        self.assertTrue(mock < 3)
        self.assertTrue(mock > 3)
        self.assertTrue(mock <= 3)
        self.assertTrue(mock >= 3)

    def test_equality(self):
        """Default __eq__/__ne__ return real bools and can be overridden."""
        for mock in Mock(), MagicMock():
            self.assertEqual(mock == mock, True)
            self.assertIsInstance(mock == mock, bool)
            self.assertEqual(mock != mock, False)
            self.assertIsInstance(mock != mock, bool)
            self.assertEqual(mock == object(), False)
            self.assertEqual(mock != object(), True)
            def eq(self, other):
                return other == 3
            mock.__eq__ = eq
            self.assertTrue(mock == 3)
            self.assertFalse(mock == 4)
            def ne(self, other):
                return other == 3
            mock.__ne__ = ne
            self.assertTrue(mock != 3)
            self.assertFalse(mock != 4)
        mock = MagicMock()
        mock.__eq__.return_value = True
        self.assertIsInstance(mock == 3, bool)
        self.assertEqual(mock == 3, True)
        mock.__ne__.return_value = False
        self.assertIsInstance(mock != 3, bool)
        self.assertEqual(mock != 3, False)

    def test_len_contains_iter(self):
        mock = Mock()
        self.assertRaises(TypeError, len, mock)
        self.assertRaises(TypeError, iter, mock)
        self.assertRaises(TypeError, lambda: 'foo' in mock)
        mock.__len__ = lambda s: 6
        self.assertEqual(len(mock), 6)
        mock.__contains__ = lambda s, o: o == 3
        self.assertTrue(3 in mock)
        self.assertFalse(6 in mock)
        mock.__iter__ = lambda s: iter('foobarbaz')
        self.assertEqual(list(mock), list('foobarbaz'))

    def test_magicmock(self):
        """MagicMock preconfigures all supported magic methods."""
        mock = MagicMock()
        mock.__iter__.return_value = iter([1, 2, 3])
        self.assertEqual(list(mock), [1, 2, 3])
        name = '__nonzero__'
        other = '__bool__'
        if inPy3k:
            name, other = other, name
        getattr(mock, name).return_value = False
        self.assertFalse(hasattr(mock, other))
        self.assertFalse(bool(mock))
        for entry in _magics:
            self.assertTrue(hasattr(mock, entry))
        self.assertFalse(hasattr(mock, '__imaginery__'))

    def test_magic_mock_equality(self):
        mock = MagicMock()
        self.assertIsInstance(mock == object(), bool)
        self.assertIsInstance(mock != object(), bool)
        self.assertEqual(mock == object(), False)
        self.assertEqual(mock != object(), True)
        self.assertEqual(mock == mock, True)
        self.assertEqual(mock != mock, False)

    def test_magicmock_defaults(self):
        """Default return values for numeric/container protocols."""
        mock = MagicMock()
        self.assertEqual(int(mock), 1)
        self.assertEqual(complex(mock), 1j)
        self.assertEqual(float(mock), 1.0)
        self.assertEqual(long(mock), long(1))
        self.assertNotIn(object(), mock)
        self.assertEqual(len(mock), 0)
        self.assertEqual(list(mock), [])
        self.assertEqual(hash(mock), object.__hash__(mock))
        self.assertEqual(str(mock), object.__str__(mock))
        self.assertEqual(unicode(mock), object.__str__(mock))
        self.assertIsInstance(unicode(mock), unicode)
        self.assertTrue(bool(mock))
        if not inPy3k:
            self.assertEqual(oct(mock), '1')
        else:
            # in Python 3 oct and hex use __index__
            # so these tests are for __index__ in py3k
            self.assertEqual(oct(mock), '0o1')
        self.assertEqual(hex(mock), '0x1')
        # how to test __sizeof__ ?

    @unittest2.skipIf(inPy3k, "no __cmp__ in Python 3")
    def test_non_default_magic_methods(self):
        mock = MagicMock()
        self.assertRaises(AttributeError, lambda: mock.__cmp__)
        mock = Mock()
        mock.__cmp__ = lambda s, o: 0
        self.assertEqual(mock, object())

    def test_magic_methods_and_spec(self):
        """spec restricts which magic methods may exist or be set."""
        class Iterable(object):
            def __iter__(self):
                pass
        mock = Mock(spec=Iterable)
        self.assertRaises(AttributeError, lambda: mock.__iter__)
        mock.__iter__ = Mock(return_value=iter([]))
        self.assertEqual(list(mock), [])
        class NonIterable(object):
            pass
        mock = Mock(spec=NonIterable)
        self.assertRaises(AttributeError, lambda: mock.__iter__)
        def set_int():
            mock.__int__ = Mock(return_value=iter([]))
        self.assertRaises(AttributeError, set_int)
        mock = MagicMock(spec=Iterable)
        self.assertEqual(list(mock), [])
        self.assertRaises(AttributeError, set_int)

    def test_magic_methods_and_spec_set(self):
        """Same restrictions hold for spec_set."""
        class Iterable(object):
            def __iter__(self):
                pass
        mock = Mock(spec_set=Iterable)
        self.assertRaises(AttributeError, lambda: mock.__iter__)
        mock.__iter__ = Mock(return_value=iter([]))
        self.assertEqual(list(mock), [])
        class NonIterable(object):
            pass
        mock = Mock(spec_set=NonIterable)
        self.assertRaises(AttributeError, lambda: mock.__iter__)
        def set_int():
            mock.__int__ = Mock(return_value=iter([]))
        self.assertRaises(AttributeError, set_int)
        mock = MagicMock(spec_set=Iterable)
        self.assertEqual(list(mock), [])
        self.assertRaises(AttributeError, set_int)

    def test_setting_unsupported_magic_method(self):
        mock = MagicMock()
        def set_setattr():
            mock.__setattr__ = lambda self, name: None
        self.assertRaisesRegexp(AttributeError,
            "Attempting to set unsupported magic method '__setattr__'.",
            set_setattr
        )

    def test_attributes_and_return_value(self):
        mock = MagicMock()
        attr = mock.foo
        def _get_type(obj):
            # the type of every mock (or magicmock) is a custom subclass
            # so the real type is the second in the mro
            return type(obj).__mro__[1]
        self.assertEqual(_get_type(attr), MagicMock)
        returned = mock()
        self.assertEqual(_get_type(returned), MagicMock)

    def test_magic_methods_are_magic_mocks(self):
        mock = MagicMock()
        self.assertIsInstance(mock.__getitem__, MagicMock)
        mock[1][2].__getitem__.return_value = 3
        self.assertEqual(mock[1][2][3], 3)

    def test_magic_method_reset_mock(self):
        mock = MagicMock()
        str(mock)
        self.assertTrue(mock.__str__.called)
        mock.reset_mock()
        self.assertFalse(mock.__str__.called)

    @unittest2.skipUnless(sys.version_info[:2] >= (2, 6),
                          "__dir__ not available until Python 2.6 or later")
    def test_dir(self):
        # overriding the default implementation
        for mock in Mock(), MagicMock():
            def _dir(self):
                return ['foo']
            mock.__dir__ = _dir
            self.assertEqual(dir(mock), ['foo'])

    @unittest2.skipIf('PyPy' in sys.version, "This fails differently on pypy")
    def test_bound_methods(self):
        m = Mock()
        # XXXX should this be an expected failure instead?
        # this seems like it should work, but is hard to do without introducing
        # other api inconsistencies. Failure message could be better though.
        m.__iter__ = [3].__iter__
        self.assertRaises(TypeError, iter, m)

    def test_magic_method_type(self):
        class Foo(MagicMock):
            pass
        foo = Foo()
        self.assertIsInstance(foo.__int__, Foo)

    def test_descriptor_from_class(self):
        m = MagicMock()
        type(m).__str__.return_value = 'foo'
        self.assertEqual(str(m), 'foo')

    def test_iterable_as_iter_return_value(self):
        """A list return_value is re-iterated; an iterator is consumed."""
        m = MagicMock()
        m.__iter__.return_value = [1, 2, 3]
        self.assertEqual(list(m), [1, 2, 3])
        self.assertEqual(list(m), [1, 2, 3])
        m.__iter__.return_value = iter([4, 5, 6])
        self.assertEqual(list(m), [4, 5, 6])
        self.assertEqual(list(m), [])
# Allow running this test module directly.
if __name__ == '__main__':
    unittest2.main()
| apache-2.0 |
tersmitten/ansible | packaging/sdist/check-link-behavior.py | 114 | 1290 | #!/usr/bin/env python
"""Checks for link behavior required for sdist to retain symlinks."""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import platform
import shutil
import sys
import tempfile
def main():
    """Main program entry point.

    Creates a file, a symlink to it, and a hard link of that symlink in a
    scratch directory, then verifies both links are still symbolic links.
    """
    scratch = tempfile.mkdtemp()
    target = os.path.join(scratch, 'file.txt')
    symlink = os.path.join(scratch, 'symlink.txt')
    hardlink = os.path.join(scratch, 'hardlink.txt')

    try:
        # create an empty target file for the links
        with open(target, 'w'):
            pass

        os.symlink(target, symlink)
        os.link(symlink, hardlink)

        if not os.path.islink(symlink):
            abort('Symbolic link not created.')

        if not os.path.islink(hardlink):
            # known issue on MacOS (Darwin)
            abort('Hard link of symbolic link created as a regular file.')
    finally:
        shutil.rmtree(scratch)
def abort(reason):
    """Exit the program with an explanatory error message.

    :type reason: str
    """
    message = ('ERROR: %s\n'
               'This will prevent symbolic links from being preserved in the resulting tarball.\n'
               'Aborting creation of sdist on platform: %s'
               % (reason, platform.system()))
    sys.exit(message)
if __name__ == '__main__':
main()
| gpl-3.0 |
codeworm96/shadowsocks | shadowsocks/common.py | 945 | 8921 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2013-2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import socket
import struct
import logging
def compat_ord(s):
    """ord() that also accepts ints (Python 3 bytes iteration yields ints)."""
    # note: type(...) == int deliberately excludes bool, matching the original
    return s if type(s) == int else _ord(s)
def compat_chr(d):
    """chr() that returns a single byte on Python 3 and a str on Python 2."""
    if bytes is not str:
        # Python 3: build a length-1 bytes object
        return bytes([d])
    return _chr(d)
# Keep references to the builtins, then shadow ord/chr module-wide with the
# int/byte tolerant versions above so the rest of this module works on both
# Python 2 and Python 3.
_ord = ord
_chr = chr
ord = compat_ord
chr = compat_chr
def to_bytes(s):
    """Encode text to UTF-8 bytes on Python 3; pass everything else through."""
    if bytes != str and type(s) == str:
        return s.encode('utf-8')
    return s
def to_str(s):
    """Decode UTF-8 bytes to text on Python 3; pass everything else through."""
    if bytes != str and type(s) == bytes:
        return s.decode('utf-8')
    return s
def inet_ntop(family, ipstr):
    """Convert a packed binary address to text.

    Fallback used when the platform's socket module lacks inet_ntop;
    relies on the module-level ``ord`` shadow (compat_ord), since iterating
    bytes yields ints on Python 3.
    """
    if family == socket.AF_INET:
        return to_bytes(socket.inet_ntoa(ipstr))
    elif family == socket.AF_INET6:
        import re
        # Format each 16-bit group as hex with leading zeros stripped, then
        # collapse the first run of empty groups into '::'.
        # NOTE(review): output is not guaranteed to match RFC 5952 canonical
        # form for every address — confirm callers only need a parseable form.
        v6addr = ':'.join(('%02X%02X' % (ord(i), ord(j))).lstrip('0')
                          for i, j in zip(ipstr[::2], ipstr[1::2]))
        v6addr = re.sub('::+', '::', v6addr, count=1)
        return to_bytes(v6addr)
def inet_pton(family, addr):
    """Convert a text address to packed binary form.

    Fallback used when the platform's socket module lacks inet_pton.

    :param family: socket.AF_INET or socket.AF_INET6
    :param addr: address as text or UTF-8 bytes
    :returns: packed bytes (4 bytes for IPv4, 16 for IPv6)
    :raises RuntimeError: for unsupported address families
    """
    # inline to_str so this helper is self-contained
    if bytes != str and type(addr) == bytes:
        addr = addr.decode('utf-8')
    if family == socket.AF_INET:
        return socket.inet_aton(addr)
    elif family == socket.AF_INET6:
        if '.' in addr:  # an IPv4-mapped address, e.g. ::ffff:1.2.3.4
            v4addr = addr[addr.rindex(':') + 1:]
            v4addr = socket.inet_aton(v4addr)
            # BUG FIX: map() returns an iterator on Python 3, which has no
            # .insert(); build a real list instead.  bytearray iteration
            # yields ints on both Python 2 and 3, avoiding the ord() shim.
            v4addr = ['%02X' % x for x in bytearray(v4addr)]
            v4addr.insert(2, ':')
            newaddr = addr[:addr.rindex(':') + 1] + ''.join(v4addr)
            return inet_pton(family, newaddr)
        dbyts = [0] * 8  # 8 groups
        grps = addr.split(':')
        for i, v in enumerate(grps):
            if v:
                dbyts[i] = int(v, 16)
            else:
                # '::' found: fill the remaining groups from the right
                for j, w in enumerate(grps[::-1]):
                    if w:
                        dbyts[7 - j] = int(w, 16)
                    else:
                        break
                break
        # Pack the 8 groups big-endian; equivalent to the previous per-byte
        # chr() concatenation but portable across Python 2 and 3.
        return struct.pack('!8H', *dbyts)
    else:
        raise RuntimeError("What family?")
def is_ip(address):
    """Return the address family of *address* when it parses as an IP
    (socket.AF_INET or socket.AF_INET6), otherwise False."""
    for family in (socket.AF_INET, socket.AF_INET6):
        try:
            # decode inside the try: a bad UTF-8 payload raises
            # UnicodeDecodeError (a ValueError) and must yield False
            text = address if type(address) == str else address.decode('utf8')
            inet_pton(family, text)
        except (TypeError, ValueError, OSError, IOError):
            pass
        else:
            return family
    return False
def patch_socket():
    """Install the pure-Python fallbacks on the socket module if missing."""
    for name, func in (('inet_pton', inet_pton), ('inet_ntop', inet_ntop)):
        if not hasattr(socket, name):
            setattr(socket, name, func)


patch_socket()
# Wire-format address-type tags consumed by parse_header/pack_addr below.
ADDRTYPE_IPV4 = 1
ADDRTYPE_IPV6 = 4
ADDRTYPE_HOST = 3
def pack_addr(address):
    """Serialize *address* into the <type byte><payload> wire format.

    IPv4 -> b'\\x01' + 4 bytes, IPv6 -> b'\\x04' + 16 bytes,
    hostname -> b'\\x03' + length byte + name (truncated to 255 bytes).
    """
    text = to_str(address)
    for family in (socket.AF_INET, socket.AF_INET6):
        try:
            packed = socket.inet_pton(family, text)
        except (TypeError, ValueError, OSError, IOError):
            continue
        prefix = b'\x04' if family == socket.AF_INET6 else b'\x01'
        return prefix + packed
    if len(address) > 255:
        address = address[:255]  # TODO
    # module-level chr (compat_chr) yields a single byte on Python 3
    return b'\x03' + chr(len(address)) + address
def parse_header(data):
    """Parse an address header from the start of *data*.

    :param data: bytes beginning with an address-type byte
    :returns: tuple (addrtype, dest_addr, dest_port, header_length), or
        None when the header is truncated or the address type is unknown
    """
    addrtype = ord(data[0])  # module-level ord tolerates both int and byte
    dest_addr = None
    dest_port = None
    header_length = 0
    if addrtype == ADDRTYPE_IPV4:
        # 1 type byte + 4 address bytes + 2 port bytes
        if len(data) >= 7:
            dest_addr = socket.inet_ntoa(data[1:5])
            dest_port = struct.unpack('>H', data[5:7])[0]
            header_length = 7
        else:
            logging.warn('header is too short')
    elif addrtype == ADDRTYPE_HOST:
        # 1 type byte + 1 length byte + hostname + 2 port bytes
        if len(data) > 2:
            addrlen = ord(data[1])
            # NOTE(review): this check only guarantees the hostname fits;
            # a buffer of length 2+addrlen..3+addrlen has a truncated port
            # and struct.unpack will raise — confirm callers guard this.
            if len(data) >= 2 + addrlen:
                dest_addr = data[2:2 + addrlen]
                dest_port = struct.unpack('>H', data[2 + addrlen:4 +
                                                    addrlen])[0]
                header_length = 4 + addrlen
            else:
                logging.warn('header is too short')
        else:
            logging.warn('header is too short')
    elif addrtype == ADDRTYPE_IPV6:
        # 1 type byte + 16 address bytes + 2 port bytes
        if len(data) >= 19:
            dest_addr = socket.inet_ntop(socket.AF_INET6, data[1:17])
            dest_port = struct.unpack('>H', data[17:19])[0]
            header_length = 19
        else:
            logging.warn('header is too short')
    else:
        logging.warn('unsupported addrtype %d, maybe wrong password or '
                     'encryption method' % addrtype)
    if dest_addr is None:
        return None
    return addrtype, to_bytes(dest_addr), dest_port, header_length
class IPNetwork(object):
    """A collection of CIDR networks supporting ``addr in network`` tests.

    BUG FIX: value comparisons previously used ``is``/``is not`` (e.g.
    ``addr is ""``, ``len(block) is 1``, ``ip is not 0``), which depends on
    CPython object interning and raises SyntaxWarning on modern Pythons;
    they are replaced with ``==``/``!=`` throughout.  Also fixes the
    "did't" typo in the warning message.
    """

    # bit lengths per family; False maps to 0 for non-IP input from is_ip()
    ADDRLENGTH = {socket.AF_INET: 32, socket.AF_INET6: 128, False: 0}

    def __init__(self, addrs):
        """*addrs* is a comma-separated string or an iterable of CIDR blocks."""
        self._network_list_v4 = []
        self._network_list_v6 = []
        if type(addrs) == str:
            addrs = addrs.split(',')
        list(map(self.add_network, addrs))

    def add_network(self, addr):
        """Register one CIDR block given as text, e.g. '127.0.0.0/8'.

        :raises Exception: when *addr* is not valid CIDR notation
        """
        if addr == "":
            return
        block = addr.split('/')
        addr_family = is_ip(block[0])
        addr_len = IPNetwork.ADDRLENGTH[addr_family]
        if addr_family == socket.AF_INET:
            ip, = struct.unpack("!I", socket.inet_aton(block[0]))
        elif addr_family == socket.AF_INET6:
            hi, lo = struct.unpack("!QQ", inet_pton(addr_family, block[0]))
            ip = (hi << 64) | lo
        else:
            raise Exception("Not a valid CIDR notation: %s" % addr)
        if len(block) == 1:
            # No explicit prefix: treat the trailing zero bits as host part.
            prefix_size = 0
            while (ip & 1) == 0 and ip != 0:
                ip >>= 1
                prefix_size += 1
            logging.warn("You didn't specify CIDR routing prefix size for %s, "
                         "implicit treated as %s/%d" % (addr, addr, addr_len))
        elif block[1].isdigit() and int(block[1]) <= addr_len:
            prefix_size = addr_len - int(block[1])
            ip >>= prefix_size
        else:
            raise Exception("Not a valid CIDR notation: %s" % addr)
        if addr_family == socket.AF_INET:
            self._network_list_v4.append((ip, prefix_size))
        else:
            self._network_list_v6.append((ip, prefix_size))

    def __contains__(self, addr):
        """Return True if text IP *addr* falls inside any stored network."""
        addr_family = is_ip(addr)
        if addr_family == socket.AF_INET:
            ip, = struct.unpack("!I", socket.inet_aton(addr))
            return any(map(lambda n_ps: n_ps[0] == ip >> n_ps[1],
                           self._network_list_v4))
        elif addr_family == socket.AF_INET6:
            hi, lo = struct.unpack("!QQ", inet_pton(addr_family, addr))
            ip = (hi << 64) | lo
            return any(map(lambda n_ps: n_ps[0] == ip >> n_ps[1],
                           self._network_list_v6))
        else:
            return False
def test_inet_conv():
    """Round-trip the inet_pton/inet_ntop fallbacks for v4 and v6."""
    ipv4 = b'8.8.4.4'
    b = inet_pton(socket.AF_INET, ipv4)
    assert inet_ntop(socket.AF_INET, b) == ipv4
    ipv6 = b'2404:6800:4005:805::1011'
    b = inet_pton(socket.AF_INET6, ipv6)
    assert inet_ntop(socket.AF_INET6, b) == ipv6
def test_parse_header():
    """Check parse_header on hostname, IPv4 and IPv6 headers."""
    assert parse_header(b'\x03\x0ewww.google.com\x00\x50') == \
        (3, b'www.google.com', 80, 18)
    assert parse_header(b'\x01\x08\x08\x08\x08\x00\x35') == \
        (1, b'8.8.8.8', 53, 7)
    assert parse_header((b'\x04$\x04h\x00@\x05\x08\x05\x00\x00\x00\x00\x00'
                         b'\x00\x10\x11\x00\x50')) == \
        (4, b'2404:6800:4005:805::1011', 80, 19)
def test_pack_header():
    """Check pack_addr output for IPv4, IPv6 and hostname inputs."""
    assert pack_addr(b'8.8.8.8') == b'\x01\x08\x08\x08\x08'
    assert pack_addr(b'2404:6800:4005:805::1011') == \
        b'\x04$\x04h\x00@\x05\x08\x05\x00\x00\x00\x00\x00\x00\x10\x11'
    assert pack_addr(b'www.google.com') == b'\x03\x0ewww.google.com'
def test_ip_network():
    """Check IPNetwork membership tests for mixed v4/v6 CIDR blocks."""
    ip_network = IPNetwork('127.0.0.0/24,::ff:1/112,::1,192.168.1.1,192.0.2.0')
    assert '127.0.0.1' in ip_network
    assert '127.0.1.1' not in ip_network
    assert ':ff:ffff' in ip_network
    assert '::ffff:1' not in ip_network
    assert '::1' in ip_network
    assert '::2' not in ip_network
    assert '192.168.1.1' in ip_network
    assert '192.168.1.2' not in ip_network
    assert '192.0.2.1' in ip_network
    assert '192.0.3.1' in ip_network  # 192.0.2.0 is treated as 192.0.2.0/23
    assert 'www.google.com' not in ip_network
# Run the module self-tests when executed directly.
if __name__ == '__main__':
    test_inet_conv()
    test_parse_header()
    test_pack_header()
    test_ip_network()
| apache-2.0 |
garnertb/redis-py | tests/test_pubsub.py | 43 | 14956 | from __future__ import with_statement
import pytest
import time
import redis
from redis.exceptions import ConnectionError
from redis._compat import basestring, u, unichr
from .conftest import r as _redis_client
def wait_for_message(pubsub, timeout=0.1, ignore_subscribe_messages=False):
    """Poll *pubsub* until a message arrives or *timeout* seconds elapse.

    Returns the message dict, or None when the timeout expires.
    """
    deadline = time.time() + timeout
    while time.time() < deadline:
        message = pubsub.get_message(
            ignore_subscribe_messages=ignore_subscribe_messages)
        if message is not None:
            return message
        time.sleep(0.01)
    return None
def make_message(type, channel, data, pattern=None):
    """Build the dict redis-py delivers for a pub/sub event.

    Text values are encoded to UTF-8 bytes; non-text data passes through.
    """
    payload = data.encode('utf-8') if isinstance(data, basestring) else data
    return {
        'type': type,
        'pattern': pattern.encode('utf-8') if pattern else None,
        'channel': channel.encode('utf-8'),
        'data': payload,
    }
def make_subscribe_test_data(pubsub, type):
    """Return the kwargs dict used by the parametrized subscribe tests.

    *type* selects between channel-based and pattern-based operations.
    """
    if type == 'channel':
        return dict(
            p=pubsub,
            sub_type='subscribe',
            unsub_type='unsubscribe',
            sub_func=pubsub.subscribe,
            unsub_func=pubsub.unsubscribe,
            keys=['foo', 'bar', u('uni') + unichr(4456) + u('code')],
        )
    if type == 'pattern':
        return dict(
            p=pubsub,
            sub_type='psubscribe',
            unsub_type='punsubscribe',
            sub_func=pubsub.psubscribe,
            unsub_func=pubsub.punsubscribe,
            keys=['f*', 'b*', u('uni') + unichr(4456) + u('*')],
        )
    assert False, 'invalid subscribe type: %s' % type
class TestPubSubSubscribeUnsubscribe(object):
    """Tests for (p)subscribe/(p)unsubscribe confirmation messages,
    resubscription after reconnect, and the ``subscribed`` flag."""

    def _test_subscribe_unsubscribe(self, p, sub_type, unsub_type, sub_func,
                                    unsub_func, keys):
        for key in keys:
            assert sub_func(key) is None
        # should be a message for each channel/pattern we just subscribed to
        for i, key in enumerate(keys):
            assert wait_for_message(p) == make_message(sub_type, key, i + 1)
        for key in keys:
            assert unsub_func(key) is None
        # should be a message for each channel/pattern we just unsubscribed
        # from
        for i, key in enumerate(keys):
            i = len(keys) - 1 - i
            assert wait_for_message(p) == make_message(unsub_type, key, i)

    def test_channel_subscribe_unsubscribe(self, r):
        kwargs = make_subscribe_test_data(r.pubsub(), 'channel')
        self._test_subscribe_unsubscribe(**kwargs)

    def test_pattern_subscribe_unsubscribe(self, r):
        kwargs = make_subscribe_test_data(r.pubsub(), 'pattern')
        self._test_subscribe_unsubscribe(**kwargs)

    def _test_resubscribe_on_reconnection(self, p, sub_type, unsub_type,
                                          sub_func, unsub_func, keys):
        for key in keys:
            assert sub_func(key) is None
        # should be a message for each channel/pattern we just subscribed to
        for i, key in enumerate(keys):
            assert wait_for_message(p) == make_message(sub_type, key, i + 1)
        # manually disconnect
        p.connection.disconnect()
        # calling get_message again reconnects and resubscribes
        # note, we may not re-subscribe to channels in exactly the same order
        # so we have to do some extra checks to make sure we got them all
        messages = []
        for i in range(len(keys)):
            messages.append(wait_for_message(p))
        unique_channels = set()
        assert len(messages) == len(keys)
        for i, message in enumerate(messages):
            assert message['type'] == sub_type
            assert message['data'] == i + 1
            assert isinstance(message['channel'], bytes)
            channel = message['channel'].decode('utf-8')
            unique_channels.add(channel)
        assert len(unique_channels) == len(keys)
        for channel in unique_channels:
            assert channel in keys

    def test_resubscribe_to_channels_on_reconnection(self, r):
        kwargs = make_subscribe_test_data(r.pubsub(), 'channel')
        self._test_resubscribe_on_reconnection(**kwargs)

    def test_resubscribe_to_patterns_on_reconnection(self, r):
        kwargs = make_subscribe_test_data(r.pubsub(), 'pattern')
        self._test_resubscribe_on_reconnection(**kwargs)

    def _test_subscribed_property(self, p, sub_type, unsub_type, sub_func,
                                  unsub_func, keys):
        assert p.subscribed is False
        sub_func(keys[0])
        # we're now subscribed even though we haven't processed the
        # reply from the server just yet
        assert p.subscribed is True
        assert wait_for_message(p) == make_message(sub_type, keys[0], 1)
        # we're still subscribed
        assert p.subscribed is True
        # unsubscribe from all channels
        unsub_func()
        # we're still technically subscribed until we process the
        # response messages from the server
        assert p.subscribed is True
        assert wait_for_message(p) == make_message(unsub_type, keys[0], 0)
        # now we're no longer subscribed as no more messages can be delivered
        # to any channels we were listening to
        assert p.subscribed is False
        # subscribing again flips the flag back
        sub_func(keys[0])
        assert p.subscribed is True
        assert wait_for_message(p) == make_message(sub_type, keys[0], 1)
        # unsubscribe again
        unsub_func()
        assert p.subscribed is True
        # subscribe to another channel before reading the unsubscribe response
        sub_func(keys[1])
        assert p.subscribed is True
        # read the unsubscribe for key1
        assert wait_for_message(p) == make_message(unsub_type, keys[0], 0)
        # we're still subscribed to key2, so subscribed should still be True
        assert p.subscribed is True
        # read the key2 subscribe message
        assert wait_for_message(p) == make_message(sub_type, keys[1], 1)
        unsub_func()
        # haven't read the message yet, so we're still subscribed
        assert p.subscribed is True
        assert wait_for_message(p) == make_message(unsub_type, keys[1], 0)
        # now we're finally unsubscribed
        assert p.subscribed is False

    def test_subscribe_property_with_channels(self, r):
        kwargs = make_subscribe_test_data(r.pubsub(), 'channel')
        self._test_subscribed_property(**kwargs)

    def test_subscribe_property_with_patterns(self, r):
        kwargs = make_subscribe_test_data(r.pubsub(), 'pattern')
        self._test_subscribed_property(**kwargs)

    def test_ignore_all_subscribe_messages(self, r):
        p = r.pubsub(ignore_subscribe_messages=True)
        checks = (
            (p.subscribe, 'foo'),
            (p.unsubscribe, 'foo'),
            (p.psubscribe, 'f*'),
            (p.punsubscribe, 'f*'),
        )
        assert p.subscribed is False
        for func, channel in checks:
            assert func(channel) is None
            assert p.subscribed is True
            # subscribe/unsubscribe confirmations are suppressed entirely
            assert wait_for_message(p) is None
        assert p.subscribed is False

    def test_ignore_individual_subscribe_messages(self, r):
        p = r.pubsub()
        checks = (
            (p.subscribe, 'foo'),
            (p.unsubscribe, 'foo'),
            (p.psubscribe, 'f*'),
            (p.punsubscribe, 'f*'),
        )
        assert p.subscribed is False
        for func, channel in checks:
            assert func(channel) is None
            assert p.subscribed is True
            # suppression can also be requested per get_message call
            message = wait_for_message(p, ignore_subscribe_messages=True)
            assert message is None
        assert p.subscribed is False
class TestPubSubMessages(object):
    """Publishing to channels and patterns, and delivery both via
    get_message() and via registered message handlers."""

    def setup_method(self, method):
        # reset the captured message before every test
        self.message = None

    def message_handler(self, message):
        # handler used as a subscribe callback; records the last message
        self.message = message

    def test_published_message_to_channel(self, r):
        p = r.pubsub(ignore_subscribe_messages=True)
        p.subscribe('foo')
        assert r.publish('foo', 'test message') == 1
        message = wait_for_message(p)
        assert isinstance(message, dict)
        assert message == make_message('message', 'foo', 'test message')

    def test_published_message_to_pattern(self, r):
        p = r.pubsub(ignore_subscribe_messages=True)
        p.subscribe('foo')
        p.psubscribe('f*')
        # 1 to pattern, 1 to channel
        assert r.publish('foo', 'test message') == 2
        message1 = wait_for_message(p)
        message2 = wait_for_message(p)
        assert isinstance(message1, dict)
        assert isinstance(message2, dict)
        expected = [
            make_message('message', 'foo', 'test message'),
            make_message('pmessage', 'foo', 'test message', pattern='f*')
        ]
        assert message1 in expected
        assert message2 in expected
        assert message1 != message2

    def test_channel_message_handler(self, r):
        p = r.pubsub(ignore_subscribe_messages=True)
        p.subscribe(foo=self.message_handler)
        assert r.publish('foo', 'test message') == 1
        # handled messages are consumed by the handler, not returned
        assert wait_for_message(p) is None
        assert self.message == make_message('message', 'foo', 'test message')

    def test_pattern_message_handler(self, r):
        p = r.pubsub(ignore_subscribe_messages=True)
        p.psubscribe(**{'f*': self.message_handler})
        assert r.publish('foo', 'test message') == 1
        assert wait_for_message(p) is None
        assert self.message == make_message('pmessage', 'foo', 'test message',
                                            pattern='f*')

    def test_unicode_channel_message_handler(self, r):
        p = r.pubsub(ignore_subscribe_messages=True)
        channel = u('uni') + unichr(4456) + u('code')
        channels = {channel: self.message_handler}
        p.subscribe(**channels)
        assert r.publish(channel, 'test message') == 1
        assert wait_for_message(p) is None
        assert self.message == make_message('message', channel, 'test message')

    def test_unicode_pattern_message_handler(self, r):
        p = r.pubsub(ignore_subscribe_messages=True)
        pattern = u('uni') + unichr(4456) + u('*')
        channel = u('uni') + unichr(4456) + u('code')
        p.psubscribe(**{pattern: self.message_handler})
        assert r.publish(channel, 'test message') == 1
        assert wait_for_message(p) is None
        assert self.message == make_message('pmessage', channel,
                                            'test message', pattern=pattern)
class TestPubSubAutoDecoding(object):
    "These tests only validate that we get unicode values back"

    channel = u('uni') + unichr(4456) + u('code')
    pattern = u('uni') + unichr(4456) + u('*')
    data = u('abc') + unichr(4458) + u('123')

    def make_message(self, type, channel, data, pattern=None):
        # unlike the module-level helper, values stay as (unicode) text
        # because the client is created with decode_responses=True
        return {
            'type': type,
            'channel': channel,
            'pattern': pattern,
            'data': data
        }

    def setup_method(self, method):
        # reset the captured message before every test
        self.message = None

    def message_handler(self, message):
        self.message = message

    @pytest.fixture()
    def r(self, request):
        # override the module fixture to enable response decoding
        return _redis_client(request=request, decode_responses=True)

    def test_channel_subscribe_unsubscribe(self, r):
        p = r.pubsub()
        p.subscribe(self.channel)
        assert wait_for_message(p) == self.make_message('subscribe',
                                                        self.channel, 1)
        p.unsubscribe(self.channel)
        assert wait_for_message(p) == self.make_message('unsubscribe',
                                                        self.channel, 0)

    def test_pattern_subscribe_unsubscribe(self, r):
        p = r.pubsub()
        p.psubscribe(self.pattern)
        assert wait_for_message(p) == self.make_message('psubscribe',
                                                        self.pattern, 1)
        p.punsubscribe(self.pattern)
        assert wait_for_message(p) == self.make_message('punsubscribe',
                                                        self.pattern, 0)

    def test_channel_publish(self, r):
        p = r.pubsub(ignore_subscribe_messages=True)
        p.subscribe(self.channel)
        r.publish(self.channel, self.data)
        assert wait_for_message(p) == self.make_message('message',
                                                        self.channel,
                                                        self.data)

    def test_pattern_publish(self, r):
        p = r.pubsub(ignore_subscribe_messages=True)
        p.psubscribe(self.pattern)
        r.publish(self.channel, self.data)
        assert wait_for_message(p) == self.make_message('pmessage',
                                                        self.channel,
                                                        self.data,
                                                        pattern=self.pattern)

    def test_channel_message_handler(self, r):
        p = r.pubsub(ignore_subscribe_messages=True)
        p.subscribe(**{self.channel: self.message_handler})
        r.publish(self.channel, self.data)
        assert wait_for_message(p) is None
        assert self.message == self.make_message('message', self.channel,
                                                 self.data)
        # test that we reconnected to the correct channel
        p.connection.disconnect()
        assert wait_for_message(p) is None  # should reconnect
        new_data = self.data + u('new data')
        r.publish(self.channel, new_data)
        assert wait_for_message(p) is None
        assert self.message == self.make_message('message', self.channel,
                                                 new_data)

    def test_pattern_message_handler(self, r):
        p = r.pubsub(ignore_subscribe_messages=True)
        p.psubscribe(**{self.pattern: self.message_handler})
        r.publish(self.channel, self.data)
        assert wait_for_message(p) is None
        assert self.message == self.make_message('pmessage', self.channel,
                                                 self.data,
                                                 pattern=self.pattern)
        # test that we reconnected to the correct pattern
        p.connection.disconnect()
        assert wait_for_message(p) is None  # should reconnect
        new_data = self.data + u('new data')
        r.publish(self.channel, new_data)
        assert wait_for_message(p) is None
        assert self.message == self.make_message('pmessage', self.channel,
                                                 new_data,
                                                 pattern=self.pattern)
class TestPubSubRedisDown(object):
    """Pub/sub operations against an unreachable server raise ConnectionError."""

    def test_channel_subscribe(self, r):
        # port 6390 is assumed to have no redis server listening
        r = redis.Redis(host='localhost', port=6390)
        p = r.pubsub()
        with pytest.raises(ConnectionError):
            p.subscribe('foo')
| mit |
2014c2g1/c2g1 | wsgi/static/Brython2.1.0-20140419-113919/Lib/getopt.py | 845 | 7488 | """Parser for command line options.
This module helps scripts to parse the command line arguments in
sys.argv. It supports the same conventions as the Unix getopt()
function (including the special meanings of arguments of the form `-'
and `--'). Long options similar to those supported by GNU software
may be used as well via an optional third argument. This module
provides two functions and an exception:
getopt() -- Parse command line options
gnu_getopt() -- Like getopt(), but allow option and non-option arguments
to be intermixed.
GetoptError -- exception (class) raised with 'opt' attribute, which is the
option involved with the exception.
"""
# Long option support added by Lars Wirzenius <liw@iki.fi>.
#
# Gerrit Holl <gerrit@nl.linux.org> moved the string-based exceptions
# to class-based exceptions.
#
# Peter Åstrand <astrand@lysator.liu.se> added gnu_getopt().
#
# TODO for gnu_getopt():
#
# - GNU getopt_long_only mechanism
# - allow the caller to specify ordering
# - RETURN_IN_ORDER option
# - GNU extension with '-' as first character of option string
# - optional arguments, specified by double colons
# - a option string with a W followed by semicolon should
# treat "-W foo" as "--foo"
__all__ = ["GetoptError","error","getopt","gnu_getopt"]
import os
# Localize error messages via gettext when it is available; otherwise use
# an identity function so message construction still works.
try:
    from gettext import gettext as _
except ImportError:
    # Bootstrapping Python: gettext's dependencies not built yet
    def _(s): return s
class GetoptError(Exception):
    """Raised when the argument list cannot be parsed.

    The offending option (if any) is available as the ``opt`` attribute
    and the human-readable description as ``msg``.
    """

    opt = ''
    msg = ''

    def __init__(self, msg, opt=''):
        self.msg = msg
        self.opt = opt
        super().__init__(msg, opt)

    def __str__(self):
        return self.msg

error = GetoptError # backward compatibility
def getopt(args, shortopts, longopts = []):
    """getopt(args, options[, long_options]) -> opts, args

    Parses command line options and parameter list.  args is the
    argument list to be parsed, without the leading reference to the
    running program.  Typically, this means "sys.argv[1:]".  shortopts
    is the string of option letters that the script wants to
    recognize, with options that require an argument followed by a
    colon (i.e., the same format that Unix getopt() uses).  If
    specified, longopts is a list of strings with the names of the
    long options which should be supported.  The leading '--'
    characters should not be included in the option name.  Options
    which require an argument should be followed by an equal sign
    ('=').

    The return value consists of two elements: the first is a list of
    (option, value) pairs; the second is the list of program arguments
    left after the option list was stripped (this is a trailing slice
    of the first argument).  Each option-and-value pair returned has
    the option as its first element, prefixed with a hyphen (e.g.,
    '-x'), and the option argument as its second element, or an empty
    string if the option has no argument.  The options occur in the
    list in the same order in which they were found, thus allowing
    multiple occurrences.  Long and short options may be mixed.
    """

    opts = []
    # Accept a single long-option string for convenience; otherwise copy
    # the sequence so the caller's list is never mutated.  isinstance()
    # replaces the original ``type(longopts) == type("")`` check: it is
    # the idiomatic form, also accepts str subclasses, and matches what
    # gnu_getopt() below already does.
    if isinstance(longopts, str):
        longopts = [longopts]
    else:
        longopts = list(longopts)
    while args and args[0].startswith('-') and args[0] != '-':
        if args[0] == '--':
            # Explicit end-of-options marker: the rest is positional.
            args = args[1:]
            break
        if args[0].startswith('--'):
            opts, args = do_longs(opts, args[0][2:], longopts, args[1:])
        else:
            opts, args = do_shorts(opts, args[0][1:], shortopts, args[1:])

    return opts, args
def gnu_getopt(args, shortopts, longopts = []):
    """gnu_getopt(args, options[, long_options]) -> opts, args

    This function works like getopt(), except that GNU style scanning
    mode is used by default. This means that option and non-option
    arguments may be intermixed. The getopt() function stops
    processing options as soon as a non-option argument is
    encountered.

    If the first character of the option string is `+', or if the
    environment variable POSIXLY_CORRECT is set, then option
    processing stops as soon as a non-option argument is encountered.
    """

    opts = []
    prog_args = []
    if isinstance(longopts, str):
        longopts = [longopts]
    else:
        longopts = list(longopts)

    # Allow options after non-option arguments?
    if shortopts.startswith('+'):
        # A leading '+' requests POSIX behaviour; strip it before parsing.
        shortopts = shortopts[1:]
        all_options_first = True
    elif os.environ.get("POSIXLY_CORRECT"):
        all_options_first = True
    else:
        all_options_first = False

    while args:
        if args[0] == '--':
            # Explicit end-of-options marker.
            prog_args += args[1:]
            break

        if args[0][:2] == '--':
            opts, args = do_longs(opts, args[0][2:], longopts, args[1:])
        elif args[0][:1] == '-' and args[0] != '-':
            opts, args = do_shorts(opts, args[0][1:], shortopts, args[1:])
        else:
            if all_options_first:
                # POSIX mode: first non-option ends option processing.
                prog_args += args
                break
            else:
                # GNU mode: remember the positional and keep scanning.
                prog_args.append(args[0])
                args = args[1:]

    return opts, prog_args
def do_longs(opts, opt, longopts, args):
    """Consume one '--name[=value]' token (passed without the dashes).

    Appends the parsed ('--name', value) pair to opts and returns the
    updated (opts, remaining_args) pair.
    """
    if '=' in opt:
        # Split on the first '=' only; the rest belongs to the value.
        opt, _sep, optarg = opt.partition('=')
    else:
        optarg = None

    has_arg, opt = long_has_args(opt, longopts)
    if has_arg:
        if optarg is None:
            # Value was not attached with '='; take the next word.
            if not args:
                raise GetoptError(_('option --%s requires argument') % opt, opt)
            optarg, args = args[0], args[1:]
    elif optarg is not None:
        raise GetoptError(_('option --%s must not have an argument') % opt, opt)
    opts.append(('--' + opt, optarg or ''))
    return opts, args
# Return:
# has_arg?
# full option name
def long_has_args(opt, longopts):
    """Return (takes_argument, canonical_name) for a long option prefix.

    Raises GetoptError when opt matches no registered long option or is
    an ambiguous prefix of several.
    """
    matches = [candidate for candidate in longopts if candidate.startswith(opt)]
    if not matches:
        raise GetoptError(_('option --%s not recognized') % opt, opt)
    # An exact match always wins over longer candidates sharing the prefix.
    if opt in matches:
        return False, opt
    if opt + '=' in matches:
        return True, opt
    # Otherwise the prefix must identify exactly one option.
    # XXX since matches contains all valid continuations, might be
    # nice to work them into the error msg
    if len(matches) > 1:
        raise GetoptError(_('option --%s not a unique prefix') % opt, opt)
    chosen = matches[0]
    if chosen.endswith('='):
        # Trailing '=' in the table means "takes an argument".
        return True, chosen[:-1]
    return False, chosen
def do_shorts(opts, optstring, shortopts, args):
    """Consume a bundle of short options ('-abc' arrives here as 'abc').

    Appends each parsed ('-x', value) pair to opts and returns the
    updated (opts, remaining_args) pair.
    """
    while optstring:
        opt, optstring = optstring[0], optstring[1:]
        if not short_has_arg(opt, shortopts):
            optarg = ''
        else:
            # The argument is the rest of this bundle, or the next word.
            if not optstring:
                if not args:
                    raise GetoptError(_('option -%s requires argument') % opt,
                                      opt)
                optstring, args = args[0], args[1:]
            optarg, optstring = optstring, ''
        opts.append(('-' + opt, optarg))
    return opts, args
def short_has_arg(opt, shortopts):
    """Return True if short option opt takes an argument (':' follows it
    in shortopts); raise GetoptError for an unknown option."""
    for index, candidate in enumerate(shortopts):
        # Skip the ':' markers themselves while scanning for opt.
        if opt == candidate != ':':
            return shortopts.startswith(':', index + 1)
    raise GetoptError(_('option -%s not recognized') % opt, opt)
if __name__ == '__main__':
    import sys
    # Manual smoke test: parse this script's own argv with a sample spec.
    print(getopt(sys.argv[1:], "a:b", ["alpha=", "beta"]))
| gpl-2.0 |
Jumpscale/jumpscale_core8 | tests/sal/TestSSHD.py | 1 | 3126 | import unittest
from JumpScale import j
class TestSSHD(unittest.TestCase):
    """Tests for the j.sal.sshd authorized-keys management wrapper.

    Every test works against a throwaway authorized_keys file under
    /tmp/sshroot so the machine's real SSH configuration is untouched.
    """

    def setUp(self):
        self.sshd = j.sal.sshd
        # Two fixture public keys; same key material, distinct host comment.
        self.key1 = 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCUlY0UEUNExAQF/sIw2L2AJEmHj0eTCnSCwg7gYOQDNhrrzD0+HJulD1UTz+zZqiC2nIPWMfWBoEs3i4jDj79fyiGx4pgQJXFwioIqTONlEyvPIY0eCm3eeSaWrK9G0STdlCrrofZzuAL5/SCKiqTEizZe1MqhJT/xs2xpD+hHFIyMIuBl9OOLX2XvFQ6mBB1bq4U1jpemuHk7L/M0m73Na4M2CQWVDUl/CRhNyhI+WlB2i9dwI3RwrtUp98MCAF//cx3xVC4NfHONQmN8j7z/WpsfJIadqOxfnOp5y4kj1EqbtmeKZbYvR2ZtcAibcnWs0/4kNDn723NheG/secHT root@myjs8xenial'
        self.key2 = 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCUlY0UEUNExAQF/sIw2L2AJEmHj0eTCnSCwg7gYOQDNhrrzD0+HJulD1UTz+zZqiC2nIPWMfWBoEs3i4jDj79fyiGx4pgQJXFwioIqTONlEyvPIY0eCm3eeSaWrK9G0STdlCrrofZzuAL5/SCKiqTEizZe1MqhJT/xs2xpD+hHFIyMIuBl9OOLX2XvFQ6mBB1bq4U1jpemuHk7L/M0m73Na4M2CQWVDUl/CRhNyhI+WlB2i9dwI3RwrtUp98MCAF//cx3xVC4NfHONQmN8j7z/WpsfJIadqOxfnOp5y4kj1EqbtmeKZbYvR2ZtcAibcnWs0/4kNDn723NheG/secHT root@myjs8xenial2'
        self.sshroot = j.tools.path.get("/tmp/sshroot")
        self.authkeysfile = j.tools.path.get("/tmp/sshroot/authkeys")
        j.sal.fs.createDir(self.sshroot)
        j.sal.fs.touch(self.authkeysfile)
        # Flush any pending key changes before redirecting to the sandbox.
        self.sshd.commit()
        self.sshd.SSH_ROOT = self.sshroot
        self.sshd.SSH_AUTHORIZED_KEYS = self.authkeysfile

    def tearDown(self):
        # NOTE(review): removeDirTree() is given the authkeys *file* path,
        # not the sandbox directory -- confirm this cleans up as intended.
        j.sal.fs.removeDirTree("/tmp/sshroot/authkeys")

    def test_list_keys_emptyfile(self):
        """A fresh, empty authorized_keys file yields no keys."""
        self.assertEqual(len(self.sshd.keys), 0)

    def test_list_keys(self):
        """Both added keys appear in the listing after commit."""
        self.sshd.addKey(self.key1)
        self.sshd.addKey(self.key2)
        self.sshd.commit()
        self.assertEqual(len(self.sshd.keys), 2)
        self.assertIn(self.key1, self.sshd.keys)
        self.assertIn(self.key2, self.sshd.keys)

    def test_add_key(self):
        """Adding two keys and committing stores exactly two keys."""
        self.sshd.addKey(self.key1)
        self.sshd.addKey(self.key2)
        self.sshd.commit()
        self.assertEqual(len(self.sshd.keys), 2)

    def test_key_exists_after_add(self):
        """An added key is retrievable after commit."""
        self.sshd.addKey(self.key1)
        self.sshd.addKey(self.key2)
        self.sshd.commit()
        self.assertEqual(len(self.sshd.keys), 2)
        self.assertIn(self.key1, self.sshd.keys)

    def test_key_doesntexists_after_delete(self):
        """A deleted key disappears from the listing after commit."""
        self.sshd.addKey(self.key1)
        self.sshd.addKey(self.key2)
        self.sshd.commit()
        self.assertEqual(len(self.sshd.keys), 2)
        self.assertIn(self.key1, self.sshd.keys)
        self.sshd.deleteKey(self.key1)
        self.sshd.commit()
        self.assertNotIn(self.key1, self.sshd.keys)

    def test_erase(self):
        """erase() removes every key once committed."""
        self.sshd.addKey(self.key1)
        self.sshd.addKey(self.key2)
        self.sshd.commit()
        self.assertEqual(len(self.sshd.keys), 2)
        self.sshd.erase()
        self.sshd.commit()
        self.assertEqual(len(self.sshd.keys), 0)

    def test_delete_key(self):
        """Deleting one of two keys leaves exactly one."""
        self.sshd.addKey(self.key1)
        self.sshd.addKey(self.key2)
        self.sshd.commit()
        self.assertEqual(len(self.sshd.keys), 2)
        self.sshd.deleteKey(self.key2)
        self.sshd.commit()
        self.assertEqual(len(self.sshd.keys), 1)
| apache-2.0 |
khallaghi/mongoengine | mongoengine/queryset/transform.py | 15 | 15345 | from collections import defaultdict
import pymongo
from bson import SON
from mongoengine.base.fields import UPDATE_OPERATORS
from mongoengine.connection import get_connection
from mongoengine.common import _import_class
from mongoengine.errors import InvalidQueryError
from mongoengine.python_support import IS_PYMONGO_3
__all__ = ('query', 'update')
COMPARISON_OPERATORS = ('ne', 'gt', 'gte', 'lt', 'lte', 'in', 'nin', 'mod',
'all', 'size', 'exists', 'not', 'elemMatch', 'type')
GEO_OPERATORS = ('within_distance', 'within_spherical_distance',
'within_box', 'within_polygon', 'near', 'near_sphere',
'max_distance', 'min_distance', 'geo_within', 'geo_within_box',
'geo_within_polygon', 'geo_within_center',
'geo_within_sphere', 'geo_intersects')
STRING_OPERATORS = ('contains', 'icontains', 'startswith',
'istartswith', 'endswith', 'iendswith',
'exact', 'iexact')
CUSTOM_OPERATORS = ('match',)
MATCH_OPERATORS = (COMPARISON_OPERATORS + GEO_OPERATORS +
STRING_OPERATORS + CUSTOM_OPERATORS)
def query(_doc_cls=None, **query):
    """Transform a query from Django-style format to Mongo format.

    Keyword arguments use ``field__subfield__operator=value`` syntax and
    the result is a dict suitable for pymongo.  When ``_doc_cls`` is
    given it is used to translate attribute names to db field names and
    to coerce values via each field's ``prepare_query_value``.
    """
    mongo_query = {}
    merge_query = defaultdict(list)
    for key, value in sorted(query.items()):
        if key == "__raw__":
            # Raw Mongo fragments are merged verbatim.
            mongo_query.update(value)
            continue

        parts = key.rsplit('__')
        # Numeric path segments (list indexes) are pulled out now and
        # re-inserted after operator/field processing.
        indices = [(i, p) for i, p in enumerate(parts) if p.isdigit()]
        parts = [part for part in parts if not part.isdigit()]
        # Check for an operator and transform to mongo-style if there is
        op = None
        if len(parts) > 1 and parts[-1] in MATCH_OPERATORS:
            op = parts.pop()

        # Allow escaping an operator-like field name with a trailing __
        if len(parts) > 1 and parts[-1] == "":
            parts.pop()

        negate = False
        if len(parts) > 1 and parts[-1] == 'not':
            parts.pop()
            negate = True

        if _doc_cls:
            # Switch field names to proper names [set in Field(name='foo')]
            try:
                fields = _doc_cls._lookup_field(parts)
            except Exception, e:
                raise InvalidQueryError(e)
            parts = []

            CachedReferenceField = _import_class('CachedReferenceField')

            cleaned_fields = []
            for field in fields:
                append_field = True
                if isinstance(field, basestring):
                    parts.append(field)
                    append_field = False
                # is last and CachedReferenceField: match on the cached _id
                elif isinstance(field, CachedReferenceField) and fields[-1] == field:
                    parts.append('%s._id' % field.db_field)
                else:
                    parts.append(field.db_field)

                if append_field:
                    cleaned_fields.append(field)

            # Convert value to proper value using the innermost real field
            field = cleaned_fields[-1]

            singular_ops = [None, 'ne', 'gt', 'gte', 'lt', 'lte', 'not']
            singular_ops += STRING_OPERATORS
            if op in singular_ops:
                if isinstance(field, basestring):
                    if (op in STRING_OPERATORS and
                            isinstance(value, basestring)):
                        StringField = _import_class('StringField')
                        value = StringField.prepare_query_value(op, value)
                    else:
                        value = field
                else:
                    value = field.prepare_query_value(op, value)

                    if isinstance(field, CachedReferenceField) and value:
                        value = value['_id']

            elif op in ('in', 'nin', 'all', 'near') and not isinstance(value, dict):
                # 'in', 'nin' and 'all' require a list of values
                value = [field.prepare_query_value(op, v) for v in value]

        # if op and op not in COMPARISON_OPERATORS:
        if op:
            if op in GEO_OPERATORS:
                value = _geo_operator(field, op, value)
            elif op in CUSTOM_OPERATORS:
                if op in ('elem_match', 'match'):
                    value = field.prepare_query_value(op, value)
                    value = {"$elemMatch": value}
                else:
                    # NOTE(review): this exception is constructed but never
                    # raised -- unknown custom operators currently fall
                    # through silently.  Looks like a missing ``raise``.
                    NotImplementedError("Custom method '%s' has not "
                                        "been implemented" % op)
            elif op not in STRING_OPERATORS:
                value = {'$' + op: value}

        if negate:
            value = {'$not': value}

        # Re-insert the numeric list indexes at their original positions.
        for i, part in indices:
            parts.insert(i, part)

        key = '.'.join(parts)

        if op is None or key not in mongo_query:
            mongo_query[key] = value
        elif key in mongo_query:
            if key in mongo_query and isinstance(mongo_query[key], dict):
                mongo_query[key].update(value)
                # $max/minDistance needs to come last - convert to SON
                value_dict = mongo_query[key]
                if ('$maxDistance' in value_dict or '$minDistance' in value_dict) and \
                        ('$near' in value_dict or '$nearSphere' in value_dict):
                    value_son = SON()
                    for k, v in value_dict.iteritems():
                        if k == '$maxDistance' or k == '$minDistance':
                            continue
                        value_son[k] = v
                    # Required for MongoDB >= 2.6, may fail when combining
                    # PyMongo 3+ and MongoDB < 2.6
                    near_embedded = False
                    for near_op in ('$near', '$nearSphere'):
                        if isinstance(value_dict.get(near_op), dict) and (
                                IS_PYMONGO_3 or get_connection().max_wire_version > 1):
                            value_son[near_op] = SON(value_son[near_op])
                            if '$maxDistance' in value_dict:
                                value_son[near_op][
                                    '$maxDistance'] = value_dict['$maxDistance']
                            if '$minDistance' in value_dict:
                                value_son[near_op][
                                    '$minDistance'] = value_dict['$minDistance']
                            near_embedded = True
                    if not near_embedded:
                        if '$maxDistance' in value_dict:
                            value_son['$maxDistance'] = value_dict['$maxDistance']
                        if '$minDistance' in value_dict:
                            value_son['$minDistance'] = value_dict['$minDistance']
                    mongo_query[key] = value_son
            else:
                # Store for manually merging later
                merge_query[key].append(value)

    # The queryset has been filtered in such a way we must manually merge
    # duplicate keys into an $and clause.
    for k, v in merge_query.items():
        merge_query[k].append(mongo_query[k])
        del mongo_query[k]
        if isinstance(v, list):
            value = [{k: val} for val in v]
            if '$and' in mongo_query.keys():
                mongo_query['$and'].extend(value)
            else:
                mongo_query['$and'] = value

    return mongo_query
def update(_doc_cls=None, **update):
    """Transform an update spec from Django-style format to Mongo format.

    Keys look like ``operator__field__subfield=value`` (operator defaults
    to ``set``); the result maps Mongo update operators ('$set', '$inc',
    ...) to their value documents.
    """
    mongo_update = {}
    for key, value in update.items():
        if key == "__raw__":
            # Raw Mongo fragments are merged verbatim.
            mongo_update.update(value)
            continue

        parts = key.split('__')

        # if there is no operator, default to "set"
        if len(parts) < 3 and parts[0] not in UPDATE_OPERATORS:
            parts.insert(0, 'set')

        # Check for an operator and transform to mongo-style if there is
        op = None
        if parts[0] in UPDATE_OPERATORS:
            op = parts.pop(0)
            # Convert Pythonic names to Mongo equivalents
            if op in ('push_all', 'pull_all'):
                op = op.replace('_all', 'All')
            elif op == 'dec':
                # Support decrement by flipping a positive value's sign
                # and using 'inc'
                op = 'inc'
                if value > 0:
                    value = -value
            elif op == 'add_to_set':
                op = 'addToSet'
            elif op == 'set_on_insert':
                op = "setOnInsert"

        # A trailing comparison operator becomes a match filter, e.g.
        # pull__field__gt=5 -> {'$pull': {'field': {'$gt': 5}}}
        match = None
        if parts[-1] in COMPARISON_OPERATORS:
            match = parts.pop()

        if _doc_cls:
            # Switch field names to proper names [set in Field(name='foo')]
            try:
                fields = _doc_cls._lookup_field(parts)
            except Exception, e:
                raise InvalidQueryError(e)
            parts = []

            cleaned_fields = []
            appended_sub_field = False
            for field in fields:
                append_field = True
                if isinstance(field, basestring):
                    # Convert the S operator (positional update) to $
                    if field == 'S':
                        field = '$'
                    parts.append(field)
                    append_field = False
                else:
                    parts.append(field.db_field)
                if append_field:
                    appended_sub_field = False
                    cleaned_fields.append(field)
                    if hasattr(field, 'field'):
                        # Container field: also track its element field so
                        # values can be coerced per element.
                        cleaned_fields.append(field.field)
                        appended_sub_field = True

            # Convert value to proper value
            if appended_sub_field:
                field = cleaned_fields[-2]
            else:
                field = cleaned_fields[-1]

            GeoJsonBaseField = _import_class("GeoJsonBaseField")
            if isinstance(field, GeoJsonBaseField):
                value = field.to_mongo(value)

            if op in (None, 'set', 'push', 'pull'):
                if field.required or value is not None:
                    value = field.prepare_query_value(op, value)
            elif op in ('pushAll', 'pullAll'):
                value = [field.prepare_query_value(op, v) for v in value]
            elif op in ('addToSet', 'setOnInsert'):
                if isinstance(value, (list, tuple, set)):
                    value = [field.prepare_query_value(op, v) for v in value]
                elif field.required or value is not None:
                    value = field.prepare_query_value(op, value)
            elif op == "unset":
                # Mongo ignores the $unset value; 1 is the convention.
                value = 1

        if match:
            match = '$' + match
            value = {match: value}

        key = '.'.join(parts)

        if not op:
            raise InvalidQueryError("Updates must supply an operation "
                                    "eg: set__FIELD=value")

        if 'pull' in op and '.' in key:
            # Dot operators don't work on pull operations
            # unless they point to a list field
            # Otherwise it uses nested dict syntax
            if op == 'pullAll':
                raise InvalidQueryError("pullAll operations only support "
                                        "a single field depth")

            # Look for the last list field and use dot notation until there
            field_classes = [c.__class__ for c in cleaned_fields]
            field_classes.reverse()
            ListField = _import_class('ListField')
            if ListField in field_classes:
                # Join all fields via dot notation to the last ListField
                # Then process as normal
                last_listField = len(
                    cleaned_fields) - field_classes.index(ListField)
                key = ".".join(parts[:last_listField])
                parts = parts[last_listField:]
                parts.insert(0, key)

            # Build the nested dict form from the inside out.
            parts.reverse()
            for key in parts:
                value = {key: value}
        elif op == 'addToSet' and isinstance(value, list):
            value = {key: {"$each": value}}
        else:
            value = {key: value}

        key = '$' + op

        if key not in mongo_update:
            mongo_update[key] = value
        elif key in mongo_update and isinstance(mongo_update[key], dict):
            mongo_update[key].update(value)

    return mongo_update
def _geo_operator(field, op, value):
"""Helper to return the query for a given geo query"""
if op == "max_distance":
value = {'$maxDistance': value}
elif op == "min_distance":
value = {'$minDistance': value}
elif field._geo_index == pymongo.GEO2D:
if op == "within_distance":
value = {'$within': {'$center': value}}
elif op == "within_spherical_distance":
value = {'$within': {'$centerSphere': value}}
elif op == "within_polygon":
value = {'$within': {'$polygon': value}}
elif op == "near":
value = {'$near': value}
elif op == "near_sphere":
value = {'$nearSphere': value}
elif op == 'within_box':
value = {'$within': {'$box': value}}
else:
raise NotImplementedError("Geo method '%s' has not "
"been implemented for a GeoPointField" % op)
else:
if op == "geo_within":
value = {"$geoWithin": _infer_geometry(value)}
elif op == "geo_within_box":
value = {"$geoWithin": {"$box": value}}
elif op == "geo_within_polygon":
value = {"$geoWithin": {"$polygon": value}}
elif op == "geo_within_center":
value = {"$geoWithin": {"$center": value}}
elif op == "geo_within_sphere":
value = {"$geoWithin": {"$centerSphere": value}}
elif op == "geo_intersects":
value = {"$geoIntersects": _infer_geometry(value)}
elif op == "near":
value = {'$near': _infer_geometry(value)}
else:
raise NotImplementedError("Geo method '%s' has not "
"been implemented for a %s " % (op, field._name))
return value
def _infer_geometry(value):
"""Helper method that tries to infer the $geometry shape for a given value"""
if isinstance(value, dict):
if "$geometry" in value:
return value
elif 'coordinates' in value and 'type' in value:
return {"$geometry": value}
raise InvalidQueryError("Invalid $geometry dictionary should have "
"type and coordinates keys")
elif isinstance(value, (list, set)):
# TODO: shouldn't we test value[0][0][0][0] to see if it is MultiPolygon?
try:
value[0][0][0]
return {"$geometry": {"type": "Polygon", "coordinates": value}}
except:
pass
try:
value[0][0]
return {"$geometry": {"type": "LineString", "coordinates": value}}
except:
pass
try:
value[0]
return {"$geometry": {"type": "Point", "coordinates": value}}
except:
pass
raise InvalidQueryError("Invalid $geometry data. Can be either a dictionary "
"or (nested) lists of coordinate(s)")
| mit |
jessekl/flixr | venv/lib/python2.7/site-packages/pip/utils/deprecation.py | 271 | 2152 | """
A module that implments tooling to enable easy warnings about deprecations.
"""
from __future__ import absolute_import
import logging
import warnings
class PipDeprecationWarning(Warning):
    """Base class for all of pip's deprecation warnings; instances are
    routed through the 'pip.deprecations' logger by _showwarning()."""
    pass
class RemovedInPip8Warning(PipDeprecationWarning, PendingDeprecationWarning):
    """Warns about behaviour scheduled for removal in pip 8."""
    pass
class RemovedInPip9Warning(PipDeprecationWarning, PendingDeprecationWarning):
    """Warns about behaviour scheduled for removal in pip 9."""
    pass
# All currently pending deprecation warning classes.
DEPRECATIONS = [RemovedInPip8Warning, RemovedInPip9Warning]


# Warnings <-> Logging Integration

# Holds the original warnings.showwarning hook once install_warning_logger()
# has swapped in _showwarning; None means the logger is not installed yet.
_warnings_showwarning = None
def _showwarning(message, category, filename, lineno, file=None, line=None):
    """Replacement warnings.showwarning hook that funnels pip deprecation
    warnings into the 'pip.deprecations' logger and defers everything
    else to the previously installed hook."""
    if file is not None:
        # An explicit destination was requested; only the original hook
        # knows how to honour it.
        if _warnings_showwarning is not None:
            _warnings_showwarning(
                message, category, filename, lineno, file, line,
            )
        return

    if not issubclass(category, PipDeprecationWarning):
        # Not ours: hand it straight back to the original hook.
        _warnings_showwarning(
            message, category, filename, lineno, file, line,
        )
        return

    # Dedicated logger name so pip's deprecation output can be filtered.
    logger = logging.getLogger("pip.deprecations")

    # %-interpolation is done eagerly on purpose: the record should read
    # as if someone typed this entire message out.
    log_message = "DEPRECATION: %s" % message

    # DeprecationWarnings go away in the very next pip release, so log
    # them at ERROR; PendingDeprecationWarnings still have at least two
    # versions to go and stay at WARNING.
    if issubclass(category, DeprecationWarning):
        logger.error(log_message)
    else:
        logger.warning(log_message)
def install_warning_logger():
    """Idempotently replace warnings.showwarning with _showwarning,
    keeping a reference to the original hook for pass-through."""
    global _warnings_showwarning

    if _warnings_showwarning is not None:
        # Already installed; nothing to do.
        return
    _warnings_showwarning = warnings.showwarning
    warnings.showwarning = _showwarning
| mit |
matthieudumont/dipy | scratch/very_scratch/tractography_clustering_using_larch.py | 22 | 1428 | import time
import os
import numpy as np
from nibabel import trackvis as tv
from dipy.viz import fos
from dipy.io import pickles as pkl
from dipy.core import track_learning as tl
from dipy.core import track_performance as pf
from dipy.core import track_metrics as tm
# Python 2 script: cluster tractography streamlines with LARCH and show
# the resulting skeleton tracks with the fos viewer.
fname='/home/eg01/Data/PBC/pbc2009icdm/brain1/brain1_scan1_fiber_track_mni.trk'
C_fname='/tmp/larch_tree.pkl'      # pickled clustering result (cache)
appr_fname='/tmp/larch_tracks.trk' # approximated tracks, trackvis format

print 'Loading trackvis file...'
streams,hdr=tv.read(fname)

print 'Copying tracks...'
tracks=[i[0] for i in streams]

#tracks=tracks[:1000]

#print 'Deleting unnecessary data...'
# Free the raw streams; hdr is kept for writing the output file below.
del streams#,hdr

if not os.path.isfile(C_fname):
    # No cached result: run LARCH at three squared distance thresholds.
    print 'Starting LARCH ...'
    tim=time.clock()
    C,atracks=tl.larch(tracks,[50.**2,20.**2,5.**2],True,True)
    #tracks=[tm.downsample(t,3) for t in tracks]
    #C=pf.local_skeleton_clustering(tracks,20.)
    print 'Done in total of ',time.clock()-tim,'seconds.'

    print 'Saving result...'
    pkl.save_pickle(C_fname,C)

    streams=[(i,None,None)for i in atracks]
    tv.write(appr_fname,streams,hdr)

else:
    # Reuse the cached clustering.
    print 'Loading result...'
    C=pkl.load_pickle(C_fname)

# One representative ('repz') track per cluster.
skel=[]
for c in C:
    skel.append(C[c]['repz'])

print 'Showing dataset after clustering...'
r=fos.ren()
fos.clear(r)
# Random colour per skeleton track.
colors=np.zeros((len(skel),3))
for (i,s) in enumerate(skel):
    color=np.random.rand(1,3)
    colors[i]=color
fos.add(r,fos.line(skel,colors,opacity=1))
fos.show(r)
| bsd-3-clause |
darthm0e/WeatherStation-RPI | luxreader.py | 1 | 3198 | from Adafruit_I2C import Adafruit_I2C
import time
class TSL2561:
    """Driver for the TSL2561 light sensor on the I2C bus (Python 2).

    Reads the broadband (visible + IR) and IR-only channels and converts
    them to an approximate lux value using the piecewise formula in
    readLux().
    """

    i2c = None  # class-level default; replaced per instance in __init__

    def __init__(self, address=0x29, debug=0, pause=0.8):
        # pause is the wait after a gain change; it must exceed the
        # 402 ms integration time configured in setGain().
        self.i2c = Adafruit_I2C(address)
        self.address = address
        self.pause = pause
        self.debug = debug
        self.gain = 0 # no gain preselected
        self.i2c.write8(0x80, 0x03) # enable the device

    def setGain(self,gain=1):
        """ Set the gain (1 = 1x, anything else = 16x) """
        # Only touch the hardware when the gain actually changes, then
        # wait one integration period so the next reading is valid.
        if (gain != self.gain):
            if (gain==1):
                self.i2c.write8(0x81, 0x02) # set gain = 1X and timing = 402 mSec
                if (self.debug):
                    print "Setting low gain"
            else:
                self.i2c.write8(0x81, 0x12) # set gain = 16X and timing = 402 mSec
                if (self.debug):
                    print "Setting high gain"
            self.gain=gain; # safe gain for calculation
            time.sleep(self.pause) # pause for integration (self.pause must be bigger than integration time)

    def readWord(self, reg):
        """Reads a word from the I2C device; returns -1 on I2C error."""
        try:
            wordval = self.i2c.readU16(reg)
            # Reverse the byte order of the 16-bit register value.
            newval = self.i2c.reverseByteOrder(wordval)
            if (self.debug):
                print("I2C: Device 0x%02X returned 0x%04X from reg 0x%02X" % (self.address, wordval & 0xFFFF, reg))
            return newval
        except IOError:
            print("Error accessing 0x%02X: Check your I2C address" % self.address)
            return -1

    def readFull(self, reg=0x8C):
        """Reads visible+IR diode from the I2C device"""
        return self.readWord(reg);

    def readIR(self, reg=0x8E):
        """Reads IR only diode from the I2C device"""
        return self.readWord(reg);

    def readLux(self, gain = 0):
        """Grabs a lux reading either with autoranging (gain=0) or with a specified gain (1, 16)"""
        if (gain == 1 or gain == 16):
            self.setGain(gain) # low/highGain
            ambient = self.readFull()
            IR = self.readIR()
        elif (gain==0): # auto gain
            self.setGain(16) # first try highGain
            ambient = self.readFull()
            if (ambient < 65535):
                IR = self.readIR()
            # If ambient saturated, the `or` below short-circuits so the
            # (then unbound) IR is never evaluated before the re-read.
            if (ambient >= 65535 or IR >= 65535): # value(s) exeed(s) datarange
                self.setGain(1) # set lowGain
                ambient = self.readFull()
                IR = self.readIR()
        # NOTE(review): any gain value other than 0, 1 or 16 leaves
        # ambient/IR undefined and raises below -- confirm callers only
        # pass those values.

        if (self.gain==1):
            ambient *= 16 # scale 1x to 16x
            IR *= 16 # scale 1x to 16x

        ratio = (IR / float(ambient)) # changed to make it run under python 2

        if (self.debug):
            print "IR Result", IR
            print "Ambient Result", ambient

        # Piecewise lux approximation selected by the IR/broadband ratio.
        if ((ratio >= 0) & (ratio <= 0.52)):
            lux = (0.0315 * ambient) - (0.0593 * ambient * (ratio**1.4))
        elif (ratio <= 0.65):
            lux = (0.0229 * ambient) - (0.0291 * IR)
        elif (ratio <= 0.80):
            lux = (0.0157 * ambient) - (0.018 * IR)
        elif (ratio <= 1.3):
            lux = (0.00338 * ambient) - (0.0026 * IR)
        elif (ratio > 1.3):
            lux = 0

        return lux
| gpl-2.0 |
aitoehigie/gidimagic | venv/lib/python2.7/site-packages/requests/packages/chardet/mbcharsetprober.py | 2924 | 3268 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
# Proofpoint, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from . import constants
from .charsetprober import CharSetProber
class MultiByteCharSetProber(CharSetProber):
    """Base prober for multi-byte encodings.

    Subclasses provide a coding state machine (self._mCodingSM) that
    validates byte sequences and a character distribution analyzer
    (self._mDistributionAnalyzer) that scores frequencies; feed() drives
    both and combines their verdicts.
    """

    def __init__(self):
        CharSetProber.__init__(self)
        self._mDistributionAnalyzer = None
        self._mCodingSM = None
        # Last byte of the previous buffer and first byte of the current
        # one, so characters split across feed() calls are still analyzed.
        self._mLastChar = [0, 0]

    def reset(self):
        """Reset prober, state machine and analyzer to their start state."""
        CharSetProber.reset(self)
        if self._mCodingSM:
            self._mCodingSM.reset()
        if self._mDistributionAnalyzer:
            self._mDistributionAnalyzer.reset()
        self._mLastChar = [0, 0]

    def get_charset_name(self):
        # Abstract hook: concrete probers return their encoding's name.
        pass

    def feed(self, aBuf):
        """Feed a buffer of bytes; return the prober state afterwards."""
        aLen = len(aBuf)
        for i in range(0, aLen):
            codingState = self._mCodingSM.next_state(aBuf[i])
            if codingState == constants.eError:
                # Illegal byte sequence: this charset is ruled out.
                if constants._debug:
                    sys.stderr.write(self.get_charset_name()
                                     + ' prober hit error at byte ' + str(i)
                                     + '\n')
                self._mState = constants.eNotMe
                break
            elif codingState == constants.eItsMe:
                self._mState = constants.eFoundIt
                break
            elif codingState == constants.eStart:
                # A complete character was recognized; feed it (including
                # the byte carried over from the previous buffer if it
                # started there) to the distribution analyzer.
                charLen = self._mCodingSM.get_current_charlen()
                if i == 0:
                    self._mLastChar[1] = aBuf[0]
                    self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
                else:
                    self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1],
                                                     charLen)

        self._mLastChar[0] = aBuf[aLen - 1]

        if self.get_state() == constants.eDetecting:
            # Stop early once the analyzer is confident enough.
            if (self._mDistributionAnalyzer.got_enough_data() and
                    (self.get_confidence() > constants.SHORTCUT_THRESHOLD)):
                self._mState = constants.eFoundIt

        return self.get_state()

    def get_confidence(self):
        """Confidence comes entirely from the distribution analyzer."""
        return self._mDistributionAnalyzer.get_confidence()
| mit |
shaananc/security-proj2 | bindings/python/apidefs/gcc-LP64/ns3_module_uan.py | 58 | 3516 | from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers
def register_types(module):
    """Create the nested C++ namespace modules and register their types.

    Generated binding code (pybindgen, see module import above); the
    per-namespace helpers called here are empty hooks emitted by the
    generator.
    """
    root_module = module.get_root()  # retrieved but unused (generator artifact)

    ## Register a nested module for the namespace Config
    nested_module = module.add_cpp_namespace('Config')
    register_types_ns3_Config(nested_module)

    ## Register a nested module for the namespace FatalImpl
    nested_module = module.add_cpp_namespace('FatalImpl')
    register_types_ns3_FatalImpl(nested_module)

    ## Register a nested module for the namespace addressUtils
    nested_module = module.add_cpp_namespace('addressUtils')
    register_types_ns3_addressUtils(nested_module)

    ## Register a nested module for the namespace aodv
    nested_module = module.add_cpp_namespace('aodv')
    register_types_ns3_aodv(nested_module)

    ## Register a nested module for the namespace dot11s
    nested_module = module.add_cpp_namespace('dot11s')
    register_types_ns3_dot11s(nested_module)

    ## Register a nested module for the namespace flame
    nested_module = module.add_cpp_namespace('flame')
    register_types_ns3_flame(nested_module)

    ## Register a nested module for the namespace internal
    nested_module = module.add_cpp_namespace('internal')
    register_types_ns3_internal(nested_module)

    ## Register a nested module for the namespace olsr
    nested_module = module.add_cpp_namespace('olsr')
    register_types_ns3_olsr(nested_module)
# Per-namespace type-registration hooks.  Each fetches the root module
# (result unused) and registers nothing further; they exist so the
# generated registration framework has one hook per C++ namespace.

def register_types_ns3_Config(module):
    root_module = module.get_root()

def register_types_ns3_FatalImpl(module):
    root_module = module.get_root()

def register_types_ns3_addressUtils(module):
    root_module = module.get_root()

def register_types_ns3_aodv(module):
    root_module = module.get_root()

def register_types_ns3_dot11s(module):
    root_module = module.get_root()

def register_types_ns3_flame(module):
    root_module = module.get_root()

def register_types_ns3_internal(module):
    root_module = module.get_root()

def register_types_ns3_olsr(module):
    root_module = module.get_root()
def register_methods(root_module):
    # No per-class methods to register for these bindings.
    return

def register_functions(root_module):
    """Delegate free-function registration to every nested namespace module."""
    module = root_module
    register_functions_ns3_Config(module.get_submodule('Config'), root_module)
    register_functions_ns3_FatalImpl(module.get_submodule('FatalImpl'), root_module)
    register_functions_ns3_addressUtils(module.get_submodule('addressUtils'), root_module)
    register_functions_ns3_aodv(module.get_submodule('aodv'), root_module)
    register_functions_ns3_dot11s(module.get_submodule('dot11s'), root_module)
    register_functions_ns3_flame(module.get_submodule('flame'), root_module)
    register_functions_ns3_internal(module.get_submodule('internal'), root_module)
    register_functions_ns3_olsr(module.get_submodule('olsr'), root_module)
    return
# Per-namespace free-function registration hooks; all empty for this
# module (nothing to register), kept as emitted by the generator.

def register_functions_ns3_Config(module, root_module):
    return

def register_functions_ns3_FatalImpl(module, root_module):
    return

def register_functions_ns3_addressUtils(module, root_module):
    return

def register_functions_ns3_aodv(module, root_module):
    return

def register_functions_ns3_dot11s(module, root_module):
    return

def register_functions_ns3_flame(module, root_module):
    return

def register_functions_ns3_internal(module, root_module):
    return

def register_functions_ns3_olsr(module, root_module):
    return
| gpl-2.0 |
aktech/sympy | sympy/functions/special/tests/test_hyper.py | 19 | 13589 | from sympy import (hyper, meijerg, S, Tuple, pi, I, exp, log,
cos, sqrt, symbols, oo, Derivative, gamma, O)
from sympy.series.limits import limit
from sympy.abc import x, z, k
from sympy.utilities.pytest import raises, slow
from sympy.utilities.randtest import (
random_complex_number as randcplx,
verify_numerically as tn,
test_derivative_numerically as td)
def test_TupleParametersBase():
    """The chain rule must be applied through tuple parameters' argument."""
    # test that our implementation of the chain rule works
    p = hyper((), (), z**2)
    assert p.diff(z) == p*2*z
def test_hyper():
    """Constructor validation, attributes, numerics and differentiation."""
    # ap/bq must be sequences, not bare numbers
    raises(TypeError, lambda: hyper(1, 2, z))

    assert hyper((1, 2), (1,), z) == hyper(Tuple(1, 2), Tuple(1), z)

    h = hyper((1, 2), (3, 4, 5), z)
    assert h.ap == Tuple(1, 2)
    assert h.bq == Tuple(3, 4, 5)
    assert h.argument == z
    assert h.is_commutative is True

    # just a few checks to make sure that all arguments go where they should
    assert tn(hyper(Tuple(), Tuple(), z), exp(z), z)
    assert tn(z*hyper((1, 1), Tuple(2), -z), log(1 + z), z)

    # differentiation
    h = hyper(
        (randcplx(), randcplx(), randcplx()), (randcplx(), randcplx()), z)
    assert td(h, z)

    a1, a2, b1, b2, b3 = symbols('a1:3, b1:4')
    assert hyper((a1, a2), (b1, b2, b3), z).diff(z) == \
        a1*a2/(b1*b2*b3) * hyper((a1 + 1, a2 + 1), (b1 + 1, b2 + 1, b3 + 1), z)

    # differentiation wrt parameters is not supported
    assert hyper([z], [], z).diff(z) == Derivative(hyper([z], [], z), z)

    # hyper is unbranched wrt parameters
    from sympy import polar_lift
    assert hyper([polar_lift(z)], [polar_lift(k)], polar_lift(x)) == \
        hyper([z], [k], polar_lift(x))
def test_expand_func():
    """expand_func on hyper/meijerg: Gauss summation and hyperexpand."""
    # evaluation at 1 of Gauss' hypergeometric function:
    from sympy.abc import a, b, c
    from sympy import gamma, expand_func
    a1, b1, c1 = randcplx(), randcplx(), randcplx() + 5
    assert expand_func(hyper([a, b], [c], 1)) == \
        gamma(c)*gamma(-a - b + c)/(gamma(-a + c)*gamma(-b + c))
    # numeric sanity check of the same identity
    assert abs(expand_func(hyper([a1, b1], [c1], 1)).n()
               - hyper([a1, b1], [c1], 1).n()) < 1e-10

    # hyperexpand wrapper for hyper:
    assert expand_func(hyper([], [], z)) == exp(z)
    assert expand_func(hyper([1, 2, 3], [], z)) == hyper([1, 2, 3], [], z)
    assert expand_func(meijerg([[1, 1], []], [[1], [0]], z)) == log(z + 1)
    assert expand_func(meijerg([[1, 1], []], [[], []], z)) == \
        meijerg([[1, 1], []], [[], []], z)
def replace_dummy(expr, sym):
    """Replace the single Dummy symbol occurring in expr (if any) with sym."""
    from sympy import Dummy
    dummies = expr.atoms(Dummy)
    if not dummies:
        return expr
    assert len(dummies) == 1
    return expr.xreplace({dummies.pop(): sym})
def test_hyper_rewrite_sum():
    """hyper.rewrite(Sum) yields the defining series when it converges."""
    from sympy import RisingFactorial, factorial, Dummy, Sum
    _k = Dummy("k")
    # the (1,) upper / (1,) lower pair cancels, leaving 1F1(2; 3; x)'s series
    assert replace_dummy(hyper((1, 2), (1, 3), x).rewrite(Sum), _k) == \
        Sum(x**_k / factorial(_k) * RisingFactorial(2, _k) /
            RisingFactorial(3, _k), (_k, 0, oo))
    # a negative-integer lower parameter makes the series undefined: no rewrite
    assert hyper((1, 2, 3), (-1, 3), z).rewrite(Sum) == \
        hyper((1, 2, 3), (-1, 3), z)
def test_radius_of_convergence():
    """radius_of_convergence and convergence_statement of pFq.

    The cases below exercise the p/q balance (p <= q -> oo, p == q + 1 -> 1,
    p > q + 1 -> 0) together with negative-integer parameters, which can
    terminate the series or make it undefined.
    """
    assert hyper((1, 2), [3], z).radius_of_convergence == 1
    assert hyper((1, 2), [3, 4], z).radius_of_convergence == oo
    assert hyper((1, 2, 3), [4], z).radius_of_convergence == 0
    assert hyper((0, 1, 2), [4], z).radius_of_convergence == oo
    assert hyper((-1, 1, 2), [-4], z).radius_of_convergence == 0
    assert hyper((-1, -2, 2), [-1], z).radius_of_convergence == oo
    assert hyper((-1, 2), [-1, -2], z).radius_of_convergence == 0
    assert hyper([-1, 1, 3], [-2, 2], z).radius_of_convergence == 1
    assert hyper([-1, 1], [-2, 2], z).radius_of_convergence == oo
    assert hyper([-1, 1, 3], [-2], z).radius_of_convergence == 0
    assert hyper((-1, 2, 3, 4), [], z).radius_of_convergence == oo
    # convergence on the unit circle |z| = 1
    assert hyper([1, 1], [3], 1).convergence_statement == True
    assert hyper([1, 1], [2], 1).convergence_statement == False
    assert hyper([1, 1], [2], -1).convergence_statement == True
    assert hyper([1, 1], [1], -1).convergence_statement == False
def test_meijer():
    """Constructor, attributes, numerics, derivatives and integrand of meijerg."""
    # malformed argument lists must raise
    raises(TypeError, lambda: meijerg(1, z))
    raises(TypeError, lambda: meijerg(((1,), (2,)), (3,), (4,), z))
    assert meijerg(((1, 2), (3,)), ((4,), (5,)), z) == \
        meijerg(Tuple(1, 2), Tuple(3), Tuple(4), Tuple(5), z)
    g = meijerg((1, 2), (3, 4, 5), (6, 7, 8, 9), (10, 11, 12, 13, 14), z)
    # an/aother and bm/bother are the split parameter groups; ap/bq their unions
    assert g.an == Tuple(1, 2)
    assert g.ap == Tuple(1, 2, 3, 4, 5)
    assert g.aother == Tuple(3, 4, 5)
    assert g.bm == Tuple(6, 7, 8, 9)
    assert g.bq == Tuple(6, 7, 8, 9, 10, 11, 12, 13, 14)
    assert g.bother == Tuple(10, 11, 12, 13, 14)
    assert g.argument == z
    assert g.nu == 75
    assert g.delta == -1
    assert g.is_commutative is True
    assert meijerg([1, 2], [3], [4], [5], z).delta == S(1)/2
    # just a few checks to make sure that all arguments go where they should
    assert tn(meijerg(Tuple(), Tuple(), Tuple(0), Tuple(), -z), exp(z), z)
    assert tn(sqrt(pi)*meijerg(Tuple(), Tuple(),
              Tuple(0), Tuple(S(1)/2), z**2/4), cos(z), z)
    assert tn(meijerg(Tuple(1, 1), Tuple(), Tuple(1), Tuple(0), z),
              log(1 + z), z)
    # test exceptions
    raises(ValueError, lambda: meijerg(((3, 1), (2,)), ((oo,), (2, 0)), x))
    raises(ValueError, lambda: meijerg(((3, 1), (2,)), ((1,), (2, 0)), x))
    # differentiation
    g = meijerg((randcplx(),), (randcplx() + 2*I,), Tuple(),
                (randcplx(), randcplx()), z)
    assert td(g, z)
    g = meijerg(Tuple(), (randcplx(),), Tuple(),
                (randcplx(), randcplx()), z)
    assert td(g, z)
    g = meijerg(Tuple(), Tuple(), Tuple(randcplx()),
                Tuple(randcplx(), randcplx()), z)
    assert td(g, z)
    a1, a2, b1, b2, c1, c2, d1, d2 = symbols('a1:3, b1:3, c1:3, d1:3')
    # symbolic derivative uses the contiguous-shift recurrence in a1
    assert meijerg((a1, a2), (b1, b2), (c1, c2), (d1, d2), z).diff(z) == \
        (meijerg((a1 - 1, a2), (b1, b2), (c1, c2), (d1, d2), z)
         + (a1 - 1)*meijerg((a1, a2), (b1, b2), (c1, c2), (d1, d2), z))/z
    # derivatives wrt parameters stay unevaluated
    assert meijerg([z, z], [], [], [], z).diff(z) == \
        Derivative(meijerg([z, z], [], [], [], z), z)
    # meijerg is unbranched wrt parameters
    from sympy import polar_lift as pl
    assert meijerg([pl(a1)], [pl(a2)], [pl(b1)], [pl(b2)], pl(z)) == \
        meijerg([a1], [a2], [b1], [b2], pl(z))
    # integrand
    from sympy.abc import a, b, c, d, s
    assert meijerg([a], [b], [c], [d], z).integrand(s) == \
        z**s*gamma(c - s)*gamma(-a + s + 1)/(gamma(b - s)*gamma(-d + s + 1))
def test_meijerg_derivative():
    """Symbolic and numeric derivatives of meijerg wrt a parameter."""
    # differentiating wrt a b-parameter introduces a log(z) term plus a
    # higher-order G-function
    assert meijerg([], [1, 1], [0, 0, x], [], z).diff(x) == \
        log(z)*meijerg([], [1, 1], [0, 0, x], [], z) \
        + 2*meijerg([], [1, 1, 1], [0, 0, x, 0], [], z)
    y = randcplx()
    a = 5 # mpmath chokes with non-real numbers, and Mod1 with floats
    # numeric check of d/dx in every parameter slot
    assert td(meijerg([x], [], [], [], y), x)
    assert td(meijerg([x**2], [], [], [], y), x)
    assert td(meijerg([], [x], [], [], y), x)
    assert td(meijerg([], [], [x], [], y), x)
    assert td(meijerg([], [], [], [x], y), x)
    # parameter pairs differing by an integer exercise the limit formulas
    assert td(meijerg([x], [a], [a + 1], [], y), x)
    assert td(meijerg([x], [a + 1], [a], [], y), x)
    assert td(meijerg([x, a], [], [], [a + 1], y), x)
    assert td(meijerg([x, a + 1], [], [], [a], y), x)
    b = S(3)/2
    assert td(meijerg([a + 2], [b], [b - 3, x], [a], y), x)
def test_meijerg_period():
    """get_period() of meijerg: branching period of the represented function.

    Each case is annotated with the elementary function the G-function
    represents; oo means the function is not periodic in the polar angle.
    """
    assert meijerg([], [1], [0], [], x).get_period() == 2*pi
    assert meijerg([1], [], [], [0], x).get_period() == 2*pi
    assert meijerg([], [], [0], [], x).get_period() == 2*pi # exp(x)
    assert meijerg(
        [], [], [0], [S(1)/2], x).get_period() == 2*pi # cos(sqrt(x))
    assert meijerg(
        [], [], [S(1)/2], [0], x).get_period() == 4*pi # sin(sqrt(x))
    assert meijerg([1, 1], [], [1], [0], x).get_period() == oo # log(1 + x)
def test_hyper_unpolarify():
    """Check when hyper drops an exp_polar(2*pi*I) factor from its argument."""
    from sympy import exp_polar
    polar_arg = exp_polar(2*pi*I)*x
    plain_arg = x
    # cases where the polar factor is dropped vs. kept depend on the
    # parameter lists (see hyper's eval logic for zero parameters)
    assert hyper([], [], polar_arg).argument == plain_arg
    assert hyper([0], [], polar_arg).argument == polar_arg
    assert hyper([0], [0], polar_arg).argument == plain_arg
    assert hyper([0, 1], [0], polar_arg).argument == polar_arg
@slow
def test_hyperrep():
    """The HyperRep base class and all concrete representatives.

    A representative is valid if it matches the target function for small
    arguments, its small/big branch expressions agree where they overlap,
    and it is continuous across the branch cuts.
    """
    from sympy.functions.special.hyper import (HyperRep, HyperRep_atanh,
        HyperRep_power1, HyperRep_power2, HyperRep_log1, HyperRep_asin1,
        HyperRep_asin2, HyperRep_sqrts1, HyperRep_sqrts2, HyperRep_log2,
        HyperRep_cosasin, HyperRep_sinasin)
    # First test the base class works.
    from sympy import Piecewise, exp_polar
    a, b, c, d, z = symbols('a b c d z')
    # dummy representative: each branch expression is a distinct marker, so
    # the rewrites below reveal which branch HyperRep selected
    class myrep(HyperRep):
        @classmethod
        def _expr_small(cls, x):
            return a
        @classmethod
        def _expr_small_minus(cls, x):
            return b
        @classmethod
        def _expr_big(cls, x, n):
            return c*n
        @classmethod
        def _expr_big_minus(cls, x, n):
            return d*n
    # each extra exp_polar(I*pi) advances the branch: small, small_minus,
    # big(n=1), big_minus(n=1), big(n=2), ...
    assert myrep(z).rewrite('nonrep') == Piecewise((0, abs(z) > 1), (a, True))
    assert myrep(exp_polar(I*pi)*z).rewrite('nonrep') == \
        Piecewise((0, abs(z) > 1), (b, True))
    assert myrep(exp_polar(2*I*pi)*z).rewrite('nonrep') == \
        Piecewise((c, abs(z) > 1), (a, True))
    assert myrep(exp_polar(3*I*pi)*z).rewrite('nonrep') == \
        Piecewise((d, abs(z) > 1), (b, True))
    assert myrep(exp_polar(4*I*pi)*z).rewrite('nonrep') == \
        Piecewise((2*c, abs(z) > 1), (a, True))
    assert myrep(exp_polar(5*I*pi)*z).rewrite('nonrep') == \
        Piecewise((2*d, abs(z) > 1), (b, True))
    assert myrep(z).rewrite('nonrepsmall') == a
    assert myrep(exp_polar(I*pi)*z).rewrite('nonrepsmall') == b
    def t(func, hyp, z):
        """ Test that func is a valid representation of hyp. """
        # First test that func agrees with hyp for small z
        if not tn(func.rewrite('nonrepsmall'), hyp, z,
                  a=S(-1)/2, b=S(-1)/2, c=S(1)/2, d=S(1)/2):
            return False
        # Next check that the two small representations agree.
        if not tn(
            func.rewrite('nonrepsmall').subs(
                z, exp_polar(I*pi)*z).replace(exp_polar, exp),
            func.subs(z, exp_polar(I*pi)*z).rewrite('nonrepsmall'),
            z, a=S(-1)/2, b=S(-1)/2, c=S(1)/2, d=S(1)/2):
            return False
        # Next check continuity along exp_polar(I*pi)*t
        expr = func.subs(z, exp_polar(I*pi)*z).rewrite('nonrep')
        if abs(expr.subs(z, 1 + 1e-15).n() - expr.subs(z, 1 - 1e-15).n()) > 1e-10:
            return False
        # Finally check continuity of the big reps.
        def dosubs(func, a, b):
            rv = func.subs(z, exp_polar(a)*z).rewrite('nonrep')
            return rv.subs(z, exp_polar(b)*z).replace(exp_polar, exp)
        for n in [0, 1, 2, 3, 4, -1, -2, -3, -4]:
            expr1 = dosubs(func, 2*I*pi*n, I*pi/2)
            expr2 = dosubs(func, 2*I*pi*n + I*pi, -I*pi/2)
            if not tn(expr1, expr2, z):
                return False
            expr1 = dosubs(func, 2*I*pi*(n + 1), -I*pi/2)
            expr2 = dosubs(func, 2*I*pi*n + I*pi, I*pi/2)
            if not tn(expr1, expr2, z):
                return False
        return True
    # Now test the various representatives.
    a = S(1)/3
    assert t(HyperRep_atanh(z), hyper([S(1)/2, 1], [S(3)/2], z), z)
    assert t(HyperRep_power1(a, z), hyper([-a], [], z), z)
    assert t(HyperRep_power2(a, z), hyper([a, a - S(1)/2], [2*a], z), z)
    assert t(HyperRep_log1(z), -z*hyper([1, 1], [2], z), z)
    assert t(HyperRep_asin1(z), hyper([S(1)/2, S(1)/2], [S(3)/2], z), z)
    assert t(HyperRep_asin2(z), hyper([1, 1], [S(3)/2], z), z)
    assert t(HyperRep_sqrts1(a, z), hyper([-a, S(1)/2 - a], [S(1)/2], z), z)
    assert t(HyperRep_sqrts2(a, z),
             -2*z/(2*a + 1)*hyper([-a - S(1)/2, -a], [S(1)/2], z).diff(z), z)
    assert t(HyperRep_log2(z), -z/4*hyper([S(3)/2, 1, 1], [2, 2], z), z)
    assert t(HyperRep_cosasin(a, z), hyper([-a, a], [S(1)/2], z), z)
    assert t(HyperRep_sinasin(a, z), 2*a*z*hyper([1 - a, 1 + a], [S(3)/2], z), z)
@slow
def test_meijerg_eval():
    """Numeric agreement of a meijerg representation of besseli(a, x) on a
    polar argument, plus its continuity in the polar angle k."""
    from sympy import besseli, exp_polar
    from sympy.abc import l
    a = randcplx()
    arg = x*exp_polar(k*pi*I)
    expr1 = pi*meijerg([[], [(a + 1)/2]], [[a/2], [-a/2, (a + 1)/2]], arg**2/4)
    expr2 = besseli(a, arg)
    # Test that the two expressions agree for all arguments.
    for x_ in [0.5, 1.5]:
        for k_ in [0.0, 0.1, 0.3, 0.5, 0.8, 1, 5.751, 15.3]:
            assert abs((expr1 - expr2).n(subs={x: x_, k: k_})) < 1e-10
            assert abs((expr1 - expr2).n(subs={x: x_, k: -k_})) < 1e-10
    # Test continuity independently
    # (compare the expression at angles k_ +/- eps via a second symbol l)
    eps = 1e-13
    expr2 = expr1.subs(k, l)
    for x_ in [0.5, 1.5]:
        for k_ in [0.5, S(1)/3, 0.25, 0.75, S(2)/3, 1.0, 1.5]:
            assert abs((expr1 - expr2).n(
                subs={x: x_, k: k_ + eps, l: k_ - eps})) < 1e-10
            assert abs((expr1 - expr2).n(
                subs={x: x_, k: -k_ + eps, l: -k_ - eps})) < 1e-10
    # evaluating a symmetric combination on both sides of the cut gives pi/e
    expr = (meijerg(((0.5,), ()), ((0.5, 0, 0.5), ()), exp_polar(-I*pi)/4)
            + meijerg(((0.5,), ()), ((0.5, 0, 0.5), ()), exp_polar(I*pi)/4)) \
        /(2*sqrt(pi))
    assert (expr - pi/exp(1)).n(chop=True) == 0
def test_limits():
    """Series expansion of hyper and limit of meijerg at the origin."""
    # local symbols shadow the module-level imports on purpose
    k, x = symbols('k, x')
    assert hyper((1,), (S(4)/3, S(5)/3), k**2).series(k) == \
        hyper((1,), (S(4)/3, S(5)/3), 0) + \
        9*k**2*hyper((2,), (S(7)/3, S(8)/3), 0)/20 + \
        81*k**4*hyper((3,), (S(10)/3, S(11)/3), 0)/1120 + \
        O(k**6) # issue 6350
    assert limit(meijerg((), (), (1,), (0,), -x), x, 0) == \
        meijerg(((), ()), ((1,), (0,)), 0) # issue 6052
| bsd-3-clause |
anti1869/aeroport | src/aeroport/destinations/stream.py | 1 | 1293 | """
Send payload to the Stream (messaging abstraction, provided by SunHead framework.
"""
import asyncio
from sunhead.events.stream import init_stream_from_settings
from sunhead.metrics import get_metrics
from aeroport.abc import AbstractDestination, AbstractPayload
from aeroport.utils import register_metric
class StreamDestination(AbstractDestination):
    """
    Send payloads to the SunHead framework's stream (which is distributed queues).
    """
    def __init__(self, **init_kwargs):
        # init_kwargs are forwarded to the base class; prepare() later reads
        # them back via self._init_kwargs as the stream configuration
        # (presumably stored by AbstractDestination.__init__ -- TODO confirm).
        super().__init__(**init_kwargs)
        # connection is established lazily in prepare(), not here
        self._stream = None
        self._loop = asyncio.get_event_loop()
        self._metrics = get_metrics()
        # counter metric: total number of payloads published to the stream
        self._metric_sent = register_metric(
            self._metrics,
            "counter",
            "stream_payloads_sent_total",
            ""
        )
    async def prepare(self):
        # Build and connect the stream; must be awaited before
        # process_payload() is called, otherwise self._stream is None.
        self._stream = await init_stream_from_settings(self._init_kwargs)
        await self._stream.connect()
    async def release(self):
        # Close the connection opened by prepare().
        await self._stream.close()
    async def process_payload(self, payload: AbstractPayload):
        # Routing key is derived from the payload's class name, e.g.
        # "aeroport.payload_sent.myitem" for a MyItem payload.
        pname = payload.__class__.__name__.lower()
        # NOTE(review): payload.as_dict is accessed as an attribute/property,
        # not called -- confirm AbstractPayload declares it that way.
        await self._stream.publish(payload.as_dict, ("aeroport.payload_sent.{}".format(pname), ))
        self._metrics.counters.get(self._metric_sent).inc()
| apache-2.0 |
h2oai/h2o | py/testdir_single_jvm/notest_parse_commented_header.py | 9 | 5097 | import unittest, time, sys, random
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_import as h2i
import h2o_browse as h2b
def write_syn_dataset(csvPathname, rowCount, headerData, rList):
    """Write a synthetic CSV file.

    The first line is headerData; it is followed by rowCount data rows.
    Each data row is one of the two row templates in rList (chosen at
    random) with a random 0..7 output column appended.
    """
    # use a context manager so the handle is closed even if a write fails
    # (the original left the file open on exception)
    with open(csvPathname, "w+") as dsf:
        dsf.write(headerData + "\n")
        for i in range(rowCount):
            # two choices on the input. Make output choices random
            r = rList[random.randint(0, 1)] + "," + str(random.randint(0, 7))
            dsf.write(r + "\n")
def rand_rowData(colCount):
    """Return two comma-joined rows of colCount random ints in 0..7.

    Two independently generated templates are returned because RF will
    complain if all inputs are the same.
    """
    def one_row():
        return ",".join(str(random.randint(0, 7)) for _ in range(colCount))
    return [one_row(), one_row()]
class Basic(unittest.TestCase):
    """H2O integration test: parsing a CSV whose header line is commented.

    Spins up a 2-node H2O cloud for the whole class, generates synthetic
    CSV files whose header starts with '#' (in several whitespace
    variations), parses them with header=1 and checks the resulting
    row/column counts, then runs a quick RF sanity job.
    """
    def tearDown(self):
        # fail fast if any node logged errors during the test
        h2o.check_sandbox_for_errors()
    @classmethod
    def setUpClass(cls):
        global SEED
        SEED = h2o.setup_random_seed()
        # 2-node cloud, 1.3 GB heap per node
        h2o.init(2,java_heap_MB=1300,use_flatfile=True)
    @classmethod
    def tearDownClass(cls):
        h2o.tear_down_cloud()
    def test_parse_commented_header(self):
        print "If header=1, and the first char of first line is #, the # should be removed/ignored" + \
            "and the line parsed normally as a header"
        SYNDATASETS_DIR = h2o.make_syn_dir()
        csvFilename = "syn_ints.csv"
        csvPathname = SYNDATASETS_DIR + '/' + csvFilename
        headerData = "ID,CAPSULE,AGE,RACE,DPROS,DCAPS,PSA,VOL,GLEASON,output"
        # cols must be 9 to match the header above, otherwise a different bug is hit
        # extra output is added, so it's 10 total
        # try different header prefixes to see if it's okay..only white space is space?
        tryList = [
            (2, 3, 9, 'cA', 60, "#"),
            (2, 3, 9, 'cA', 60, "# "),
            (2, 3, 9, 'cA', 60, " #"),
            (2, 3, 9, 'cA', 60, " # "),
            (2, 3, 9, 'cA', 60, "# "),
        ]
        trial = 0
        # tuple layout: (fileNum, rowCount, colCount, hex_key, timeoutSecs, headerPrefix)
        for (fileNum, rowCount, colCount, hex_key, timeoutSecs, headerPrefix) in tryList:
            trial += 1
            # FIX! should we add a header to them randomly???
            print "Wait while", fileNum, "synthetic files are created in", SYNDATASETS_DIR
            rowxcol = str(rowCount) + 'x' + str(colCount)
            totalCols = colCount + 1 # 1 extra for output
            totalRows = 0
            # might as well try for multi-file parse?
            for fileN in range(fileNum):
                csvFilename = 'syn_' + str(fileN) + "_" + str(SEED) + "_" + rowxcol + '.csv'
                csvPathname = SYNDATASETS_DIR + '/' + csvFilename
                rList = rand_rowData(colCount)
                write_syn_dataset(csvPathname, rowCount, headerData=headerPrefix + headerData, rList=rList)
                totalRows += rowCount
            # make sure all key names are unique, when we re-put and re-parse (h2o caching issues)
            key = "syn_" + str(trial)
            hex_key = "syn_" + str(trial) + ".hex"
            # DON"T get redirected to S3! (EC2 hack in config, remember!)
            # use it at the node level directly (because we gen'ed the files.
            # I suppose we could force the redirect state bits in h2o.nodes[0] to False, instead?:w
            h2i.import_only(path=SYNDATASETS_DIR + '/*')
            # use regex. the only files in the dir will be the ones we just created with *fileN* match
            start = time.time()
            parseResult = h2i.import_parse(path=SYNDATASETS_DIR + '/*'+rowxcol+'*', schema='local',
                hex_key=hex_key, header=1, timeoutSecs=timeoutSecs)
            print "parseResult['destination_key']: " + parseResult['destination_key']
            inspect = h2o_cmd.runInspect(None, parseResult['destination_key'])
            h2o_cmd.infoFromInspect(inspect, csvPathname)
            print "\n" + csvPathname, \
                " numRows:", "{:,}".format(inspect['numRows']), \
                " numCols:", "{:,}".format(inspect['numCols'])
            # should match # of cols in header or ??
            self.assertEqual(inspect['numCols'], totalCols,
                "parse created result with the wrong number of cols %s %s" % (inspect['numCols'], totalCols))
            self.assertEqual(inspect['numRows'], totalRows,
                "parse created result with the wrong number of rows (header shouldn't count) %s %s" % \
                (inspect['numRows'], totalRows))
            # quick RF run to make sure the parsed frame is usable end-to-end
            kwargs = {'sample': 75, 'depth': 25, 'ntree': 1, 'ignore': 'ID,CAPSULE'}
            start = time.time()
            rfv = h2o_cmd.runRF(parseResult=parseResult, timeoutSecs=timeoutSecs, **kwargs)
            elapsed = time.time() - start
            print "%d pct. of timeout" % ((elapsed/timeoutSecs) * 100)
            print "trial #", trial, "totalRows:", totalRows, "parse end on ", csvFilename, \
                'took', time.time() - start, 'seconds'
            h2o.check_sandbox_for_errors()
if __name__ == '__main__':
    # allow running this test module directly through the h2o test harness
    h2o.unit_main()
| apache-2.0 |
samliu/servo | tests/wpt/web-platform-tests/referrer-policy/generic/subresource/subresource.py | 50 | 3167 | import os, sys, json, urlparse, urllib
def get_template(template_basename):
    """Return the contents of template_basename from the sibling template/ dir."""
    here = os.path.dirname(os.path.abspath(__file__))
    template_dir = os.path.abspath(os.path.join(here, "..", "template"))
    with open(os.path.join(template_dir, template_basename), "r") as handle:
        return handle.read()
# TODO(kristijanburnik): subdomain_prefix is a hardcoded value aligned with
# referrer-policy-test-case.js. The prefix should be configured in one place.
def get_swapped_origin_netloc(netloc, subdomain_prefix = "www1."):
    """Toggle the www1. subdomain prefix on netloc (add if absent, strip if present)."""
    has_prefix = netloc.startswith(subdomain_prefix)
    return netloc[len(subdomain_prefix):] if has_prefix \
        else subdomain_prefix + netloc
def create_redirect_url(request, cross_origin = False):
    """Build the redirect target for request: same scheme and path, query
    and fragment stripped; the netloc is origin-swapped when cross_origin
    is True."""
    parts = urlparse.urlsplit(request.url)
    netloc = parts.netloc
    if cross_origin:
        netloc = get_swapped_origin_netloc(netloc)
    # query/fragment are dropped so the redirect target is a bare resource URL
    return urlparse.urlunsplit((parts.scheme, netloc, parts.path, None, None))
def redirect(url, response):
    """Emit a 301 redirect to url on the raw response writer."""
    # raw writer is used, so disable wptserve's automatic header generation
    response.add_required_headers = False
    response.writer.write_status(301)
    # CORS header so cross-origin fetches are allowed to follow the redirect
    response.writer.write_header("access-control-allow-origin", "*")
    response.writer.write_header("location", url)
    response.writer.end_headers()
    response.writer.write("")
def preprocess_redirection(request, response):
    """Serve a 301 when the request asks for one via its "redirection" query
    parameter. Returns True when a redirect was written (the caller must not
    write a body), False when the request should be served normally."""
    if "redirection" not in request.GET:
        return False
    redirection = request.GET["redirection"]
    if redirection == "no-redirect":
        return False
    # map the redirection type onto the cross_origin flag of the target URL
    cross_origin_by_type = {
        "keep-origin-redirect": False,
        "swap-origin-redirect": True,
    }
    if redirection not in cross_origin_by_type:
        raise ValueError("Invalid redirection type '%s'" % redirection)
    target = create_redirect_url(request,
                                 cross_origin = cross_origin_by_type[redirection])
    redirect(target, response)
    return True
def __noop(*args, **kwargs):
    """Default payload generator: produce an empty response body.

    Accepts any arguments: respond() invokes its payload_generator with a
    single server_data dict, while the original two-parameter signature
    (request, response) would have raised TypeError on that call, breaking
    respond() whenever no explicit payload_generator was supplied.
    """
    return ""
def respond(request,
            response,
            status_code = 200,
            content_type = "text/html",
            payload_generator = __noop,
            cache_control = "no-cache; must-revalidate",
            access_control_allow_origin = "*"):
    """Write a complete response on the raw writer.

    Honors the request's "redirection" query parameter first; otherwise
    writes status and headers, then a body produced by calling
    payload_generator with a server_data dict (currently just the request
    headers serialized as JSON).
    """
    if preprocess_redirection(request, response):
        return
    # raw writer is used, so disable wptserve's automatic header generation
    response.add_required_headers = False
    response.writer.write_status(status_code)
    # pass access_control_allow_origin=None to omit the CORS header entirely
    if access_control_allow_origin != None:
        response.writer.write_header("access-control-allow-origin",
                                     access_control_allow_origin)
    response.writer.write_header("content-type", content_type)
    response.writer.write_header("cache-control", cache_control)
    response.writer.end_headers()
    # NOTE(review): assumes request.headers is JSON-serializable as-is --
    # confirm against the wptserve headers type.
    server_data = {"headers": json.dumps(request.headers, indent = 4)}
    payload = payload_generator(server_data)
    response.writer.write(payload)
| mpl-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.