repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
GNS3/gns3-server | tests/handlers/api/controller/test_drawing.py | 1 | 3125 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from gns3server.controller.drawing import Drawing
async def test_create_drawing(controller_api, project):
    """Posting a new drawing to a project returns 201 and a drawing id."""
    drawing_data = {
        "svg": '<svg height="210" width="500"><line x1="0" y1="0" x2="200" y2="200" style="stroke:rgb(255,0,0);stroke-width:2" /></svg>',
        "x": 10,
        "y": 20,
        "z": 0,
    }
    response = await controller_api.post(f"/projects/{project.id}/drawings", drawing_data)
    assert response.status == 201
    assert response.json["drawing_id"] is not None
async def test_get_drawing(controller_api, project):
    """A created drawing can be fetched back with its coordinates intact."""
    drawing_data = {
        "svg": '<svg height="210" width="500"><line x1="0" y1="0" x2="200" y2="200" style="stroke:rgb(255,0,0);stroke-width:2" /></svg>',
        "x": 10,
        "y": 20,
        "z": 0,
    }
    created = await controller_api.post(f"/projects/{project.id}/drawings", drawing_data)
    response = await controller_api.get(
        f"/projects/{project.id}/drawings/{created.json['drawing_id']}")
    assert response.status == 200
    assert response.json["x"] == 10
async def test_update_drawing(controller_api, project):
    """PUT on an existing drawing updates the changed field and returns 201."""
    drawing_data = {
        "svg": '<svg height="210" width="500"><line x1="0" y1="0" x2="200" y2="200" style="stroke:rgb(255,0,0);stroke-width:2" /></svg>',
        "x": 10,
        "y": 20,
        "z": 0,
    }
    created = await controller_api.post(f"/projects/{project.id}/drawings", drawing_data)
    response = await controller_api.put(
        f"/projects/{project.id}/drawings/{created.json['drawing_id']}", {"x": 42})
    assert response.status == 201
    assert response.json["x"] == 42
async def test_list_drawing(controller_api, project):
    """Listing a project's drawings returns every drawing created on it."""
    drawing_data = {
        "svg": '<svg height="210" width="500"><line x1="0" y1="0" x2="200" y2="200" style="stroke:rgb(255,0,0);stroke-width:2" /></svg>',
        "x": 10,
        "y": 20,
        "z": 0,
    }
    await controller_api.post(f"/projects/{project.id}/drawings", drawing_data)
    response = await controller_api.get(f"/projects/{project.id}/drawings")
    assert response.status == 200
    assert len(response.json) == 1
async def test_delete_drawing(controller_api, project):
    """DELETE removes the drawing from the project and returns 204."""
    drawing = Drawing(project)
    # Seed the project's private drawing map directly so the handler has
    # something to delete.
    project._drawings = {drawing.id: drawing}
    response = await controller_api.delete(f"/projects/{project.id}/drawings/{drawing.id}")
    assert response.status == 204
    assert drawing.id not in project.drawings
| gpl-3.0 |
pombredanne/pulp | client_lib/pulp/client/commands/repo/history.py | 17 | 7546 | """
Commands for showing a repository's sync and publish history
"""
from gettext import gettext as _
from pulp.client.commands.options import OPTION_REPO_ID
from pulp.client.extensions.extensions import PulpCliOption, PulpCliFlag, PulpCliCommand
from pulp.client import validators
# The default limit on the number of history entries to display
REPO_HISTORY_LIMIT = 5

# Descriptions (user-visible, so wrapped with gettext)
DESC_DETAILS = _('if specified, all history information is displayed')
DESC_DISTRIBUTOR_ID = _('the distributor id to display history entries for')
DESC_END_DATE = _('only return entries that occur on or before the given date in iso8601 format'
                  ' (yyyy-mm-ddThh:mm:ssZ)')
DESC_LIMIT = _(
    'limits displayed history entries to the given amount (must be greater than zero); the default'
    ' is %(limit)s' % {'limit': REPO_HISTORY_LIMIT})
DESC_PUBLISH_HISTORY = _('displays the history of publish operations on a repository')
DESC_SORT = _('indicates the sort direction ("ascending" or "descending") based on the timestamp')
DESC_SYNC_HISTORY = _('displays the history of sync operations on a repository')
DESC_START_DATE = _('only return entries that occur on or after the given date in iso8601 format'
                    ' (yyyy-mm-ddThh:mm:ssZ)')

# Options shared by both history commands; dates are validated as iso8601
# strings and --limit must validate as a positive integer.
OPTION_END_DATE = PulpCliOption('--end-date', DESC_END_DATE, required=False,
                                validate_func=validators.iso8601_datetime_validator)
OPTION_LIMIT = PulpCliOption('--limit', DESC_LIMIT, required=False,
                             validate_func=validators.positive_int_validator)
OPTION_SORT = PulpCliOption('--sort', DESC_SORT, required=False)
# Mandatory: publish history is always scoped to a single distributor.
OPTION_DISTRIBUTOR_ID = PulpCliOption('--distributor-id', DESC_DISTRIBUTOR_ID, required=True,
                                      validate_func=validators.id_validator)
OPTION_START_DATE = PulpCliOption('--start-date', DESC_START_DATE, required=False,
                                  validate_func=validators.iso8601_datetime_validator)

# Flags
FLAG_DETAILS = PulpCliFlag('--details', DESC_DETAILS, aliases='-d')
class SyncHistoryCommand(PulpCliCommand):
    """CLI command that displays the sync history of a given repository."""

    def __init__(self, context, name='sync', description=DESC_SYNC_HISTORY):
        """
        :param context: client context used to interact with the client
            framework and server
        :type context: pulp.client.extensions.core.ClientContext
        :param name: name of the command in the history section
        :type name: str
        :param description: description to use in the cli
        :type description: str
        """
        # The context is used to access the server and prompt.
        self.context = context
        super(SyncHistoryCommand, self).__init__(name, description, self.run)
        for option in (OPTION_REPO_ID, OPTION_LIMIT, OPTION_SORT,
                       OPTION_START_DATE, OPTION_END_DATE):
            self.add_option(option)
        self.add_flag(FLAG_DETAILS)
        # Default columns rendered for each history entry.
        self.fields_to_display = ['repo_id', 'result', 'started', 'completed',
                                  'added_count', 'removed_count', 'updated_count']

    def run(self, **user_input):
        """Fetch and render the sync history selected by the user.

        :param user_input: the options and flags provided by the user
        :type user_input: dict
        """
        repo_id = user_input[OPTION_REPO_ID.keyword]
        raw_limit = user_input[OPTION_LIMIT.keyword]
        limit = REPO_HISTORY_LIMIT if raw_limit is None else int(raw_limit)
        start_date = user_input[OPTION_START_DATE.keyword]
        end_date = user_input[OPTION_END_DATE.keyword]
        sort = user_input[OPTION_SORT.keyword]
        details = user_input[FLAG_DETAILS.keyword]

        # Request the sync history from the server.
        sync_list = self.context.server.repo_history.sync_history(
            repo_id, limit, sort, start_date, end_date).response_body

        # With --details the summary and details columns are shown as well.
        if details is True:
            self.fields_to_display.extend(['summary', 'details'])
        filters = order = self.fields_to_display

        # Render results.
        title = _('Sync History [ %(repo)s ]') % {'repo': repo_id}
        self.context.prompt.render_title(title)
        self.context.prompt.render_document_list(sync_list, filters=filters,
                                                 order=order)
class PublishHistoryCommand(PulpCliCommand):
    """CLI command that displays the publish history of a repository/distributor pair."""

    def __init__(self, context, name='publish', description=DESC_PUBLISH_HISTORY):
        """
        :param context: client context used to interact with the client
            framework and server
        :type context: pulp.client.extensions.core.ClientContext
        :param name: name of the command in the history section
        :type name: str
        :param description: description to use in the cli
        :type description: str
        """
        # The context is used to access the server and prompt.
        self.context = context
        super(PublishHistoryCommand, self).__init__(name, description, self.run)
        # History is given for a repo id and distributor id pair, so both are
        # mandatory; the remaining options refine the query.
        for option in (OPTION_REPO_ID, OPTION_DISTRIBUTOR_ID, OPTION_LIMIT,
                       OPTION_SORT, OPTION_START_DATE, OPTION_END_DATE):
            self.add_option(option)
        self.add_flag(FLAG_DETAILS)
        # Default columns rendered for each history entry.
        self.fields_to_display = ['repo_id', 'distributor_id', 'result',
                                  'started', 'completed']

    def run(self, **user_input):
        """Fetch and render the publish history selected by the user.

        :param user_input: the options and flags provided by the user
        :type user_input: dict
        """
        repo_id = user_input[OPTION_REPO_ID.keyword]
        distributor_id = user_input[OPTION_DISTRIBUTOR_ID.keyword]
        raw_limit = user_input[OPTION_LIMIT.keyword]
        limit = REPO_HISTORY_LIMIT if raw_limit is None else int(raw_limit)
        start_date = user_input[OPTION_START_DATE.keyword]
        end_date = user_input[OPTION_END_DATE.keyword]
        sort = user_input[OPTION_SORT.keyword]
        details = user_input[FLAG_DETAILS.keyword]

        # Request the publish history from the server.
        publish_list = self.context.server.repo_history.publish_history(
            repo_id, distributor_id, limit, sort, start_date,
            end_date).response_body

        # With --details the summary and details columns are shown as well.
        if details is True:
            self.fields_to_display.extend(['summary', 'details'])
        filters = order = self.fields_to_display

        # Render results.
        title = _('Publish History [ %(repo)s ]') % {'repo': repo_id}
        self.context.prompt.render_title(title)
        self.context.prompt.render_document_list(publish_list, filters=filters,
                                                 order=order)
| gpl-2.0 |
zzicewind/nova | nova/virt/vmwareapi/ds_util.py | 11 | 17618 | # Copyright (c) 2014 VMware, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Datastore utility functions
"""
from oslo_log import log as logging
from oslo_vmware import exceptions as vexc
from oslo_vmware.objects import datastore as ds_obj
from oslo_vmware import pbm
from oslo_vmware import vim_util as vutil
from nova import exception
from nova.i18n import _, _LE, _LI
from nova.virt.vmwareapi import constants
from nova.virt.vmwareapi import vim_util
from nova.virt.vmwareapi import vm_util
LOG = logging.getLogger(__name__)

# Every datastore type this driver can use.  Note that vSAN is stripped from
# this set for non-stream-optimized disks (see get_allowed_datastore_types).
ALL_SUPPORTED_DS_TYPES = frozenset([constants.DATASTORE_TYPE_VMFS,
                                    constants.DATASTORE_TYPE_NFS,
                                    constants.DATASTORE_TYPE_NFS41,
                                    constants.DATASTORE_TYPE_VSAN])
def _select_datastore(session, data_stores, best_match, datastore_regex=None,
                      storage_policy=None,
                      allowed_ds_types=ALL_SUPPORTED_DS_TYPES):
    """Find the most preferable datastore in a given RetrieveResult object.

    :param session: vmwareapi session
    :param data_stores: a RetrieveResult object from a vSphere API call
    :param best_match: the current best match for datastore
    :param datastore_regex: an optional regular expression to match names
    :param storage_policy: storage policy for the datastore
    :param allowed_ds_types: a list of acceptable datastore type names
    :return: the updated best match (a Datastore object) or the original one
    """
    if storage_policy:
        candidates = _filter_datastores_matching_storage_policy(
            session, data_stores, storage_policy)
        if not candidates:
            return best_match
    else:
        candidates = data_stores

    # candidates is a RetrieveResult object from a vSphere API call.
    for obj_content in candidates.objects:
        # The propSet attribute "need not be set" by the returning API.
        if not hasattr(obj_content, 'propSet'):
            continue
        propdict = vm_util.propset_dict(obj_content.propSet)
        if not _is_datastore_valid(propdict, datastore_regex, allowed_ds_types):
            continue
        candidate = ds_obj.Datastore(ref=obj_content.obj,
                                     name=propdict['summary.name'],
                                     capacity=propdict['summary.capacity'],
                                     freespace=propdict['summary.freeSpace'])
        # Favor the datastore with the most free space.
        if best_match is None or candidate.freespace > best_match.freespace:
            best_match = candidate
    return best_match
def _is_datastore_valid(propdict, datastore_regex, ds_types):
"""Checks if a datastore is valid based on the following criteria.
Criteria:
- Datastore is accessible
- Datastore is not in maintenance mode (optional)
- Datastore's type is one of the given ds_types
- Datastore matches the supplied regex (optional)
:param propdict: datastore summary dict
:param datastore_regex : Regex to match the name of a datastore.
"""
# Local storage identifier vSphere doesn't support CIFS or
# vfat for datastores, therefore filtered
return (propdict.get('summary.accessible') and
(propdict.get('summary.maintenanceMode') is None or
propdict.get('summary.maintenanceMode') == 'normal') and
propdict['summary.type'] in ds_types and
(datastore_regex is None or
datastore_regex.match(propdict['summary.name'])))
def get_datastore(session, cluster, datastore_regex=None,
                  storage_policy=None,
                  allowed_ds_types=ALL_SUPPORTED_DS_TYPES):
    """Get the datastore list and choose the most preferable one.

    :raises: exception.DatastoreNotFound when the cluster exposes no
        datastore matching the given constraints
    """
    datastore_ret = session._call_method(
        vim_util,
        "get_dynamic_property", cluster,
        "ClusterComputeResource", "datastore")
    # An empty string is returned when there are no hosts in the cluster.
    if not datastore_ret:
        raise exception.DatastoreNotFound()

    mors = datastore_ret.ManagedObjectReference
    result = session._call_method(
        vim_util, "get_properties_for_a_collection_of_objects",
        "Datastore", mors,
        ["summary.type", "summary.name", "summary.capacity",
         "summary.freeSpace", "summary.accessible",
         "summary.maintenanceMode"])

    # Page through the retrieve results, keeping the running best match.
    best_match = None
    while result:
        best_match = _select_datastore(session, result, best_match,
                                       datastore_regex, storage_policy,
                                       allowed_ds_types)
        result = session._call_method(vutil, 'continue_retrieval', result)
    if best_match:
        return best_match

    # Nothing matched; raise the most specific error message available.
    if storage_policy:
        raise exception.DatastoreNotFound(
            _("Storage policy %s did not match any datastores")
            % storage_policy)
    if datastore_regex:
        raise exception.DatastoreNotFound(
            _("Datastore regex %s did not match any datastores")
            % datastore_regex.pattern)
    raise exception.DatastoreNotFound()
def _get_allowed_datastores(data_stores, datastore_regex):
    """Return Datastore objects for every valid entry in a retrieve result."""
    allowed = []
    for obj_content in data_stores.objects:
        # The propSet attribute "need not be set" by the returning API.
        if not hasattr(obj_content, 'propSet'):
            continue
        props = vm_util.propset_dict(obj_content.propSet)
        if _is_datastore_valid(props, datastore_regex,
                               ALL_SUPPORTED_DS_TYPES):
            allowed.append(ds_obj.Datastore(ref=obj_content.obj,
                                            name=props['summary.name']))
    return allowed
def get_available_datastores(session, cluster=None, datastore_regex=None):
    """Get the datastore list and choose the first local storage."""
    ds = session._call_method(vim_util, "get_dynamic_property", cluster,
                              "ClusterComputeResource", "datastore")
    if not ds:
        return []
    mors = ds.ManagedObjectReference
    # NOTE(garyk): use utility method to retrieve remote objects
    batch = session._call_method(
        vim_util, "get_properties_for_a_collection_of_objects",
        "Datastore", mors,
        ["summary.type", "summary.name", "summary.accessible",
         "summary.maintenanceMode"])

    # Accumulate valid datastores across every retrieve-result page.
    allowed = []
    while batch:
        allowed.extend(_get_allowed_datastores(batch, datastore_regex))
        batch = session._call_method(vutil, 'continue_retrieval', batch)
    return allowed
def get_allowed_datastore_types(disk_type):
    """Return the set of datastore types usable for the given disk type.

    Only stream-optimized disks may use every supported type; for any other
    disk type vSAN is excluded from the result.
    """
    if disk_type != constants.DISK_TYPE_STREAM_OPTIMIZED:
        return ALL_SUPPORTED_DS_TYPES - frozenset(
            [constants.DATASTORE_TYPE_VSAN])
    return ALL_SUPPORTED_DS_TYPES
def file_delete(session, ds_path, dc_ref):
    """Delete a datastore file and wait for the server task to finish."""
    LOG.debug("Deleting the datastore file %s", ds_path)
    vim = session.vim
    task = session._call_method(vim,
                                "DeleteDatastoreFile_Task",
                                vim.service_content.fileManager,
                                name=str(ds_path),
                                datacenter=dc_ref)
    session._wait_for_task(task)
    LOG.debug("Deleted the datastore file")
def file_copy(session, src_file, src_dc_ref, dst_file, dst_dc_ref):
    """Copy a datastore file, possibly between datacenters, and wait."""
    LOG.debug("Copying the datastore file from %(src)s to %(dst)s",
              {'src': src_file, 'dst': dst_file})
    vim = session.vim
    task = session._call_method(vim,
                                "CopyDatastoreFile_Task",
                                vim.service_content.fileManager,
                                sourceName=src_file,
                                sourceDatacenter=src_dc_ref,
                                destinationName=dst_file,
                                destinationDatacenter=dst_dc_ref)
    session._wait_for_task(task)
    LOG.debug("Copied the datastore file")
def disk_move(session, dc_ref, src_file, dst_file):
    """Moves the source virtual disk to the destination.

    The list of possible faults that the server can return on error
    include:

    * CannotAccessFile: Thrown if the source file or folder cannot be
      moved because of insufficient permissions.
    * FileAlreadyExists: Thrown if a file with the given name already
      exists at the destination.
    * FileFault: Thrown if there is a generic file error
    * FileLocked: Thrown if the source file or folder is currently
      locked or in use.
    * FileNotFound: Thrown if the file or folder specified by sourceName
      is not found.
    * InvalidDatastore: Thrown if the operation cannot be performed on
      the source or destination datastores.
    * NoDiskSpace: Thrown if there is not enough space available on the
      destination datastore.
    * RuntimeFault: Thrown if any type of runtime fault is thrown that
      is not covered by the other faults; for example,
      a communication error.
    """
    LOG.debug("Moving virtual disk from %(src)s to %(dst)s.",
              {'src': src_file, 'dst': dst_file})
    vim = session.vim
    task = session._call_method(vim,
                                "MoveVirtualDisk_Task",
                                vim.service_content.virtualDiskManager,
                                sourceName=str(src_file),
                                sourceDatacenter=dc_ref,
                                destName=str(dst_file),
                                destDatacenter=dc_ref,
                                force=False)
    session._wait_for_task(task)
    LOG.info(_LI("Moved virtual disk from %(src)s to %(dst)s."),
             {'src': src_file, 'dst': dst_file})
def disk_copy(session, dc_ref, src_file, dst_file):
    """Copies the source virtual disk to the destination."""
    LOG.debug("Copying virtual disk from %(src)s to %(dst)s.",
              {'src': src_file, 'dst': dst_file})
    vim = session.vim
    task = session._call_method(vim,
                                "CopyVirtualDisk_Task",
                                vim.service_content.virtualDiskManager,
                                sourceName=str(src_file),
                                sourceDatacenter=dc_ref,
                                destName=str(dst_file),
                                destDatacenter=dc_ref,
                                force=False)
    session._wait_for_task(task)
    LOG.info(_LI("Copied virtual disk from %(src)s to %(dst)s."),
             {'src': src_file, 'dst': dst_file})
def disk_delete(session, dc_ref, file_path):
    """Deletes a virtual disk."""
    LOG.debug("Deleting virtual disk %s", file_path)
    vim = session.vim
    task = session._call_method(vim,
                                "DeleteVirtualDisk_Task",
                                vim.service_content.virtualDiskManager,
                                name=str(file_path),
                                datacenter=dc_ref)
    session._wait_for_task(task)
    LOG.info(_LI("Deleted virtual disk %s."), file_path)
def file_move(session, dc_ref, src_file, dst_file):
    """Moves the source file or folder to the destination.

    The list of possible faults that the server can return on error
    include:

    * CannotAccessFile: Thrown if the source file or folder cannot be
      moved because of insufficient permissions.
    * FileAlreadyExists: Thrown if a file with the given name already
      exists at the destination.
    * FileFault: Thrown if there is a generic file error
    * FileLocked: Thrown if the source file or folder is currently
      locked or in use.
    * FileNotFound: Thrown if the file or folder specified by sourceName
      is not found.
    * InvalidDatastore: Thrown if the operation cannot be performed on
      the source or destination datastores.
    * NoDiskSpace: Thrown if there is not enough space available on the
      destination datastore.
    * RuntimeFault: Thrown if any type of runtime fault is thrown that
      is not covered by the other faults; for example,
      a communication error.
    """
    LOG.debug("Moving file from %(src)s to %(dst)s.",
              {'src': src_file, 'dst': dst_file})
    vim = session.vim
    task = session._call_method(vim,
                                "MoveDatastoreFile_Task",
                                vim.service_content.fileManager,
                                sourceName=str(src_file),
                                sourceDatacenter=dc_ref,
                                destinationName=str(dst_file),
                                destinationDatacenter=dc_ref)
    session._wait_for_task(task)
    LOG.debug("File moved")
def search_datastore_spec(client_factory, file_name):
    """Builds the datastore search spec for a single file name."""
    spec = client_factory.create('ns0:HostDatastoreBrowserSearchSpec')
    spec.matchPattern = [file_name]
    details = client_factory.create('ns0:FileQueryFlags')
    # Only the file size is of interest to the callers; skip the rest.
    details.fileOwner = False
    details.fileSize = True
    details.fileType = False
    details.modification = False
    spec.details = details
    return spec
def file_exists(session, ds_browser, ds_path, file_name):
    """Check if the file exists on the datastore."""
    factory = session.vim.client.factory
    spec = search_datastore_spec(factory, file_name)
    task = session._call_method(session.vim,
                                "SearchDatastore_Task",
                                ds_browser,
                                datastorePath=str(ds_path),
                                searchSpec=spec)
    try:
        task_info = session._wait_for_task(task)
    except vexc.FileNotFoundException:
        # The directory itself does not exist.
        return False
    matched = getattr(task_info.result, 'file', False)
    return matched and task_info.result.file[0].path == file_name
def file_size(session, ds_browser, ds_path, file_name):
    """Return the size of the specified file, or None if it is not found."""
    factory = session.vim.client.factory
    spec = search_datastore_spec(factory, file_name)
    task = session._call_method(session.vim,
                                "SearchDatastore_Task",
                                ds_browser,
                                datastorePath=str(ds_path),
                                searchSpec=spec)
    task_info = session._wait_for_task(task)
    if hasattr(task_info.result, 'file'):
        return task_info.result.file[0].fileSize
def mkdir(session, ds_path, dc_ref):
    """Creates a directory at the path specified. If it is just "NAME",
    then a directory with this name is created at the topmost level of the
    DataStore.
    """
    LOG.debug("Creating directory with path %s", ds_path)
    file_manager = session.vim.service_content.fileManager
    # MakeDirectory is synchronous; parent directories are created as needed.
    session._call_method(session.vim, "MakeDirectory", file_manager,
                         name=str(ds_path), datacenter=dc_ref,
                         createParentDirectories=True)
    LOG.debug("Created directory with path %s", ds_path)
def get_sub_folders(session, ds_browser, ds_path):
    """Return a set of subfolders for a path on a datastore.

    If the path does not exist then an empty set is returned.
    """
    task = session._call_method(session.vim,
                                "SearchDatastore_Task",
                                ds_browser,
                                datastorePath=str(ds_path))
    try:
        task_info = session._wait_for_task(task)
    except vexc.FileNotFoundException:
        return set()
    # Populate the folder entries, if any were returned.
    if not hasattr(task_info.result, 'file'):
        return set()
    return {entry.path for entry in task_info.result.file}
def _filter_datastores_matching_storage_policy(session, data_stores,
                                               storage_policy):
    """Get datastores matching the given storage policy.

    :param data_stores: the list of retrieve result wrapped datastore objects
    :param storage_policy: the storage policy name
    :return: the retrieve result restricted to conforming datastores, or
        None when the policy cannot be resolved or nothing matches
    """
    profile_id = pbm.get_profile_id_by_name(session, storage_policy)
    if profile_id:
        factory = session.pbm.client.factory
        ds_mors = [oc.obj for oc in data_stores.objects]
        hubs = pbm.convert_datastores_to_hubs(factory, ds_mors)
        matching_hubs = pbm.filter_hubs_by_profile(session, hubs,
                                                   profile_id)
        if matching_hubs:
            matching_ds = pbm.filter_datastores_by_hubs(matching_hubs,
                                                        ds_mors)
            # Narrow the result set in place to the matching datastores.
            data_stores.objects = [oc for oc in data_stores.objects
                                   if oc.obj in matching_ds]
            return data_stores
    # NOTE(review): this message is also reached when the policy resolves but
    # matches no hubs — the wording only describes the lookup failure.
    LOG.error(_LE("Unable to retrieve storage policy with name %s"),
              storage_policy)
| apache-2.0 |
geobretagne/mdchecker | app/runserver.py | 1 | 1687 | import optparse
from mdchecker import app
# courtesy of http://flask.pocoo.org/snippets/133/
def flaskrun(app, default_host="127.0.0.1",
             default_port="5000"):
    """
    Takes a flask.Flask instance and runs it. Parses
    command-line flags to configure the app.
    """
    # Set up the command-line options.
    parser = optparse.OptionParser()
    parser.add_option("-H", "--host",
                      default=default_host,
                      help="Hostname of the Flask app " +
                           "[default %s]" % default_host)
    parser.add_option("-P", "--port",
                      default=default_port,
                      help="Port for the Flask app " +
                           "[default %s]" % default_port)
    # Debug/profile switches are useful for development but a bit dangerous,
    # so they are hidden from the --help output.
    parser.add_option("-d", "--debug",
                      action="store_true", dest="debug",
                      help=optparse.SUPPRESS_HELP)
    parser.add_option("-p", "--profile",
                      action="store_true", dest="profile",
                      help=optparse.SUPPRESS_HELP)

    options, _ = parser.parse_args()

    # Profiling wraps the WSGI app and implies debug mode.
    if options.profile:
        from werkzeug.contrib.profiler import ProfilerMiddleware
        app.config['PROFILE'] = True
        app.wsgi_app = ProfilerMiddleware(app.wsgi_app,
                                          restrictions=[30])
        options.debug = True

    app.run(debug=options.debug,
            host=options.host,
            port=int(options.port))
flaskrun(app)
| gpl-3.0 |
elliot79313/tra-tracking-on-gae | simplejson/tests/test_item_sort_key.py | 140 | 1127 | from unittest import TestCase
import simplejson as json
from operator import itemgetter
class TestItemSortKey(TestCase):
    """Tests for the ``item_sort_key`` argument of simplejson's dumps()."""

    def test_simple_first(self):
        # json.simple_first orders scalar-valued keys before
        # container-valued keys, alphabetically within each group.
        a = {'a': 1, 'c': 5, 'jack': 'jill', 'pick': 'axe', 'array': [1, 5, 6, 9], 'tuple': (83, 12, 3), 'crate': 'dog', 'zeak': 'oh'}
        self.assertEqual(
            '{"a": 1, "c": 5, "crate": "dog", "jack": "jill", "pick": "axe", "zeak": "oh", "array": [1, 5, 6, 9], "tuple": [83, 12, 3]}',
            json.dumps(a, item_sort_key=json.simple_first))

    def test_case(self):
        # itemgetter(0) sorts by the raw key string (uppercase letters sort
        # before lowercase); the lambda variant sorts case-insensitively.
        a = {'a': 1, 'c': 5, 'Jack': 'jill', 'pick': 'axe', 'Array': [1, 5, 6, 9], 'tuple': (83, 12, 3), 'crate': 'dog', 'zeak': 'oh'}
        self.assertEqual(
            '{"Array": [1, 5, 6, 9], "Jack": "jill", "a": 1, "c": 5, "crate": "dog", "pick": "axe", "tuple": [83, 12, 3], "zeak": "oh"}',
            json.dumps(a, item_sort_key=itemgetter(0)))
        self.assertEqual(
            '{"a": 1, "Array": [1, 5, 6, 9], "c": 5, "crate": "dog", "Jack": "jill", "pick": "axe", "tuple": [83, 12, 3], "zeak": "oh"}',
            json.dumps(a, item_sort_key=lambda kv: kv[0].lower()))
| mit |
EvanK/ansible | test/units/modules/network/f5/test_bigip_profile_persistence_src_addr.py | 21 | 4058 | # -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
if sys.version_info < (2, 7):
pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_profile_persistence_src_addr import ApiParameters
from library.modules.bigip_profile_persistence_src_addr import ModuleParameters
from library.modules.bigip_profile_persistence_src_addr import ModuleManager
from library.modules.bigip_profile_persistence_src_addr import ArgumentSpec
# In Ansible 2.8, Ansible changed import paths.
from test.units.compat import unittest
from test.units.compat.mock import Mock
from test.units.compat.mock import patch
from test.units.modules.utils import set_module_args
except ImportError:
from ansible.modules.network.f5.bigip_profile_persistence_src_addr import ApiParameters
from ansible.modules.network.f5.bigip_profile_persistence_src_addr import ModuleParameters
from ansible.modules.network.f5.bigip_profile_persistence_src_addr import ModuleManager
from ansible.modules.network.f5.bigip_profile_persistence_src_addr import ArgumentSpec
# Ansible 2.8 imports
from units.compat import unittest
from units.compat.mock import Mock
from units.compat.mock import patch
from units.modules.utils import set_module_args
# Directory holding recorded fixture files, plus a per-path parse cache.
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}


def load_fixture(name):
    """Return fixture *name*, parsed as JSON when possible, using a cache."""
    fixture = os.path.join(fixture_path, name)
    if fixture in fixture_data:
        return fixture_data[fixture]
    with open(fixture) as handle:
        contents = handle.read()
    try:
        contents = json.loads(contents)
    except Exception:
        # Non-JSON fixtures are returned as raw text.
        pass
    fixture_data[fixture] = contents
    return contents
class TestParameters(unittest.TestCase):
    """Unit tests for the module/API parameter adapter classes."""

    def test_module_parameters(self):
        # Playbook-style inputs: booleans are expected to be normalised to
        # 'yes'/'no' strings and the parent profile fully qualified with
        # the /Common partition.
        args = dict(
            name='foo',
            parent='bar',
            match_across_services=False,
            match_across_virtuals=True,
            match_across_pools=False,
            hash_algorithm='carp',
            entry_timeout=100,
            override_connection_limit=True
        )

        p = ModuleParameters(params=args)
        assert p.name == 'foo'
        assert p.parent == '/Common/bar'
        assert p.match_across_services == 'no'
        assert p.match_across_virtuals == 'yes'
        assert p.match_across_pools == 'no'
        assert p.hash_algorithm == 'carp'
        assert p.entry_timeout == 100
        assert p.override_connection_limit == 'yes'

    def test_api_parameters(self):
        # Values come from a recorded BIG-IP REST response fixture.
        args = load_fixture('load_ltm_profile_persistence_src_addr_1.json')

        p = ApiParameters(params=args)
        assert p.name == 'source_addr'
        assert p.match_across_pools == 'no'
        assert p.match_across_services == 'no'
        assert p.match_across_virtuals == 'no'
class TestManager(unittest.TestCase):
    """Tests for ModuleManager.exec_module with the device API mocked out."""

    def setUp(self):
        # Fresh argument spec for every test.
        self.spec = ArgumentSpec()

    def test_create(self, *args):
        # Configure the arguments that would be sent to the Ansible module
        set_module_args(dict(
            name='foo',
            match_across_virtuals='yes',
            parent='bar',
            password='password',
            server='localhost',
            user='admin'
        ))

        module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode
        )
        mm = ModuleManager(module=module)

        # Override methods to force specific logic in the module to happen:
        # the profile does not exist yet and device creation succeeds.
        mm.exists = Mock(return_value=False)
        mm.create_on_device = Mock(return_value=True)

        results = mm.exec_module()

        assert results['changed'] is True
        assert results['match_across_virtuals'] == 'yes'
| gpl-3.0 |
fuhongliang/erpnext | erpnext/accounts/doctype/account/chart_of_accounts/import_from_openerp.py | 87 | 8747 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
"""
Import chart of accounts from OpenERP sources
"""
from __future__ import unicode_literals
import os, json
import ast
from xml.etree import ElementTree as ET
from frappe.utils.csvutils import read_csv_content
import frappe
# Path to a local OpenERP/Odoo addons checkout that the charts are read from.
# NOTE(review): hard-coded developer path — adjust before running elsewhere.
path = "/Users/nabinhait/projects/odoo/addons"

# Module-level working state; accounts/charts are reset and mutated by go()
# for each l10n_* country module processed.
accounts = {}
charts = {}
all_account_types = []
all_roots = {}
def go():
    """Walk every l10n_* addon under ``path``, import its chart of accounts
    data and generate the chart files.
    """
    global accounts, charts
    default_account_types = get_default_account_types()

    # Collect all country-specific localisation module directories.
    country_dirs = []
    for basepath, folders, files in os.walk(path):
        basename = os.path.basename(basepath)
        if basename.startswith("l10n_"):
            country_dirs.append(basename)

    for country_dir in country_dirs:
        # Reset module-level state for each country module.
        accounts, charts = {}, {}
        country_path = os.path.join(path, country_dir)
        # __openerp__.py is a python-literal dict describing the addon.
        manifest = ast.literal_eval(open(os.path.join(country_path, "__openerp__.py")).read())
        data_files = manifest.get("data", []) + manifest.get("init_xml", []) + \
            manifest.get("update_xml", [])
        files_path = [os.path.join(country_path, d) for d in data_files]
        xml_roots = get_xml_roots(files_path)
        csv_content = get_csv_contents(files_path)
        # CSV-defined record ids are namespaced with the module name.
        prefix = country_dir if csv_content else None
        account_types = get_account_types(xml_roots.get("account.account.type", []),
            csv_content.get("account.account.type", []), prefix)
        account_types.update(default_account_types)
        if xml_roots:
            make_maps_for_xml(xml_roots, account_types, country_dir)
        if csv_content:
            make_maps_for_csv(csv_content, account_types, country_dir)
        make_account_trees()
        make_charts()

    # Written once, after all countries have been processed.
    create_all_roots_file()
def get_default_account_types():
    """Parse the base ``account`` module's default account-type records."""
    default_root = ET.parse(
        os.path.join(path, "account", "data", "data_account_type.xml")).getroot()
    return get_account_types([default_root], None, prefix="account")
def get_xml_roots(files_path):
    """Collect XML roots containing chart/account/type records, keyed by model."""
    xml_roots = frappe._dict()
    relevant_models = ("account.account.template", "account.chart.template",
                       "account.account.type")
    for filepath in files_path:
        if not os.path.basename(filepath).endswith(".xml"):
            continue
        root = ET.parse(filepath).getroot()
        for node in root[0].findall("record"):
            # One relevant record is enough to register the whole root.
            if node.get("model") in relevant_models:
                xml_roots.setdefault(node.get("model"), []).append(root)
                break
    return xml_roots
def get_csv_contents(files_path):
    """Read every relevant CSV data file and return its parsed rows.

    Returns a dict mapping the Odoo model name (derived from the file name)
    to a list of row tables as produced by ``read_csv_content``.  Files that
    fail to parse are skipped silently (best effort, as before).
    """
    csv_content = {}
    for filepath in files_path:
        fname = os.path.basename(filepath)
        for file_type in ["account.account.template", "account.account.type",
                "account.chart.template"]:
            if fname.startswith(file_type) and fname.endswith(".csv"):
                with open(filepath, "r") as csvfile:
                    try:
                        csv_content.setdefault(file_type, [])\
                            .append(read_csv_content(csvfile.read()))
                    # BUGFIX: "except Exception, e" is Python-2-only syntax
                    # (and ``e`` was unused); this form works on 2 and 3.
                    except Exception:
                        continue
    return csv_content
def get_account_types(root_list, csv_content, prefix=None):
    """Build a map from account-type record id to ``{"account_type": ...}``.

    ``root_list`` holds parsed XML roots with ``account.account.type``
    records; ``csv_content`` is a row table (first row = header) from the
    corresponding CSV file.  ``prefix`` (the module name) namespaces the ids.
    """
    types = {}
    # Odoo account-type "code" -> ERPNext account type.
    account_type_map = {
        'cash': 'Cash',
        'bank': 'Bank',
        'tr_cash': 'Cash',
        'tr_bank': 'Bank',
        'receivable': 'Receivable',
        'tr_receivable': 'Receivable',
        'account rec': 'Receivable',
        'payable': 'Payable',
        'tr_payable': 'Payable',
        'equity': 'Equity',
        'stocks': 'Stock',
        'stock': 'Stock',
        'tax': 'Tax',
        'tr_tax': 'Tax',
        'tax-out': 'Tax',
        'tax-in': 'Tax',
        'charges_personnel': 'Chargeable',
        'fixed asset': 'Fixed Asset',
        'cogs': 'Cost of Goods Sold',
    }
    for root in root_list:
        for node in root[0].findall("record"):
            if node.get("model") == "account.account.type":
                data = {}
                for field in node.findall("field"):
                    if field.get("name") == "code" and field.text.lower() != "none" \
                            and account_type_map.get(field.text):
                        data["account_type"] = account_type_map[field.text]
                node_id = prefix + "." + node.get("id") if prefix else node.get("id")
                types[node_id] = data
    if csv_content and csv_content[0][0] == "id":
        for row in csv_content[1:]:
            row_dict = dict(zip(csv_content[0], row))
            data = {}
            if row_dict.get("code") and account_type_map.get(row_dict["code"]):
                data["account_type"] = account_type_map[row_dict["code"]]
            # BUGFIX: the record id lives in the CSV row (row_dict), not in
            # the freshly built ``data`` dict (which only ever holds
            # "account_type"), so the original never registered any
            # CSV-defined account type.
            if data and row_dict.get("id"):
                node_id = prefix + "." + row_dict["id"] if prefix else row_dict["id"]
                types[node_id] = data
    return types
def make_maps_for_xml(xml_roots, account_types, country_dir):
    """Populate the module-level ``accounts`` and ``charts`` maps from XML
    ``account.account.template`` / ``account.chart.template`` records."""
    # BUGFIX: dict.iteritems() is Python-2-only; items() works on 2 and 3.
    for model, root_list in xml_roots.items():
        for root in root_list:
            for node in root[0].findall("record"):
                if node.get("model") == "account.account.template":
                    data = {}
                    for field in node.findall("field"):
                        if field.get("name") == "name":
                            data["name"] = field.text
                        if field.get("name") == "parent_id":
                            # parent may be given as a ref or an eval expression
                            parent_id = field.get("ref") or field.get("eval")
                            data["parent_id"] = parent_id
                        if field.get("name") == "user_type":
                            value = field.get("ref")
                            if account_types.get(value, {}).get("account_type"):
                                data["account_type"] = account_types[value]["account_type"]
                                if data["account_type"] not in all_account_types:
                                    all_account_types.append(data["account_type"])
                    data["children"] = []
                    accounts[node.get("id")] = data
                if node.get("model") == "account.chart.template":
                    data = {}
                    for field in node.findall("field"):
                        if field.get("name") == "name":
                            data["name"] = field.text
                        if field.get("name") == "account_root_id":
                            data["account_root_id"] = field.get("ref")
                    data["id"] = country_dir
                    charts.setdefault(node.get("id"), {}).update(data)
def make_maps_for_csv(csv_content, account_types, country_dir):
    """Populate the module-level ``accounts`` and ``charts`` maps from the
    CSV row tables (first row of each table is the header)."""
    for content in csv_content.get("account.account.template", []):
        for row in content[1:]:
            data = dict(zip(content[0], row))
            account = {
                "name": data.get("name"),
                # Odoo exports use either ":id" or "/id" column suffixes.
                "parent_id": data.get("parent_id:id") or data.get("parent_id/id"),
                "children": []
            }
            user_type = data.get("user_type/id") or data.get("user_type:id")
            if account_types.get(user_type, {}).get("account_type"):
                account["account_type"] = account_types[user_type]["account_type"]
                if account["account_type"] not in all_account_types:
                    all_account_types.append(account["account_type"])
            accounts[data.get("id")] = account
            # A parentless account tied to a chart template is that chart's root.
            if not account.get("parent_id") and data.get("chart_template_id:id"):
                chart_id = data.get("chart_template_id:id")
                charts.setdefault(chart_id, {}).update({"account_root_id": data.get("id")})
    for content in csv_content.get("account.chart.template", []):
        for row in content[1:]:
            if row:
                data = dict(zip(content[0], row))
                charts.setdefault(data.get("id"), {}).update({
                    "account_root_id": data.get("account_root_id:id") or \
                        data.get("account_root_id/id"),
                    "name": data.get("name"),
                    "id": country_dir
                })
def make_account_trees():
    """Build tree hierarchy: attach each account to its parent (keyed by the
    account's own name) and drop the bookkeeping fields once consumed."""
    for id in accounts.keys():
        account = accounts[id]
        if account.get("parent_id"):
            if accounts.get(account["parent_id"]):
                # accounts[account["parent_id"]]["children"].append(account)
                accounts[account["parent_id"]][account["name"]] = account
            # NOTE(review): original indentation is ambiguous — these dels are
            # assumed to run for every account with a parent_id, even when the
            # parent record was not found; confirm against upstream history.
            del account["parent_id"]
            del account["name"]
    # remove empty children
    for id in accounts.keys():
        if "children" in accounts[id] and not accounts[id].get("children"):
            del accounts[id]["children"]
def make_charts():
    """Write chart files in app/setup/doctype/company/charts.

    A chart is only written when its source has both a name and a known root
    account, and the existing file is empty or marked inactive/enabled
    ("is_active"/"disabled" both "No").
    """
    for chart_id in charts:
        src = charts[chart_id]
        if not src.get("name") or not src.get("account_root_id"):
            continue
        if not src["account_root_id"] in accounts:
            continue
        # src["id"] is "l10n_<country>"; strip the "l10n_" prefix.
        filename = src["id"][5:] + "_" + chart_id
        # BUGFIX: print statement is Python-2-only; call form works on 2 and 3.
        print("building " + filename)
        chart = {}
        chart["name"] = src["name"]
        chart["country_code"] = src["id"][5:]
        chart["tree"] = accounts[src["account_root_id"]]
        # BUGFIX: iterate over a snapshot — keys are popped while iterating,
        # which raises RuntimeError on a live dict view under Python 3.
        for key, val in list(chart["tree"].items()):
            if key in ["name", "parent_id"]:
                chart["tree"].pop(key)
            if type(val) == dict:
                val["root_type"] = ""
        if chart:
            fpath = os.path.join("erpnext", "erpnext", "accounts", "doctype", "account",
                "chart_of_accounts", filename + ".json")
            with open(fpath, "r") as chartfile:
                old_content = chartfile.read()
                if not old_content or (json.loads(old_content).get("is_active", "No") == "No"
                        and json.loads(old_content).get("disabled", "No") == "No"):
                    with open(fpath, "w") as chartfile:
                        chartfile.write(json.dumps(chart, indent=4, sort_keys=True))
                all_roots.setdefault(filename, list(chart["tree"].keys()))
def create_all_roots_file():
    """Write ``all_roots`` to ``all_roots.txt``: one section per chart file,
    listing its sorted root account names."""
    with open('all_roots.txt', 'w') as f:
        for filename, roots in sorted(all_roots.items()):
            f.write(filename)
            f.write('\n----------------------\n')
            for r in sorted(roots):
                # NOTE(review): Python-2 idiom — under Python 3, writing bytes
                # to a text-mode file raises TypeError; confirm interpreter.
                f.write(r.encode('utf-8'))
                f.write('\n')
            f.write('\n\n\n')
# Script entry point: convert every localisation module found under `path`.
if __name__ == "__main__":
    go()
| agpl-3.0 |
talbrecht/pism_pik07 | doc/site-packages/pybtex/style/sorting/none.py | 4 | 1296 | # Copyright (c) 2006, 2007, 2008, 2009, 2010, 2011, 2012 Andrey Golovizin
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from pybtex.style.sorting import BaseSortingStyle
class SortingStyle(BaseSortingStyle):
    """Sorting style that keeps entries in their original (citation) order."""

    name = 'none'

    def sort(self, entries):
        """Return *entries* unchanged; no sorting is applied."""
        return entries
| gpl-3.0 |
gale320/newfies-dialer | newfies/dialer_contact/views.py | 3 | 22781 | #
# Newfies-Dialer License
# http://www.newfies-dialer.org
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (C) 2011-2014 Star2Billing S.L.
#
# The Initial Developer of the Original Code is
# Arezqui Belaid <info@star2billing.com>
#
from django.contrib.auth.decorators import login_required, \
permission_required
from django.http import HttpResponseRedirect, HttpResponse, \
Http404
from django.shortcuts import render_to_response, get_object_or_404
from django.template.context import RequestContext
from django.utils.translation import ugettext as _
from django.db.models import Q
from django.db.models import Count
from dialer_contact.models import Phonebook, Contact
from dialer_contact.forms import ContactSearchForm, Contact_fileImport, \
PhonebookForm, ContactForm
from dialer_contact.constants import PHONEBOOK_COLUMN_NAME, CONTACT_COLUMN_NAME
from dialer_contact.constants import STATUS_CHOICE
from dialer_campaign.function_def import check_dialer_setting,\
dialer_setting_limit, user_dialer_setting_msg, type_field_chk
from user_profile.constants import NOTIFICATION_NAME
from frontend_notification.views import frontend_send_notification
from common.common_functions import striplist, getvar,\
get_pagination_vars, unset_session_var
import csv
import json
@permission_required('dialer_contact.view_phonebook', login_url='/')
@login_required
def phonebook_list(request):
    """List the phonebooks belonging to the logged-in user.

    Renders ``frontend/phonebook/list.html`` with the user's phonebooks
    (annotated with their contact counts) sorted per the request's
    pagination parameters.
    """
    pagination = get_pagination_vars(
        request, ['id', 'name', 'updated_date'], 'id')
    phonebooks = (Phonebook.objects
        .annotate(contact_count=Count('contact'))
        .filter(user=request.user)
        .order_by(pagination['sort_order']))
    context = {
        'msg': request.session.get('msg'),
        'phonebook_list': phonebooks,
        'total_phonebook': phonebooks.count(),
        'PAGE_SIZE': pagination['PAGE_SIZE'],
        'PHONEBOOK_COLUMN_NAME': PHONEBOOK_COLUMN_NAME,
        'col_name_with_order': pagination['col_name_with_order'],
        'dialer_setting_msg': user_dialer_setting_msg(request.user),
    }
    # One-shot flash messages: clear after rendering them once.
    request.session['msg'] = ''
    request.session['error_msg'] = ''
    return render_to_response('frontend/phonebook/list.html', context,
                              context_instance=RequestContext(request))
@permission_required('dialer_contact.add_phonebook', login_url='/')
@login_required
def phonebook_add(request):
    """Create a new phonebook owned by the logged-in user.

    On a valid POST, saves the phonebook with the current user as owner and
    redirects to the phonebook list; otherwise renders the (possibly bound)
    form on ``frontend/phonebook/change.html``.
    """
    if request.method == 'POST':
        form = PhonebookForm(request.POST)
        if form.is_valid():
            new_phonebook = form.save(commit=False)
            new_phonebook.user = request.user
            new_phonebook.save()
            request.session["msg"] = _('"%(name)s" added.') %\
                {'name': request.POST['name']}
            return HttpResponseRedirect('/phonebook/')
    else:
        form = PhonebookForm()
    context = {
        'form': form,
        'action': 'add',
        'dialer_setting_msg': user_dialer_setting_msg(request.user),
    }
    return render_to_response('frontend/phonebook/change.html', context,
                              context_instance=RequestContext(request))
@login_required
def get_contact_count(request):
    """Return the total number of contacts in the phonebooks whose ids are
    passed via the repeated ``ids`` GET parameter.

    SECURITY FIX: the original interpolated the raw request values into an
    ``extra(where=...)`` clause, allowing SQL injection.  Ids are now coerced
    to integers and filtered through the ORM.
    """
    try:
        phonebook_ids = [int(el) for el in request.GET.getlist('ids')]
    except ValueError:
        # Non-numeric ids cannot match any phonebook.
        return HttpResponse(0)
    contact_count = Contact.objects.filter(
        phonebook__user=request.user,
        phonebook_id__in=phonebook_ids).count()
    return HttpResponse(contact_count)
@permission_required('dialer_contact.delete_phonebook', login_url='/')
@login_required
def phonebook_del(request, object_id):
    """Delete a phonebook (and its contacts) for the logged-in user.

    **Attributes**:

        * ``object_id`` - Selected phonebook object; ``0`` means a bulk
          delete of the ids posted in the ``select`` list.

    SECURITY FIX: bulk deletes previously interpolated the raw POSTed ids
    into an ``extra(where=...)`` clause (SQL injection) behind a bare
    ``except``.  Ids are now int-coerced and filtered through the ORM.
    """
    if int(object_id) != 0:
        # Single delete.
        phonebook = get_object_or_404(
            Phonebook, pk=object_id, user=request.user)
        # 1) delete all contacts belonging to the phonebook
        Contact.objects.filter(phonebook=phonebook).delete()
        # 2) delete the phonebook itself
        request.session["msg"] = _('"%(name)s" is deleted.')\
            % {'name': phonebook.name}
        phonebook.delete()
    else:
        # Bulk delete of the posted selection.
        try:
            phonebook_ids = [int(el) for el in request.POST.getlist('select')]
        except ValueError:
            raise Http404
        # 1) delete all contacts belonging to the selected phonebooks
        contact_qs = Contact.objects.filter(
            phonebook__user=request.user,
            phonebook_id__in=phonebook_ids)
        if contact_qs:
            contact_qs.delete()
        # 2) delete the phonebooks (scoped to the current user)
        phonebook_qs = Phonebook.objects.filter(
            user=request.user, id__in=phonebook_ids)
        if phonebook_qs:
            request.session["msg"] =\
                _('%(count)s phonebook(s) are deleted.')\
                % {'count': phonebook_qs.count()}
            phonebook_qs.delete()
    return HttpResponseRedirect('/phonebook/')
@permission_required('dialer_contact.change_phonebook', login_url='/')
@login_required
def phonebook_change(request, object_id):
    """Update or delete a phonebook owned by the logged-in user.

    A POST with the ``delete`` flag redirects to the delete view; otherwise
    a valid POST saves the edited phonebook and returns to the list.
    """
    phonebook = get_object_or_404(Phonebook, pk=object_id, user=request.user)
    if request.method == 'POST':
        if request.POST.get('delete'):
            return HttpResponseRedirect('/phonebook/del/%s/' % object_id)
        form = PhonebookForm(request.POST, instance=phonebook)
        if form.is_valid():
            form.save()
            request.session["msg"] = _('"%(name)s" is updated.') \
                % {'name': request.POST['name']}
            return HttpResponseRedirect('/phonebook/')
    else:
        form = PhonebookForm(instance=phonebook)
    context = {
        'form': form,
        'action': 'update',
        'dialer_setting_msg': user_dialer_setting_msg(request.user),
    }
    return render_to_response('frontend/phonebook/change.html', context,
                              context_instance=RequestContext(request))
@permission_required('dialer_contact.view_contact', login_url='/')
@login_required
def contact_list(request):
    """Contact list for the logged in user

    **Attributes**:

        * ``template`` - frontend/contact/list.html
        * ``form`` - ContactSearchForm

    **Logic Description**:

        * List all contacts from phonebooks belonging to the logged in user.
        * A POSTed search form stores its filters in the session so that
          subsequent pagination/sort GETs keep the same filters.
    """
    sort_col_field_list = ['id', 'phonebook', 'contact', 'status',
                           'first_name', 'last_name', 'email',
                           'updated_date']
    default_sort_field = 'id'
    pagination_data = get_pagination_vars(
        request, sort_col_field_list, default_sort_field)
    PAGE_SIZE = pagination_data['PAGE_SIZE']
    sort_order = pagination_data['sort_order']
    start_page = pagination_data['start_page']
    end_page = pagination_data['end_page']
    form = ContactSearchForm(request.user)
    phonebook_id_list = Phonebook.objects.values_list('id', flat=True)\
        .filter(user=request.user)
    search_tag = 1
    # Default (no-filter) search values.
    contact_no = ''
    contact_name = ''
    phonebook = ''
    contact_status = STATUS_CHOICE.ALL
    if request.method == 'POST':
        form = ContactSearchForm(request.user, request.POST)
        if form.is_valid():
            # Replace any previously stored filters with the new ones.
            field_list = ['contact_no', 'contact_name',
                          'contact_status', 'phonebook']
            unset_session_var(request, field_list)
            contact_no = getvar(request, 'contact_no', setsession=True)
            contact_name = getvar(request, 'contact_name', setsession=True)
            contact_status = getvar(request, 'contact_status', setsession=True)
            phonebook = getvar(request, 'phonebook', setsession=True)
    post_var_with_page = 0
    try:
        if request.GET.get('page') or request.GET.get('sort_by'):
            # Pagination/sort request: restore the filters from the session
            # and re-seed the search form with them.
            post_var_with_page = 1
            contact_no = request.session.get('session_contact_no')
            contact_name = request.session.get('session_contact_name')
            contact_status = request.session.get('session_contact_status')
            phonebook = request.session.get('session_phonebook')
            form = ContactSearchForm(request.user, initial={'contact_no': contact_no,
                                                            'contact_name': contact_name,
                                                            'status': contact_status,
                                                            'phonebook': phonebook})
        else:
            post_var_with_page = 1
            if request.method == 'GET':
                # Plain GET without pagination: start a fresh search.
                post_var_with_page = 0
    except:
        # NOTE(review): bare except silently ignores any session/form error
        # here — presumably defensive; confirm it is intentional.
        pass
    if post_var_with_page == 0:
        # default
        # unset session var
        field_list = ['contact_no', 'contact_name',
                      'contact_status', 'phonebook']
        unset_session_var(request, field_list)
    # Build the ORM filter kwargs from the active search values.
    kwargs = {}
    if phonebook and phonebook != '0':
        kwargs['phonebook'] = phonebook
    if contact_status and int(contact_status) != STATUS_CHOICE.ALL:
        kwargs['status'] = contact_status
    contact_no_type = '1'
    contact_no = type_field_chk(contact_no, contact_no_type, 'contact')
    for i in contact_no:
        kwargs[i] = contact_no[i]
    contact_list = []
    all_contact_list = []
    contact_count = 0
    if phonebook_id_list:
        contact_list = Contact.objects.values('id', 'phonebook__name', 'contact',
            'last_name', 'first_name', 'email', 'status', 'updated_date')\
            .filter(phonebook__in=phonebook_id_list)
        if kwargs:
            contact_list = contact_list.filter(**kwargs)
        if contact_name:
            # Search on contact name
            q = (Q(last_name__icontains=contact_name) |
                 Q(first_name__icontains=contact_name))
            if q:
                contact_list = contact_list.filter(q)
        all_contact_list = contact_list.order_by(sort_order)
        # Slice the current page out of the full result set.
        contact_list = all_contact_list[start_page:end_page]
        contact_count = all_contact_list.count()
    template = 'frontend/contact/list.html'
    data = {
        'contact_list': contact_list,
        'all_contact_list': all_contact_list,
        'total_contacts': contact_count,
        'PAGE_SIZE': PAGE_SIZE,
        'CONTACT_COLUMN_NAME': CONTACT_COLUMN_NAME,
        'col_name_with_order': pagination_data['col_name_with_order'],
        'msg': request.session.get('msg'),
        'error_msg': request.session.get('error_msg'),
        'form': form,
        'dialer_setting_msg': user_dialer_setting_msg(request.user),
        'search_tag': search_tag,
    }
    # One-shot flash messages: clear after rendering them once.
    request.session['msg'] = ''
    request.session['error_msg'] = ''
    return render_to_response(template, data,
                              context_instance=RequestContext(request))
@permission_required('dialer_contact.add_contact', login_url='/')
@login_required
def contact_add(request):
    """Add a contact to one of the logged-in user's phonebooks.

    Before saving, the user's dialer-setting contact limit is enforced; if
    it is reached, a notification is sent and the user is redirected back to
    the contact list.
    """
    error_msg = False
    if request.method == 'POST':
        # Enforce the per-user contact quota before accepting the POST.
        if request.user and check_dialer_setting(request, check_for="contact"):
            request.session['msg'] = \
                _("you have too many contacts. you are allowed a maximum of %(limit)s") % \
                {'limit': dialer_setting_limit(request, limit_for="contact")}
            # contact limit reached
            frontend_send_notification(request, NOTIFICATION_NAME.contact_limit_reached)
            return HttpResponseRedirect("/contact/")
        form = ContactForm(request.user, request.POST)
        if form.is_valid():
            form.save()
            request.session["msg"] = _('"%s" is added.') % request.POST['contact']
            return HttpResponseRedirect('/contact/')
        if len(request.POST['contact']) > 0:
            error_msg = _('"%s" cannot be added.') % request.POST['contact']
    else:
        form = ContactForm(request.user)
    context = {
        'form': form,
        'action': 'add',
        'error_msg': error_msg,
        'phonebook_count': Phonebook.objects.filter(user=request.user).count(),
        'dialer_setting_msg': user_dialer_setting_msg(request.user),
    }
    return render_to_response('frontend/contact/change.html', context,
                              context_instance=RequestContext(request))
@permission_required('dialer_contact.delete_contact', login_url='/')
@login_required
def contact_del(request, object_id):
    """Delete contact(s) for the logged-in user.

    **Attributes**:

        * ``object_id`` - Selected contact object; ``0`` means a bulk delete
          of the ids posted in the ``select`` list.

    SECURITY FIX: the bulk branch previously interpolated the raw POSTed ids
    into an ``extra(where=...)`` clause (SQL injection) and, unlike the
    single-delete branch, did not restrict the deletion to the current
    user's phonebooks.  Ids are now int-coerced, filtered through the ORM,
    and scoped to the requesting user.
    """
    if int(object_id) != 0:
        # Single delete, scoped to the user's phonebooks.
        contact = get_object_or_404(
            Contact, pk=object_id, phonebook__user=request.user)
        request.session["msg"] = _('"%s" is deleted.') % contact.contact
        contact.delete()
    else:
        # Bulk delete of the posted selection.
        try:
            contact_ids = [int(el) for el in request.POST.getlist('select')]
        except ValueError:
            raise Http404
        contact_qs = Contact.objects.filter(
            phonebook__user=request.user, id__in=contact_ids)
        if contact_qs:
            request.session["msg"] =\
                _('%s contact(s) are deleted.') % contact_qs.count()
            contact_qs.delete()
    return HttpResponseRedirect('/contact/')
@permission_required('dialer_contact.change_contact', login_url='/')
@login_required
def contact_change(request, object_id):
    """Update or delete a contact belonging to the logged-in user.

    A POST with the ``delete`` flag redirects to the delete view; otherwise
    a valid POST saves the edited contact and returns to the contact list.
    """
    contact = get_object_or_404(
        Contact, pk=object_id, phonebook__user=request.user)
    if request.method == 'POST':
        if request.POST.get('delete'):
            # Delete contact
            return HttpResponseRedirect('/contact/del/%s/' % object_id)
        # Update contact
        form = ContactForm(request.user, request.POST, instance=contact)
        if form.is_valid():
            form.save()
            request.session["msg"] = _('"%s" is updated.') % request.POST['contact']
            return HttpResponseRedirect('/contact/')
    else:
        form = ContactForm(request.user, instance=contact)
    context = {
        'form': form,
        'action': 'update',
        'dialer_setting_msg': user_dialer_setting_msg(request.user),
    }
    return render_to_response('frontend/contact/change.html', context,
                              context_instance=RequestContext(request))
@login_required
def contact_import(request):
    """Import CSV file of Contacts for the logged in user

    **Attributes**:

        * ``form`` - Contact_fileImport
        * ``template`` - frontend/contact/import_contact.html

    **Logic Description**:

        * Before adding contacts, check dialer setting limit if applicable
          to the user.
        * Add new contacts which will belong to the logged in user
          via csv file & get the result (upload success and failure
          statistics)

    **Important variable**:

        * total_rows - Total no. of records in the CSV file
        * contact_cnt - No. of records imported from the CSV file
    """
    # Check dialer setting limit
    if request.user and request.method == 'POST':
        # check Max Number of contacts
        if check_dialer_setting(request, check_for="contact"):
            request.session['msg'] = \
                _("you have too many contacts. you are allowed a maximum of %(limit)s") % \
                {'limit': dialer_setting_limit(request, limit_for="contact")}
            # contact limit reached
            frontend_send_notification(request, NOTIFICATION_NAME.contact_limit_reached)
            return HttpResponseRedirect("/contact/")
    form = Contact_fileImport(request.user)
    csv_data = ''
    msg = ''
    error_msg = ''
    success_import_list = []
    type_error_import_list = []
    contact_cnt = 0
    bulk_record = []
    if request.method == 'POST':
        form = Contact_fileImport(request.user, request.POST, request.FILES)
        if form.is_valid():
            # col_no - field name
            # 0 - contact
            # 1 - last_name
            # 2 - first_name
            # 3 - email
            # 4 - description
            # 5 - status
            # 6 - address
            # 7 - city
            # 8 - state
            # 9 - country
            # 10 - unit_number
            # 11 - additional_vars
            # To count total rows of CSV file
            records = csv.reader(request.FILES['csv_file'],
                                 delimiter='|', quotechar='"')
            total_rows = len(list(records))
            BULK_SIZE = 1000
            # Re-read the upload from the start for the actual import pass.
            csv_data = csv.reader(request.FILES['csv_file'],
                                  delimiter='|', quotechar='"')
            #Get Phonebook Obj
            phonebook = get_object_or_404(
                Phonebook, pk=request.POST['phonebook'],
                user=request.user)
            #Read each Row
            for row in csv_data:
                row = striplist(row)
                # NOTE(review): str(row[0]) == 0 compares a string with an
                # int and is always False; presumably meant to skip rows
                # whose first cell is "0" — confirm intent.
                if not row or str(row[0]) == 0:
                    continue
                #Check field type
                if not int(row[5]):
                    error_msg = _("invalid value for import! please check the import samples or phonebook is not valid")
                    type_error_import_list.append(row)
                    break
                if len(row[9]) > 2:
                    error_msg = _("invalid value for country code, it needs to be a valid ISO 3166-1 alpha-2 codes (http://en.wikipedia.org/wiki/ISO_3166-1)")
                    type_error_import_list.append(row)
                    break
                row_11 = ''
                if row[11]:
                    # additional_vars is optional JSON; fall back to '' when
                    # it does not parse.
                    try:
                        row_11 = json.loads(row[11])
                    except:
                        row_11 = ''
                bulk_record.append(
                    Contact(
                        phonebook=phonebook,
                        contact=row[0],
                        last_name=row[1],
                        first_name=row[2],
                        email=row[3],
                        description=row[4],
                        status=int(row[5]),
                        address=row[6],
                        city=row[7],
                        state=row[8],
                        country=row[9],  # Note: country needs to be a country code (CA, ES)
                        unit_number=row[10],
                        additional_vars=row_11)
                )
                contact_cnt = contact_cnt + 1
                if contact_cnt < 100:
                    #We want to display only 100 lines of the success import
                    success_import_list.append(row)
                if contact_cnt % BULK_SIZE == 0:
                    #Bulk insert
                    Contact.objects.bulk_create(bulk_record)
                    bulk_record = []
            # remaining record
            Contact.objects.bulk_create(bulk_record)
            bulk_record = []
    #check if there is contact imported
    if contact_cnt > 0:
        msg = _('%(contact_cnt)s contact(s) have been uploaded successfully out of %(total_rows)s row(s)!') \
            % {'contact_cnt': contact_cnt,
               'total_rows': total_rows}
    data = RequestContext(request, {
        'form': form,
        'csv_data': csv_data,
        'msg': msg,
        'error_msg': error_msg,
        'success_import_list': success_import_list,
        'type_error_import_list': type_error_import_list,
        'dialer_setting_msg': user_dialer_setting_msg(request.user),
    })
    template = 'frontend/contact/import_contact.html'
    return render_to_response(template, data,
                              context_instance=RequestContext(request))
| mpl-2.0 |
oskarnyqvist/arguments | argument/main.py | 1 | 8354 | import sys
class Mux(object):
    """Dispatches a command-line sub-command to a registered parser.

    ``parse`` looks up ``argv[1]`` among the registered commands and
    delegates the remaining arguments to that command's own ``parse``.
    """

    def __init__(self, d=None):
        # BUGFIX: the default ``d=None`` used to crash on ``None.items()``.
        self.commands = {}
        if d:
            for k, v in d.items():
                self.commands[k] = v

    def match(self, name, a):
        """Register parser ``a`` under sub-command ``name``."""
        self.commands[name] = a

    def parse(self, args=None):
        """Dispatch to the sub-command named in ``args[1]``.

        Returns the delegated ``(results, errors)`` pair, or
        ``(None, [IndexError])`` when no/unknown command is given.
        """
        if not args:
            args = sys.argv
        if len(args) == 1:
            return None, [IndexError("No command supplied")]
        cmd = args[1]
        if cmd in self.commands:
            return self.commands[cmd].parse(args[2:])
        return None, [IndexError("Command not found: %s" % cmd)]

    def help_usage(self):
        """Concatenate the usage text of every registered sub-command."""
        lines = []
        for n in sorted(self.commands):
            p = "%s %s" % (sys.argv[0], n)
            lines.append(self.commands[n].help_usage(p))
        return "\n".join(lines)

    def __str__(self):
        return self.help_usage().rstrip()

    def __unicode__(self):
        # BUGFIX: previously returned unicode(self).encode("utf-8"), which
        # re-invoked __unicode__ and recursed forever.
        return self.help_usage().rstrip()
class Arguments(object):
    """Declarative command-line argument parser.

    Supports required positionals (``always``), optional positionals
    (``maybe``), value options (``option``), boolean switches (``switch``),
    post-processing (``process``) and validation (``validate``).
    ``parse`` never raises for user input errors; it returns a
    ``(results, errors)`` pair.
    """

    def __init__(self):
        self.required = []   # required positional names, in order
        self.maybes = []     # optional positional names, in order
        self.data = {}       # name -> default value (also marks known names)
        self.text = {
            'switch': {},
            'option': {},
            'required': {},
            'maybe': {},
        }
        self.names = {
            'switches': [],
            'options': [],
        }
        self.abbr = {}       # short form -> long name
        self.processors = {}  # name -> [callables applied in order]
        self.validators = {}  # name -> [(predicate, message)]

    def _set_default_value(self, name, value):
        if name in self.data:
            raise ValueError("%s is already used" % name)
        self.data[name] = value

    def _set_abbr(self, name, abbr):
        if abbr in self.abbr:
            raise ValueError("%s is already used" % abbr)
        self.abbr[abbr] = name

    def switch(self, name, help=u"N/A", abbr=""):
        """Declare a boolean flag (``--name`` / ``-a``); defaults to False."""
        self._set_default_value(name, False)
        self.names['switches'].append(name)
        self.text['switch'][name] = help
        if abbr:
            self._set_abbr(name, abbr)

    def option(self, name, value, help=u"N/A", abbr=None):
        """Declare a value option (``--name=value``) with default ``value``.

        BUGFIX: abbreviations now go through ``_set_abbr`` so a duplicate
        short form raises instead of silently overwriting an earlier one
        (consistent with ``switch``).
        """
        self._set_default_value(name, value)
        self.text['option'][name] = help
        if abbr:
            self._set_abbr(name, abbr)

    def always(self, name, help="N/A"):
        """Declare a required positional argument."""
        self.required.append(name)
        self._set_default_value(name, None)
        self.text['required'][name] = help

    def maybe(self, name, help="N/A"):
        """Declare an optional positional argument (defaults to None)."""
        self.maybes.append(name)
        self._set_default_value(name, None)
        self.text['maybe'][name] = help

    def process(self, name, fun):
        """Apply ``fun`` to the parsed (truthy) value of ``name``."""
        if name not in self.processors:
            self.processors[name] = []
        self.processors[name].append(fun)

    def validate(self, name, fun, exp="Validation failed"):
        """Require ``fun(value)`` to be truthy, else record ``exp``."""
        if name not in self.validators:
            self.validators[name] = []
        self.validators[name].append((fun, exp))

    def elongate(self, abbr):
        return self.abbr[abbr]

    def is_value(self, name):
        return name in self.data

    def is_switch(self, name):
        return name in self.names['switches']

    def is_abbr(self, name):
        return name in self.abbr

    def _parse_args(self, args):
        """Split raw argv into ``(requested, ordinal)``.

        ``requested`` maps option/switch names (possibly abbreviated) to
        their raw value (True for switches); ``ordinal`` keeps positional
        arguments in order.
        """
        requested = {}
        ordinal = []
        for a in args:
            # plain token: positional argument
            if a[0] != "-":
                ordinal.append(a)
                continue
            # assignment form: --name=value (or -n=value)
            if "=" in a:
                raw, value = a.split("=")
                if len(raw) > 3 and raw[0:2] == "--":
                    name = raw.strip("-")
                    requested[name] = value
                else:
                    requested[raw.strip("-")] = value
                continue
            # long switch: --name
            if len(a) > 2 and a[0:2] == "--":
                name = a[2:]
                requested[name] = True
                continue
            # clustered short switches: -abc -> a, b, c
            if a[0] == "-":
                for x in a[1:]:
                    requested[x] = True
        return requested, ordinal

    def parse(self, args=None):
        """Parse ``args`` (defaults to ``sys.argv[1:]``).

        Returns ``(results, errors)`` where ``results`` maps every declared
        name to its final value and ``errors`` collects all problems found.
        """
        if args is None:
            args = sys.argv[1:]
        requested, ordinal = self._parse_args(args)
        results = {}
        errors = []
        # set defaults
        for k in self.data:
            results[k] = self.data[k]
        # set ordinal: required positionals first, then the maybes
        for e, value in enumerate(ordinal):
            o = e - len(self.required)
            if e < len(self.required):
                name = self.required[e]
                results[name] = value
            elif o < len(self.maybes):
                name = self.maybes[o]
                results[name] = value
            else:
                errors.append(IndexError("Unnamed arguments found: [%s]" % value))
        # set options/switches
        for name, value in requested.items():
            if self.is_abbr(name):
                name = self.elongate(name)
            if self.is_switch(name):
                results[name] = True
            elif self.is_value(name):
                if value == True:
                    errors.append(
                        TypeError(
                            "Value argument used without value: [%s]" %
                            name)
                    )
                    continue
                results[name] = value
            else:
                # "Unkown" kept verbatim: callers may match on this text.
                errors.append(ValueError("Unkown argument: [%s]" % name))
        # processors: transform truthy values, collecting any exception
        for name, processors in self.processors.items():
            for p in processors:
                if results[name]:
                    try:
                        results[name] = p(results[name])
                    except Exception as e:
                        errors.append(e)
        # validators
        for name, funs in self.validators.items():
            for f, exp in funs:
                if not f(results[name]):
                    errors.append(AssertionError("[%s] %s" % (name, exp)))
        # check mandatory
        if len(ordinal) < len(self.required):
            missing = self.required[len(ordinal):]
            errors.append(ValueError(
                "Number of require arguments mismatch, missing: %s" %
                (",".join(missing)))
            )
        return results, errors

    def help_usage(self, prefix=None):
        """Render a usage/help text; ``prefix`` defaults to the program name."""
        if not prefix:
            prefix = sys.argv[0]
        usage_options = ""
        options_count = len([x for x in self.names.items()])
        if options_count > 0:
            usage_options = " [OPTIONS] "
        mandatory = " ".join(self.required).upper()
        # Column widths for alignment.  BUGFIX: guard against empty
        # sequences — sorted([])[-1] raised IndexError when nothing was
        # declared.
        length_name = sorted([len(x) for x in self.names.keys()] or [0])[-1]
        length_values = sorted([len(str(x)) for x in self.data.values()] or [0])[-1]
        len_just = length_name + length_values + 5
        abbr_reverse = dict([(v, k) for k, v in self.abbr.items()])
        r = ""
        r += "Usage: " + prefix + usage_options + mandatory
        r += "\n\n"
        if len(self.required) > 0:
            r += "Required arguments:\n"
            for k, v in self.text['required'].items():
                r += " " + k.ljust(len_just).upper() + " " * 6 + v
                r += "\n"
            r += "\n"
        if len(self.maybes) > 0:
            r += "Optional arguments:\n"
            for k, v in self.text['maybe'].items():
                r += " " + k.ljust(len_just).upper() + " " * 6 + v
                r += "\n"
            r += "\n"
        if len(self.text["option"]) > 0:
            r += "Options:\n"
            for k, v in self.text["option"].items():
                a = ""
                if k in abbr_reverse:
                    a = "-" + abbr_reverse[k]
                a = " " + a.rjust(2)
                nv = "--%s=%s" % (k, self.data[k])
                r += a + " " + nv.ljust(len_just) + " " * 2 + v
                r += "\n"
        if len(self.text["switch"]) > 0:
            r += "\nSwitches:\n"
            for k, v in self.text["switch"].items():
                a = ""
                if k in abbr_reverse:
                    a = "-" + abbr_reverse[k]
                a = " " + a.rjust(2)
                nv = "--%s" % (k)
                r += a + " " + nv.ljust(len_just) + " " * 2 + v
                r += "\n"
        return r

    def __str__(self):
        return self.help_usage().rstrip()

    def __unicode__(self):
        # BUGFIX: previously returned unicode(self).encode("utf-8"), which
        # re-invoked __unicode__ and recursed forever.
        return self.help_usage().rstrip()
| mit |
geekboxzone/lollipop_external_chromium_org | third_party/markupsafe/__init__.py | 371 | 8205 | # -*- coding: utf-8 -*-
"""
markupsafe
~~~~~~~~~~
Implements a Markup string.
:copyright: (c) 2010 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import re
from markupsafe._compat import text_type, string_types, int_types, \
unichr, PY2
# Public API of the markupsafe package.
__all__ = ['Markup', 'soft_unicode', 'escape', 'escape_silent']

# Matches HTML comments and tags — presumably consumed by Markup.striptags()
# (not visible in this chunk; confirm).
_striptags_re = re.compile(r'(<!--.*?-->|<[^>]*>)')
# Matches a single HTML entity reference ("&name;").
_entity_re = re.compile(r'&([^;]+);')
class Markup(text_type):
    r"""Marks a string as being safe for inclusion in HTML/XML output without
    needing to be escaped.  This implements the `__html__` interface a couple
    of frameworks and web applications use.  :class:`Markup` is a direct
    subclass of `unicode` and provides all the methods of `unicode` just that
    it escapes arguments passed and always returns `Markup`.

    The `escape` function returns markup objects so that double escaping can't
    happen.

    The constructor of the :class:`Markup` class can be used for three
    different things:  When passed an unicode object it's assumed to be safe,
    when passed an object with an HTML representation (has an `__html__`
    method) that representation is used, otherwise the object passed is
    converted into a unicode string and then assumed to be safe:

    >>> Markup("Hello <em>World</em>!")
    Markup(u'Hello <em>World</em>!')
    >>> class Foo(object):
    ...  def __html__(self):
    ...   return '<a href="#">foo</a>'
    ...
    >>> Markup(Foo())
    Markup(u'<a href="#">foo</a>')

    If you want object passed being always treated as unsafe you can use the
    :meth:`escape` classmethod to create a :class:`Markup` object:

    >>> Markup.escape("Hello <em>World</em>!")
    Markup(u'Hello &lt;em&gt;World&lt;/em&gt;!')

    Operations on a markup string are markup aware which means that all
    arguments are passed through the :func:`escape` function:

    >>> em = Markup("<em>%s</em>")
    >>> em % "foo & bar"
    Markup(u'<em>foo &amp; bar</em>')
    >>> strong = Markup("<strong>%(text)s</strong>")
    >>> strong % {'text': '<blink>hacker here</blink>'}
    Markup(u'<strong>&lt;blink&gt;hacker here&lt;/blink&gt;</strong>')
    >>> Markup("<em>Hello</em> ") + "<foo>"
    Markup(u'<em>Hello</em> &lt;foo&gt;')
    """
    __slots__ = ()

    def __new__(cls, base=u'', encoding=None, errors='strict'):
        # Objects exposing __html__ supply their own (safe) markup.
        if hasattr(base, '__html__'):
            base = base.__html__()
        if encoding is None:
            return text_type.__new__(cls, base)
        return text_type.__new__(cls, base, encoding, errors)

    def __html__(self):
        return self

    def __add__(self, other):
        if isinstance(other, string_types) or hasattr(other, '__html__'):
            return self.__class__(super(Markup, self).__add__(self.escape(other)))
        return NotImplemented

    def __radd__(self, other):
        if hasattr(other, '__html__') or isinstance(other, string_types):
            return self.escape(other).__add__(self)
        return NotImplemented

    def __mul__(self, num):
        if isinstance(num, int_types):
            return self.__class__(text_type.__mul__(self, num))
        return NotImplemented
    __rmul__ = __mul__

    def __mod__(self, arg):
        # Wrap the argument(s) so each value is escaped lazily during
        # the actual %-formatting.
        if isinstance(arg, tuple):
            arg = tuple(_MarkupEscapeHelper(x, self.escape) for x in arg)
        else:
            arg = _MarkupEscapeHelper(arg, self.escape)
        return self.__class__(text_type.__mod__(self, arg))

    def __repr__(self):
        return '%s(%s)' % (
            self.__class__.__name__,
            text_type.__repr__(self)
        )

    def join(self, seq):
        return self.__class__(text_type.join(self, map(self.escape, seq)))
    join.__doc__ = text_type.join.__doc__

    def split(self, *args, **kwargs):
        return list(map(self.__class__, text_type.split(self, *args, **kwargs)))
    split.__doc__ = text_type.split.__doc__

    def rsplit(self, *args, **kwargs):
        return list(map(self.__class__, text_type.rsplit(self, *args, **kwargs)))
    rsplit.__doc__ = text_type.rsplit.__doc__

    def splitlines(self, *args, **kwargs):
        return list(map(self.__class__, text_type.splitlines(self, *args, **kwargs)))
    splitlines.__doc__ = text_type.splitlines.__doc__

    def unescape(self):
        r"""Unescape markup again into an text_type string.  This also resolves
        known HTML4 and XHTML entities:

        >>> Markup("Main &raquo; <em>About</em>").unescape()
        u'Main \xbb <em>About</em>'
        """
        from markupsafe._constants import HTML_ENTITIES
        def handle_match(m):
            name = m.group(1)
            if name in HTML_ENTITIES:
                return unichr(HTML_ENTITIES[name])
            try:
                if name[:2] in ('#x', '#X'):
                    return unichr(int(name[2:], 16))
                elif name.startswith('#'):
                    return unichr(int(name[1:]))
            except ValueError:
                pass
            # Unknown entities are dropped entirely.
            return u''
        return _entity_re.sub(handle_match, text_type(self))

    def striptags(self):
        r"""Unescape markup into an text_type string and strip all tags.  This
        also resolves known HTML4 and XHTML entities.  Whitespace is
        normalized to one:

        >>> Markup("Main &raquo; <em>About</em>").striptags()
        u'Main \xbb About'
        """
        stripped = u' '.join(_striptags_re.sub('', self).split())
        return Markup(stripped).unescape()

    @classmethod
    def escape(cls, s):
        """Escape the string.  Works like :func:`escape` with the difference
        that for subclasses of :class:`Markup` this function would return the
        correct subclass.
        """
        rv = escape(s)
        if rv.__class__ is not cls:
            return cls(rv)
        return rv

    # Generate markup-aware wrappers for the plain text_type methods so
    # that their string arguments are escaped before being applied.
    def make_wrapper(name):
        orig = getattr(text_type, name)
        def func(self, *args, **kwargs):
            args = _escape_argspec(list(args), enumerate(args), self.escape)
            #_escape_argspec(kwargs, kwargs.iteritems(), None)
            return self.__class__(orig(self, *args, **kwargs))
        func.__name__ = orig.__name__
        func.__doc__ = orig.__doc__
        return func

    for method in '__getitem__', 'capitalize', \
                  'title', 'lower', 'upper', 'replace', 'ljust', \
                  'rjust', 'lstrip', 'rstrip', 'center', 'strip', \
                  'translate', 'expandtabs', 'swapcase', 'zfill':
        locals()[method] = make_wrapper(method)

    # new in python 2.5
    if hasattr(text_type, 'partition'):
        def partition(self, sep):
            return tuple(map(self.__class__,
                             text_type.partition(self, self.escape(sep))))
        def rpartition(self, sep):
            return tuple(map(self.__class__,
                             text_type.rpartition(self, self.escape(sep))))

    # new in python 2.6
    if hasattr(text_type, 'format'):
        format = make_wrapper('format')

    # not in python 3
    if hasattr(text_type, '__getslice__'):
        __getslice__ = make_wrapper('__getslice__')

    del method, make_wrapper
def _escape_argspec(obj, iterable, escape):
    """Helper for various string-wrapped functions.

    Walks ``iterable`` (key/value pairs into ``obj``) and escapes, in
    place, every value that is a string or exposes ``__html__``; other
    values (ints, etc.) are left untouched.  Returns ``obj``.
    """
    for key, value in iterable:
        if hasattr(value, '__html__') or isinstance(value, string_types):
            obj[key] = escape(value)
    return obj
class _MarkupEscapeHelper(object):
    """Helper for Markup.__mod__.

    Wraps a single %-format argument so that it is escaped only at the
    moment ``%s``/``%r`` formatting converts it to a string; numeric
    conversions (``%d``, ``%f``) pass through unescaped.
    """

    def __init__(self, obj, escape):
        self.obj = obj
        self.escape = escape

    # Indexing (e.g. "%(key)s") wraps the looked-up value too.
    __getitem__ = lambda s, x: _MarkupEscapeHelper(s.obj[x], s.escape)
    __unicode__ = __str__ = lambda s: text_type(s.escape(s.obj))
    __repr__ = lambda s: str(s.escape(repr(s.obj)))
    __int__ = lambda s: int(s.obj)
    __float__ = lambda s: float(s.obj)
# we have to import it down here as the speedups and native
# modules imports the markup type which is define above.
try:
    # Prefer the C implementation of escape/soft_unicode when built.
    from markupsafe._speedups import escape, escape_silent, soft_unicode
except ImportError:
    from markupsafe._native import escape, escape_silent, soft_unicode

if not PY2:
    soft_str = soft_unicode
    __all__.append('soft_str')
| bsd-3-clause |
garimakhulbe/autorest | src/generator/AutoRest.Python.Azure.Tests/Expected/AcceptanceTests/StorageManagementClient/storagemanagementclient/models/usage_name.py | 8 | 1056 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class UsageName(Model):
    """The Usage Names.

    :param value: Gets a string describing the resource name.
    :type value: str
    :param localized_value: Gets a localized string describing the resource
     name.
    :type localized_value: str
    """

    # msrest serialization map: attribute name -> wire key and type.
    _attribute_map = {
        'value': {'key': 'value', 'type': 'str'},
        'localized_value': {'key': 'localizedValue', 'type': 'str'},
    }

    def __init__(self, value=None, localized_value=None):
        self.value = value
        self.localized_value = localized_value
| mit |
Gabriel0402/zulip | zproject/settings.py | 41 | 33449 | # Django settings for zulip project.
########################################################################
# Here's how settings for the Zulip project work:
#
# * settings.py contains non-site-specific and settings configuration
# for the Zulip Django app.
# * settings.py imports local_settings.py, and any site-specific configuration
# belongs there. The template for local_settings.py is local_settings_template.py
########################################################################
import os
import platform
import time
import sys
import ConfigParser
from zerver.lib.db import TimeTrackingConnection
########################################################################
# INITIAL SETTINGS
########################################################################

config_file = ConfigParser.RawConfigParser()
# RawConfigParser.read silently skips missing files, so this is safe in dev.
config_file.read("/etc/zulip/zulip.conf")

# Whether this instance of Zulip is running in a production environment.
PRODUCTION = config_file.has_option('machine', 'deploy_type')
DEVELOPMENT = not PRODUCTION

secrets_file = ConfigParser.RawConfigParser()
if PRODUCTION:
    secrets_file.read("/etc/zulip/zulip-secrets.conf")
else:
    secrets_file.read("zproject/dev-secrets.conf")
def get_secret(key):
    """Return the value of *key* from the [secrets] section of the
    secrets file, or None when that secret is not configured."""
    if secrets_file.has_option('secrets', key):
        return secrets_file.get('secrets', key)
    return None
# Make this unique, and don't share it with anybody.
SECRET_KEY = get_secret("secret_key")

# A shared secret, used to authenticate different parts of the app to each other.
SHARED_SECRET = get_secret("shared_secret")

# We use this salt to hash a user's email into a filename for their user-uploaded
# avatar. If this salt is discovered, attackers will only be able to determine
# that the owner of an email account has uploaded an avatar to Zulip, which isn't
# the end of the world. Don't use the salt where there is more security exposure.
AVATAR_SALT = get_secret("avatar_salt")

# SERVER_GENERATION is used to track whether the server has been
# restarted for triggering browser clients to reload.
SERVER_GENERATION = int(time.time())

if not 'DEBUG' in globals():
    # Uncomment end of next line to test JS/CSS minification.
    DEBUG = DEVELOPMENT # and platform.node() != 'your-machine'

TEMPLATE_DEBUG = DEBUG
if DEBUG:
    INTERNAL_IPS = ('127.0.0.1',)

# Detect whether we're running as a queue worker; this impacts the logging configuration.
if len(sys.argv) > 2 and sys.argv[0].endswith('manage.py') and sys.argv[1] == 'process_queue':
    IS_WORKER = True
else:
    IS_WORKER = False

# This is overridden in test_settings.py for the test suites
TEST_SUITE = False

# The new user tutorial is enabled by default, but disabled for client tests.
TUTORIAL_ENABLED = True
# Import variables like secrets from the local_settings file
# Import local_settings after determining the deployment/machine type
if PRODUCTION:
    from local_settings import *
else:
    # For the Dev VM environment, we use the same settings as the
    # sample local_settings.py file, with a few exceptions.
    from local_settings_template import *
    EXTERNAL_HOST = 'localhost:9991'
    ALLOWED_HOSTS = ['localhost']
    AUTHENTICATION_BACKENDS = ('zproject.backends.DevAuthBackend',)
    # Add some of the below if you're testing other backends
    # AUTHENTICATION_BACKENDS = ('zproject.backends.EmailAuthBackend',
    #                            'zproject.backends.GoogleMobileOauth2Backend',
    #                            'zproject.backends.GoogleBackend')
    EXTERNAL_URI_SCHEME = "http://"
    EMAIL_GATEWAY_PATTERN = "%s@" + EXTERNAL_HOST
    ADMIN_DOMAIN = "zulip.com"
    NOTIFICATION_BOT = "notification-bot@zulip.com"
    ERROR_BOT = "error-bot@zulip.com"
    NEW_USER_BOT = "new-user-bot@zulip.com"
    EMAIL_GATEWAY_BOT = "emailgateway@zulip.com"
########################################################################
# DEFAULT VALUES FOR SETTINGS
########################################################################

# For any settings that are not defined in local_settings.py,
# we want to initialize them to sane default
DEFAULT_SETTINGS = {'TWITTER_CONSUMER_KEY': '',
                    'TWITTER_CONSUMER_SECRET': '',
                    'TWITTER_ACCESS_TOKEN_KEY': '',
                    'TWITTER_ACCESS_TOKEN_SECRET': '',
                    'EMAIL_GATEWAY_PATTERN': '',
                    'EMAIL_GATEWAY_EXAMPLE': '',
                    'EMAIL_GATEWAY_BOT': None,
                    'EMAIL_GATEWAY_LOGIN': None,
                    'EMAIL_GATEWAY_PASSWORD': None,
                    'EMAIL_GATEWAY_IMAP_SERVER': None,
                    'EMAIL_GATEWAY_IMAP_PORT': None,
                    'EMAIL_GATEWAY_IMAP_FOLDER': None,
                    'MANDRILL_API_KEY': '',
                    'S3_KEY': '',
                    'S3_SECRET_KEY': '',
                    'S3_BUCKET': '',
                    'S3_AVATAR_BUCKET': '',
                    'LOCAL_UPLOADS_DIR': None,
                    'DROPBOX_APP_KEY': '',
                    'ERROR_REPORTING': True,
                    'JWT_AUTH_KEYS': {},
                    'NAME_CHANGES_DISABLED': False,
                    'DEPLOYMENT_ROLE_NAME': "",
                    # The following bots only exist in non-VOYAGER installs
                    'ERROR_BOT': None,
                    'NEW_USER_BOT': None,
                    'NAGIOS_STAGING_SEND_BOT': None,
                    'NAGIOS_STAGING_RECEIVE_BOT': None,
                    'APNS_CERT_FILE': None,
                    'ANDROID_GCM_API_KEY': None,
                    'INITIAL_PASSWORD_SALT': None,
                    'FEEDBACK_BOT': 'feedback@zulip.com',
                    'FEEDBACK_BOT_NAME': 'Zulip Feedback Bot',
                    'API_SUPER_USERS': set(),
                    'ADMINS': '',
                    'INLINE_IMAGE_PREVIEW': True,
                    'CAMO_URI': '',
                    'ENABLE_FEEDBACK': PRODUCTION,
                    'FEEDBACK_EMAIL': None,
                    'ENABLE_GRAVATAR': True,
                    'DEFAULT_AVATAR_URI': '/static/images/default-avatar.png',
                    'AUTH_LDAP_SERVER_URI': "",
                    'EXTERNAL_URI_SCHEME': "https://",
                    'ZULIP_COM': False,
                    'ZULIP_COM_STAGING': False,
                    'STATSD_HOST': '',
                    'REMOTE_POSTGRES_HOST': '',
                    'GOOGLE_CLIENT_ID': '',
                    'DBX_APNS_CERT_FILE': None,
                    }

# Install each default into the module namespace unless local_settings
# already defined it.
for setting_name, setting_val in DEFAULT_SETTINGS.iteritems():
    if not setting_name in vars():
        vars()[setting_name] = setting_val

# These are the settings that we will check that the user has filled in for
# production deployments before starting the app.  It consists of a series
# of pairs of (setting name, default value that it must be changed from)
REQUIRED_SETTINGS = [("EXTERNAL_HOST", "zulip.example.com"),
                     ("ZULIP_ADMINISTRATOR", "zulip-admin@example.com"),
                     ("ADMIN_DOMAIN", "example.com"),
                     # SECRET_KEY doesn't really need to be here, in
                     # that we set it automatically, but just in
                     # case, it seems worth having in this list
                     ("SECRET_KEY", ""),
                     ("AUTHENTICATION_BACKENDS", ()),
                     ("NOREPLY_EMAIL_ADDRESS", "noreply@example.com"),
                     ("DEFAULT_FROM_EMAIL", "Zulip <zulip@example.com>"),
                     ("ALLOWED_HOSTS", "*"),
                     ]

if ADMINS == "":
    ADMINS = (("Zulip Administrator", ZULIP_ADMINISTRATOR),)
MANAGERS = ADMINS

# Voyager is a production zulip server that is not zulip.com or
# staging.zulip.com VOYAGER is the standalone all-on-one-server
# production deployment model for based on the original Zulip
# ENTERPRISE implementation.  We expect most users of the open source
# project will be using VOYAGER=True in production.
VOYAGER = PRODUCTION and not ZULIP_COM
########################################################################
# STANDARD DJANGO SETTINGS
########################################################################

# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/New_York'

# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'

# The ID, as an integer, of the current site in the django_site database table.
# This is used so that application data can hook into specific site(s) and a
# single database can manage content for multiple sites.
#
# We set this site's domain to 'zulip.com' in populate_db.
SITE_ID = 1

# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True

# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True

# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True

# Repository root (zproject/..), used to locate templates and static files.
DEPLOY_ROOT = os.path.join(os.path.realpath(os.path.dirname(__file__)), '..')
TEMPLATE_DIRS = ( os.path.join(DEPLOY_ROOT, 'templates'), )

# Make redirects work properly behind a reverse proxy
USE_X_FORWARDED_HOST = True

# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
)
if PRODUCTION:
    # Template caching is a significant performance win in production.
    TEMPLATE_LOADERS = (
        ('django.template.loaders.cached.Loader',
         TEMPLATE_LOADERS),
    )
MIDDLEWARE_CLASSES = (
    # Our logging middleware should be the first middleware item.
    'zerver.middleware.TagRequests',
    'zerver.middleware.LogRequests',
    'zerver.middleware.JsonErrorHandler',
    'zerver.middleware.RateLimitMiddleware',
    'zerver.middleware.FlushDisplayRecipientCache',
    'django.middleware.common.CommonMiddleware',
    'zerver.middleware.SessionHostDomainMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
)

ANONYMOUS_USER_ID = None

AUTH_USER_MODEL = "zerver.UserProfile"

TEST_RUNNER = 'zerver.lib.test_runner.Runner'

ROOT_URLCONF = 'zproject.urls'

# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'zproject.wsgi.application'

INSTALLED_APPS = [
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.staticfiles',
    'confirmation',
    'guardian',
    'pipeline',
    'zerver',
]

if not VOYAGER:
    INSTALLED_APPS += [
        'analytics',
        'zilencer',
    ]

# Base URL of the Tornado server
# We set it to None when running backend tests or populate_db.
# We override the port number when running frontend tests.
TORNADO_SERVER = 'http://localhost:9993'
RUNNING_INSIDE_TORNADO = False
########################################################################
# DATABASE CONFIGURATION
########################################################################

DATABASES = {"default": {
    'ENGINE': 'django.db.backends.postgresql_psycopg2',
    'NAME': 'zulip',
    'USER': 'zulip',
    'PASSWORD': '', # Authentication done via certificates
    'HOST': '',  # Host = '' => connect through a local socket
    'SCHEMA': 'zulip',
    'CONN_MAX_AGE': 600,
    'OPTIONS': {
        # Wraps connections to track per-request query time.
        'connection_factory': TimeTrackingConnection
    },
},
}

if DEVELOPMENT:
    LOCAL_DATABASE_PASSWORD = get_secret("local_database_password")
    DATABASES["default"].update({
        'PASSWORD': LOCAL_DATABASE_PASSWORD,
        'HOST': 'localhost'
    })
elif REMOTE_POSTGRES_HOST != '':
    DATABASES['default'].update({
        'HOST': REMOTE_POSTGRES_HOST,
    })
    # Remote postgres connections require verified SSL.
    DATABASES['default']['OPTIONS']['sslmode'] = 'verify-full'
########################################################################
# RABBITMQ CONFIGURATION
########################################################################

USING_RABBITMQ = True
RABBITMQ_USERNAME = 'zulip'
RABBITMQ_PASSWORD = get_secret("rabbitmq_password")

########################################################################
# CACHING CONFIGURATION
########################################################################

SESSION_ENGINE = "django.contrib.sessions.backends.cached_db"

CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.memcached.PyLibMCCache',
        'LOCATION': '127.0.0.1:11211',
        'TIMEOUT': 3600
    },
    'database': {
        'BACKEND': 'django.core.cache.backends.db.DatabaseCache',
        'LOCATION': 'third_party_api_results',
        # Basically never timeout.  Setting to 0 isn't guaranteed
        # to work, see https://code.djangoproject.com/ticket/9595
        'TIMEOUT': 2000000000,
        'OPTIONS': {
            'MAX_ENTRIES': 100000000,
            'CULL_FREQUENCY': 10,
        }
    },
}

########################################################################
# REDIS-BASED RATE LIMITING CONFIGURATION
########################################################################

RATE_LIMITING = True
REDIS_HOST = '127.0.0.1'
REDIS_PORT = 6379

RATE_LIMITING_RULES = [
    (60, 100),  # 100 requests max every minute
]
########################################################################
# SECURITY SETTINGS
########################################################################

# Tell the browser to never send our cookies without encryption, e.g.
# when executing the initial http -> https redirect.
#
# Turn it off for local testing because we don't have SSL.
if PRODUCTION:
    SESSION_COOKIE_SECURE = True
    CSRF_COOKIE_SECURE = True

try:
    # For get_updates hostname sharding.
    domain = config_file.get('django', 'cookie_domain')
    SESSION_COOKIE_DOMAIN = '.' + domain
    CSRF_COOKIE_DOMAIN = '.' + domain
except ConfigParser.Error:
    # Failing here is OK
    pass

# Prevent Javascript from reading the CSRF token from cookies.  Our code gets
# the token from the DOM, which means malicious code could too.  But hiding the
# cookie will slow down some attackers.
CSRF_COOKIE_PATH = '/;HttpOnly'
CSRF_FAILURE_VIEW = 'zerver.middleware.csrf_failure'

if DEVELOPMENT:
    # Use fast password hashing for creating testing users when not
    # PRODUCTION.  Saves a bunch of time.
    PASSWORD_HASHERS = (
        'django.contrib.auth.hashers.SHA1PasswordHasher',
        'django.contrib.auth.hashers.PBKDF2PasswordHasher'
    )
    # Also we auto-generate passwords for the default users which you
    # can query using ./manage.py print_initial_password
    INITIAL_PASSWORD_SALT = get_secret("initial_password_salt")
########################################################################
# API/BOT SETTINGS
########################################################################

if "EXTERNAL_API_PATH" not in vars():
    EXTERNAL_API_PATH = EXTERNAL_HOST + "/api"
EXTERNAL_API_URI = EXTERNAL_URI_SCHEME + EXTERNAL_API_PATH

S3_KEY = get_secret("s3_key")
S3_SECRET_KEY = get_secret("s3_secret_key")

# GCM tokens are IP-whitelisted; if we deploy to additional
# servers you will need to explicitly add their IPs here:
# https://cloud.google.com/console/project/apps~zulip-android/apiui/credential
ANDROID_GCM_API_KEY = get_secret("android_gcm_api_key")

GOOGLE_OAUTH2_CLIENT_SECRET = get_secret('google_oauth2_client_secret')

DROPBOX_APP_KEY = get_secret("dropbox_app_key")

MAILCHIMP_API_KEY = get_secret("mailchimp_api_key")

# This comes from our mandrill accounts page
MANDRILL_API_KEY = get_secret("mandrill_api_key")

# Twitter API credentials
# Secrecy not required because its only used for R/O requests.
# Please don't make us go over our rate limit.
TWITTER_CONSUMER_KEY = get_secret("twitter_consumer_key")
TWITTER_CONSUMER_SECRET = get_secret("twitter_consumer_secret")
TWITTER_ACCESS_TOKEN_KEY = get_secret("twitter_access_token_key")
TWITTER_ACCESS_TOKEN_SECRET = get_secret("twitter_access_token_secret")

# These are the bots that Zulip sends automated messages as.
INTERNAL_BOTS = [ {'var_name': 'NOTIFICATION_BOT',
                   'email_template': 'notification-bot@%s',
                   'name': 'Notification Bot'},
                  {'var_name': 'EMAIL_GATEWAY_BOT',
                   'email_template': 'emailgateway@%s',
                   'name': 'Email Gateway'},
                  {'var_name': 'NAGIOS_SEND_BOT',
                   'email_template': 'nagios-send-bot@%s',
                   'name': 'Nagios Send Bot'},
                  {'var_name': 'NAGIOS_RECEIVE_BOT',
                   'email_template': 'nagios-receive-bot@%s',
                   'name': 'Nagios Receive Bot'},
                  {'var_name': 'WELCOME_BOT',
                   'email_template': 'welcome-bot@%s',
                   'name': 'Welcome Bot'} ]

INTERNAL_BOT_DOMAIN = "zulip.com"

# Set the realm-specific bot names
for bot in INTERNAL_BOTS:
    if not bot['var_name'] in vars():
        bot_email = bot['email_template'] % (INTERNAL_BOT_DOMAIN,)
        vars()[bot['var_name'] ] = bot_email

if EMAIL_GATEWAY_BOT not in API_SUPER_USERS:
    API_SUPER_USERS.add(EMAIL_GATEWAY_BOT)
if EMAIL_GATEWAY_PATTERN != "":
    EMAIL_GATEWAY_EXAMPLE = EMAIL_GATEWAY_PATTERN % ("support+abcdefg",)

DEPLOYMENT_ROLE_KEY = get_secret("deployment_role_key")

if PRODUCTION:
    FEEDBACK_TARGET="https://zulip.com/api"
else:
    FEEDBACK_TARGET="http://localhost:9991/api"
########################################################################
# STATSD CONFIGURATION
########################################################################

# Statsd is not super well supported; if you want to use it you'll need
# to set STATSD_HOST and STATSD_PREFIX.
if STATSD_HOST != '':
    INSTALLED_APPS += ['django_statsd']
    STATSD_PORT = 8125
    STATSD_CLIENT = 'django_statsd.clients.normal'

########################################################################
# CAMO HTTPS CACHE CONFIGURATION
########################################################################

if CAMO_URI != '':
    # This needs to be synced with the Camo installation
    CAMO_KEY = get_secret("camo_key")
########################################################################
# STATIC CONTENT AND MINIFICATION SETTINGS
########################################################################

STATIC_URL = '/static/'

# ZulipStorage is a modified version of PipelineCachedStorage,
# and, like that class, it inserts a file hash into filenames
# to prevent the browser from using stale files from cache.
#
# Unlike PipelineStorage, it requires the files to exist in
# STATIC_ROOT even for dev servers.  So we only use
# ZulipStorage when not DEBUG.

# This is the default behavior from Pipeline, but we set it
# here so that urls.py can read it.
PIPELINE = not DEBUG

if DEBUG:
    STATICFILES_STORAGE = 'pipeline.storage.PipelineStorage'
    STATICFILES_FINDERS = (
        'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    )
    if PIPELINE:
        STATIC_ROOT = 'prod-static/serve'
    else:
        STATIC_ROOT = 'static/'
else:
    STATICFILES_STORAGE = 'zerver.storage.ZulipStorage'
    STATICFILES_FINDERS = (
        'zerver.finders.ZulipFinder',
    )
    if PRODUCTION:
        STATIC_ROOT = '/home/zulip/prod-static'
    else:
        STATIC_ROOT = 'prod-static/serve'

# We want all temporary uploaded files to be stored on disk.
FILE_UPLOAD_MAX_MEMORY_SIZE = 0

STATICFILES_DIRS = ['static/']
STATIC_HEADER_FILE = 'zerver/static_header.txt'

# To use minified files in dev, set PIPELINE = True.  For the full
# cache-busting behavior, you must also set DEBUG = False.
#
# You will need to run update-prod-static after changing
# static files.
# CSS bundles: each entry maps a bundle name to its source files and the
# minified output filename produced by django-pipeline.
PIPELINE_CSS = {
    'activity': {
        'source_filenames': ('styles/activity.css',),
        'output_filename': 'min/activity.css'
    },
    'portico': {
        'source_filenames': (
            'third/zocial/zocial.css',
            'styles/portico.css',
            'styles/pygments.css',
            'styles/thirdparty-fonts.css',
            'styles/fonts.css',
        ),
        'output_filename': 'min/portico.css'
    },
    # Two versions of the app CSS exist because of QTBUG-3467
    'app-fontcompat': {
        'source_filenames': (
            'third/bootstrap-notify/css/bootstrap-notify.css',
            'third/spectrum/spectrum.css',
            'styles/zulip.css',
            'styles/pygments.css',
            'styles/thirdparty-fonts.css',
            # We don't want fonts.css on QtWebKit, so its omitted here
        ),
        'output_filename': 'min/app-fontcompat.css'
    },
    'app': {
        'source_filenames': (
            'third/bootstrap-notify/css/bootstrap-notify.css',
            'third/spectrum/spectrum.css',
            'third/jquery-perfect-scrollbar/css/perfect-scrollbar.css',
            'styles/zulip.css',
            'styles/pygments.css',
            'styles/thirdparty-fonts.css',
            'styles/fonts.css',
        ),
        'output_filename': 'min/app.css'
    },
    'common': {
        'source_filenames': (
            'third/bootstrap/css/bootstrap.css',
            'third/bootstrap/css/bootstrap-btn.css',
            'third/bootstrap/css/bootstrap-responsive.css',
        ),
        'output_filename': 'min/common.css'
    },
}
# JavaScript bundles; minification itself is handled in tools/minify-js
# (see PIPELINE_JS below), but the source lists live here.
JS_SPECS = {
    'common': {
        'source_filenames': (
            'third/jquery/jquery-1.7.2.js',
            'third/underscore/underscore.js',
            'js/blueslip.js',
            'third/bootstrap/js/bootstrap.js',
            'js/common.js',
        ),
        'output_filename': 'min/common.js'
    },
    'signup': {
        'source_filenames': (
            'js/signup.js',
            'third/jquery-validate/jquery.validate.js',
        ),
        'output_filename': 'min/signup.js'
    },
    'initial_invite': {
        'source_filenames': (
            'third/jquery-validate/jquery.validate.js',
            'js/initial_invite.js',
        ),
        'output_filename': 'min/initial_invite.js'
    },
    'api': {
        'source_filenames': ('js/api.js',),
        'output_filename': 'min/api.js'
    },
    'app_debug': {
        'source_filenames': ('js/debug.js',),
        'output_filename': 'min/app_debug.js'
    },
    'app': {
        'source_filenames': [
            'third/bootstrap-notify/js/bootstrap-notify.js',
            'third/html5-formdata/formdata.js',
            'third/jquery-validate/jquery.validate.js',
            'third/jquery-form/jquery.form.js',
            'third/jquery-filedrop/jquery.filedrop.js',
            'third/jquery-caret/jquery.caret.1.02.js',
            'third/xdate/xdate.dev.js',
            'third/spin/spin.js',
            'third/jquery-mousewheel/jquery.mousewheel.js',
            'third/jquery-throttle-debounce/jquery.ba-throttle-debounce.js',
            'third/jquery-idle/jquery.idle.js',
            'third/jquery-autosize/jquery.autosize.js',
            'third/jquery-perfect-scrollbar/js/perfect-scrollbar.js',
            'third/lazyload/lazyload.js',
            'third/spectrum/spectrum.js',
            'third/winchan/winchan.js',
            'third/sockjs/sockjs-0.3.4.js',
            'third/handlebars/handlebars.runtime.js',
            'third/marked/lib/marked.js',
            'templates/compiled.js',
            'js/feature_flags.js',
            'js/loading.js',
            'js/util.js',
            'js/dict.js',
            'js/localstorage.js',
            'js/channel.js',
            'js/setup.js',
            'js/muting.js',
            'js/muting_ui.js',
            'js/viewport.js',
            'js/rows.js',
            'js/unread.js',
            'js/stream_list.js',
            'js/filter.js',
            'js/narrow.js',
            'js/reload.js',
            'js/compose_fade.js',
            'js/fenced_code.js',
            'js/echo.js',
            'js/socket.js',
            'js/compose.js',
            'js/stream_color.js',
            'js/admin.js',
            'js/stream_data.js',
            'js/subs.js',
            'js/message_edit.js',
            'js/condense.js',
            'js/resize.js',
            'js/floating_recipient_bar.js',
            'js/ui.js',
            'js/click_handlers.js',
            'js/scroll_bar.js',
            'js/gear_menu.js',
            'js/copy_and_paste.js',
            'js/popovers.js',
            'js/typeahead_helper.js',
            'js/search_suggestion.js',
            'js/search.js',
            'js/composebox_typeahead.js',
            'js/navigate.js',
            'js/hotkey.js',
            'js/favicon.js',
            'js/notifications.js',
            'js/hashchange.js',
            'js/invite.js',
            'js/message_list_view.js',
            'js/message_list.js',
            'js/message_flags.js',
            'js/alert_words.js',
            'js/alert_words_ui.js',
            'js/people.js',
            'js/message_store.js',
            'js/server_events.js',
            'js/zulip.js',
            'js/activity.js',
            'js/colorspace.js',
            'js/timerender.js',
            'js/tutorial.js',
            'js/templates.js',
            'js/avatar.js',
            'js/settings.js',
            'js/tab_bar.js',
            'js/emoji.js',
            'js/referral.js',
            'js/custom_markdown.js',
            'js/bot_data.js',
        ],
        'output_filename': 'min/app.js'
    },
    'activity': {
        'source_filenames': (
            'third/sorttable/sorttable.js',
        ),
        'output_filename': 'min/activity.js'
    },
    # We also want to minify sockjs separately for the sockjs iframe transport
    'sockjs': {
        'source_filenames': ('third/sockjs/sockjs-0.3.4.js',),
        'output_filename': 'min/sockjs-0.3.4.min.js'
    },
}

app_srcs = JS_SPECS['app']['source_filenames']

PIPELINE_JS = {} # Now handled in tools/minify-js
PIPELINE_JS_COMPRESSOR = None

PIPELINE_CSS_COMPRESSOR = 'pipeline.compressors.yui.YUICompressor'
PIPELINE_YUI_BINARY = '/usr/bin/env yui-compressor'
########################################################################
# LOGGING SETTINGS
########################################################################

# (settings variable name, production path) pairs; installed into the
# module namespace by the loop below.
ZULIP_PATHS = [
    ("SERVER_LOG_PATH", "/var/log/zulip/server.log"),
    ("ERROR_FILE_LOG_PATH", "/var/log/zulip/errors.log"),
    ("MANAGEMENT_LOG_PATH", "/var/log/zulip/manage.log"),
    ("WORKER_LOG_PATH", "/var/log/zulip/workers.log"),
    ("PERSISTENT_QUEUE_FILENAME", "/home/zulip/tornado/event_queues.pickle"),
    ("JSON_PERSISTENT_QUEUE_FILENAME", "/home/zulip/tornado/event_queues.json"),
    ("EMAIL_MIRROR_LOG_PATH", "/var/log/zulip/email-mirror.log"),
    ("EMAIL_DELIVERER_LOG_PATH", "/var/log/zulip/email-deliverer.log"),
    ("LDAP_SYNC_LOG_PATH", "/var/log/zulip/sync_ldap_user_data.log"),
    ("QUEUE_ERROR_DIR", "/var/log/zulip/queue_error"),
    ("STATS_DIR", "/home/zulip/stats"),
    ("DIGEST_LOG_PATH", "/var/log/zulip/digest.log"),
]

# The Event log basically logs most significant database changes,
# which can be useful for debugging.
if VOYAGER:
    EVENT_LOG_DIR = None
else:
    ZULIP_PATHS.append(("EVENT_LOG_DIR", "/home/zulip/logs/event_log"))

for (var, path) in ZULIP_PATHS:
    if DEVELOPMENT:
        # if DEVELOPMENT, store these files in the Zulip checkout
        path = os.path.basename(path)
    vars()[var] = path

ZULIP_WORKER_TEST_FILE = '/tmp/zulip-worker-test-file'

if IS_WORKER:
    FILE_LOG_PATH = WORKER_LOG_PATH
else:
    FILE_LOG_PATH = SERVER_LOG_PATH
LOGGING = {
    'version': 1,
    'disable_existing_loggers': True,
    'formatters': {
        'default': {
            'format': '%(asctime)s %(levelname)-8s %(message)s'
        }
    },
    'filters': {
        'ZulipLimiter': {
            '()': 'zerver.lib.logging_util.ZulipLimiter',
        },
        'EmailLimiter': {
            '()': 'zerver.lib.logging_util.EmailLimiter',
        },
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse',
        },
        'nop': {
            '()': 'zerver.lib.logging_util.ReturnTrue',
        },
        'require_really_deployed': {
            '()': 'zerver.lib.logging_util.RequireReallyDeployed',
        },
    },
    'handlers': {
        'zulip_admins': {
            'level': 'ERROR',
            'class': 'zerver.handlers.AdminZulipHandler',
            # For testing the handler delete the next line
            'filters': ['ZulipLimiter', 'require_debug_false', 'require_really_deployed'],
            'formatter': 'default'
        },
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'default'
        },
        'file': {
            'level': 'DEBUG',
            'class': 'logging.handlers.TimedRotatingFileHandler',
            'formatter': 'default',
            'filename': FILE_LOG_PATH,
            'when': 'D',
            'interval': 7,
            'backupCount': 100000000,
        },
        'errors_file': {
            'level': 'WARNING',
            'class': 'logging.handlers.TimedRotatingFileHandler',
            'formatter': 'default',
            'filename': ERROR_FILE_LOG_PATH,
            'when': 'D',
            'interval': 7,
            'backupCount': 100000000,
        },
    },
    'loggers': {
        '': {
            'handlers': ['console', 'file', 'errors_file'],
            'level': 'INFO',
            'propagate': False,
        },
        'django': {
            # Error mails to admins only when ERROR_REPORTING is enabled.
            'handlers': (['zulip_admins'] if ERROR_REPORTING else [])
                        + ['console', 'file', 'errors_file'],
            'level': 'INFO',
            'propagate': False,
        },
        'zulip.requests': {
            'handlers': ['console', 'file', 'errors_file'],
            'level': 'INFO',
            'propagate': False,
        },
        'zulip.management': {
            'handlers': ['file', 'errors_file'],
            'level': 'INFO',
            'propagate': False,
        },
        ## Uncomment the following to get all database queries logged to the console
        # 'django.db': {
        #     'handlers': ['console'],
        #     'level': 'DEBUG',
        #     'propagate': False,
        # },
    }
}
# Extra context made available to every rendered template.
TEMPLATE_CONTEXT_PROCESSORS = (
    'zerver.context_processors.add_settings',
    'zerver.context_processors.add_metrics',
)
# How long a registration/activation link stays valid.
ACCOUNT_ACTIVATION_DAYS=7
LOGIN_REDIRECT_URL='/'
# Client-side polling timeout for get_events, in milliseconds.
# We configure this here so that the client test suite can override it.
# We already kill the connection server-side with heartbeat events,
# but it's good to have a safety. This value should be greater than
# (HEARTBEAT_MIN_FREQ_SECS + 10)
POLL_TIMEOUT = 90 * 1000
# iOS App IDs
ZULIP_IOS_APP_ID = 'com.zulip.Zulip'
DBX_IOS_APP_ID = 'com.dropbox.Zulip'
########################################################################
# SSO AND LDAP SETTINGS
########################################################################
# True when the Apache REMOTE_USER SSO backend is configured.
USING_APACHE_SSO = ('zproject.backends.ZulipRemoteUserBackend' in AUTHENTICATION_BACKENDS)
# If SSO is the *only* configured backend, send logged-out users
# straight to the SSO login flow instead of the normal login page.
if (len(AUTHENTICATION_BACKENDS) == 1 and
        AUTHENTICATION_BACKENDS[0] == "zproject.backends.ZulipRemoteUserBackend"):
    HOME_NOT_LOGGED_IN = "/accounts/login/sso"
    ONLY_SSO = True
else:
    HOME_NOT_LOGGED_IN = '/login'
    ONLY_SSO = False
# These backends are unconditionally enabled: object-level permissions
# (guardian) and the internal dummy backend.
AUTHENTICATION_BACKENDS += ('guardian.backends.ObjectPermissionBackend',)
AUTHENTICATION_BACKENDS += ('zproject.backends.ZulipDummyBackend',)
# Mirror LDAP data into user profiles whenever an LDAP server is
# configured.  When LDAP supplies data but not authentication, add the
# populator backend; otherwise the LDAP auth backend (if present) keeps
# profiles in sync itself.
POPULATE_PROFILE_VIA_LDAP = bool(AUTH_LDAP_SERVER_URI)
# PEP 8: use "x not in y" rather than "not x in y".
if POPULATE_PROFILE_VIA_LDAP and \
        'zproject.backends.ZulipLDAPAuthBackend' not in AUTHENTICATION_BACKENDS:
    AUTHENTICATION_BACKENDS += ('zproject.backends.ZulipLDAPUserPopulator',)
else:
    POPULATE_PROFILE_VIA_LDAP = 'zproject.backends.ZulipLDAPAuthBackend' in AUTHENTICATION_BACKENDS or POPULATE_PROFILE_VIA_LDAP
########################################################################
# EMAIL SETTINGS
########################################################################
# If an email host is not specified, fail silently and gracefully
if not EMAIL_HOST and PRODUCTION:
    EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend'
elif DEVELOPMENT:
    # In the dev environment, emails are printed to the run-dev.py console.
    EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
else:
    EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST_PASSWORD = get_secret('email_password')
########################################################################
# MISC SETTINGS
########################################################################
if PRODUCTION:
    # Filter out user data
    DEFAULT_EXCEPTION_REPORTER_FILTER = 'zerver.filters.ZulipExceptionReporterFilter'
# This is a debugging option only
PROFILE_ALL_REQUESTS = False
# Bot accounts shared across all realms.  A set literal is clearer (and
# avoids a throwaway tuple) compared to set((...)).
CROSS_REALM_BOT_EMAILS = {'feedback@zulip.com', 'notification-bot@zulip.com'}
| apache-2.0 |
dreamsxin/kbengine | kbe/res/scripts/common/Lib/test/test_bigmem.py | 123 | 45241 | """Bigmem tests - tests for the 32-bit boundary in containers.
These tests try to exercise the 32-bit boundary that is sometimes, if
rarely, exceeded in practice, but almost never tested. They are really only
meaningful on 64-bit builds on machines with a *lot* of memory, but the
tests are always run, usually with very low memory limits to make sure the
tests themselves don't suffer from bitrot. To run them for real, pass a
high memory limit to regrtest, with the -M option.
"""
from test import support
from test.support import bigmemtest, _1G, _2G, _4G
import unittest
import operator
import sys
import functools
# These tests all use one of the bigmemtest decorators to indicate how much
# memory they use and how much memory they need to be even meaningful. The
# decorators take two arguments: a 'memuse' indicator declaring
# (approximate) bytes per size-unit the test will use (at peak usage), and a
# 'minsize' indicator declaring a minimum *useful* size. A test that
# allocates a bytestring to test various operations near the end will have a
# minsize of at least 2Gb (or it wouldn't reach the 32-bit limit, so the
# test wouldn't be very useful) and a memuse of 1 (one byte per size-unit,
# if it allocates only one big string at a time.)
#
# When run with a memory limit set, both decorators skip tests that need
# more memory than available to be meaningful. The precisionbigmemtest will
# always pass minsize as size, even if there is much more memory available.
# The bigmemtest decorator will scale size upward to fill available memory.
#
# Bigmem testing houserules:
#
# - Try not to allocate too many large objects. It's okay to rely on
# refcounting semantics, and don't forget that 's = create_largestring()'
# doesn't release the old 's' (if it exists) until well after its new
# value has been created. Use 'del s' before the create_largestring call.
#
# - Do *not* compare large objects using assertEqual, assertIn or similar.
# It's a lengthy operation and the errormessage will be utterly useless
# due to its size. To make sure whether a result has the right contents,
# better to use the strip or count methods, or compare meaningful slices.
#
# - Don't forget to test for large indices, offsets and results and such,
# in addition to large sizes. Anything that probes the 32-bit boundary.
#
# - When repeating an object (say, a substring, or a small list) to create
# a large object, make the subobject of a length that is not a power of
# 2. That way, int-wrapping problems are more easily detected.
#
# - Despite the bigmemtest decorator, all tests will actually be called
# with a much smaller number too, in the normal test run (5Kb currently.)
# This is so the tests themselves get frequent testing.
# Consequently, always make all large allocations based on the
# passed-in 'size', and don't rely on the size being very large. Also,
# memuse-per-size should remain sane (less than a few thousand); if your
# test uses more, adjust 'size' upward, instead.
# BEWARE: it seems that one failing test can yield other subsequent tests to
# fail as well. I do not know whether it is due to memory fragmentation
# issues, or other specifics of the platform malloc() routine.
# Bytes per character for each internal str representation (PEP 393
# flexible string representation); used to scale 'memuse' estimates.
ascii_char_size = 1
ucs2_char_size = 2
ucs4_char_size = 4
class BaseStrTest:
    """Mixin exercising string-like methods on huge (multi-GB) inputs.

    Concrete subclasses (str/bytes/bytearray variants) provide
    from_latin1(), which converts a latin-1 str literal into the type
    under test; every test builds its fixtures through it so the same
    assertions run against all three types.
    """

    def _test_capitalize(self, size):
        # Helper, not auto-discovered; subclasses wrap it with the
        # memuse estimate appropriate to their character size.
        _ = self.from_latin1
        SUBSTR = self.from_latin1(' abc def ghi')
        s = _('-') * size + SUBSTR
        caps = s.capitalize()
        self.assertEqual(caps[-len(SUBSTR):],
                         SUBSTR.capitalize())
        self.assertEqual(caps.lstrip(_('-')), SUBSTR)

    @bigmemtest(size=_2G + 10, memuse=1)
    def test_center(self, size):
        SUBSTR = self.from_latin1(' abc def ghi')
        s = SUBSTR.center(size)
        self.assertEqual(len(s), size)
        # center() puts the extra pad character on the left when the
        # total padding is odd.
        lpadsize = rpadsize = (len(s) - len(SUBSTR)) // 2
        if len(s) % 2:
            lpadsize += 1
        self.assertEqual(s[lpadsize:-rpadsize], SUBSTR)
        self.assertEqual(s.strip(), SUBSTR.strip())

    @bigmemtest(size=_2G, memuse=2)
    def test_count(self, size):
        _ = self.from_latin1
        SUBSTR = _(' abc def ghi')
        s = _('.') * size + SUBSTR
        self.assertEqual(s.count(_('.')), size)
        s += _('.')
        self.assertEqual(s.count(_('.')), size + 1)
        self.assertEqual(s.count(_(' ')), 3)
        self.assertEqual(s.count(_('i')), 1)
        self.assertEqual(s.count(_('j')), 0)

    @bigmemtest(size=_2G, memuse=2)
    def test_endswith(self, size):
        _ = self.from_latin1
        SUBSTR = _(' abc def ghi')
        s = _('-') * size + SUBSTR
        self.assertTrue(s.endswith(SUBSTR))
        self.assertTrue(s.endswith(s))
        s2 = _('...') + s
        self.assertTrue(s2.endswith(s))
        self.assertFalse(s.endswith(_('a') + SUBSTR))
        self.assertFalse(SUBSTR.endswith(s))

    @bigmemtest(size=_2G + 10, memuse=2)
    def test_expandtabs(self, size):
        _ = self.from_latin1
        s = _('-') * size
        tabsize = 8
        self.assertTrue(s.expandtabs() == s)
        del s
        slen, remainder = divmod(size, tabsize)
        s = _(' \t') * slen
        s = s.expandtabs(tabsize)
        self.assertEqual(len(s), size - remainder)
        self.assertEqual(len(s.strip(_(' '))), 0)

    @bigmemtest(size=_2G, memuse=2)
    def test_find(self, size):
        _ = self.from_latin1
        SUBSTR = _(' abc def ghi')
        sublen = len(SUBSTR)
        s = _('').join([SUBSTR, _('-') * size, SUBSTR])
        self.assertEqual(s.find(_(' ')), 0)
        self.assertEqual(s.find(SUBSTR), 0)
        self.assertEqual(s.find(_(' '), sublen), sublen + size)
        self.assertEqual(s.find(SUBSTR, len(SUBSTR)), sublen + size)
        self.assertEqual(s.find(_('i')), SUBSTR.find(_('i')))
        self.assertEqual(s.find(_('i'), sublen),
                         sublen + size + SUBSTR.find(_('i')))
        self.assertEqual(s.find(_('i'), size),
                         sublen + size + SUBSTR.find(_('i')))
        self.assertEqual(s.find(_('j')), -1)

    @bigmemtest(size=_2G, memuse=2)
    def test_index(self, size):
        _ = self.from_latin1
        SUBSTR = _(' abc def ghi')
        sublen = len(SUBSTR)
        s = _('').join([SUBSTR, _('-') * size, SUBSTR])
        self.assertEqual(s.index(_(' ')), 0)
        self.assertEqual(s.index(SUBSTR), 0)
        self.assertEqual(s.index(_(' '), sublen), sublen + size)
        self.assertEqual(s.index(SUBSTR, sublen), sublen + size)
        self.assertEqual(s.index(_('i')), SUBSTR.index(_('i')))
        self.assertEqual(s.index(_('i'), sublen),
                         sublen + size + SUBSTR.index(_('i')))
        self.assertEqual(s.index(_('i'), size),
                         sublen + size + SUBSTR.index(_('i')))
        self.assertRaises(ValueError, s.index, _('j'))

    @bigmemtest(size=_2G, memuse=2)
    def test_isalnum(self, size):
        _ = self.from_latin1
        SUBSTR = _('123456')
        s = _('a') * size + SUBSTR
        self.assertTrue(s.isalnum())
        s += _('.')
        self.assertFalse(s.isalnum())

    @bigmemtest(size=_2G, memuse=2)
    def test_isalpha(self, size):
        _ = self.from_latin1
        SUBSTR = _('zzzzzzz')
        s = _('a') * size + SUBSTR
        self.assertTrue(s.isalpha())
        s += _('.')
        self.assertFalse(s.isalpha())

    @bigmemtest(size=_2G, memuse=2)
    def test_isdigit(self, size):
        _ = self.from_latin1
        SUBSTR = _('123456')
        s = _('9') * size + SUBSTR
        self.assertTrue(s.isdigit())
        s += _('z')
        self.assertFalse(s.isdigit())

    @bigmemtest(size=_2G, memuse=2)
    def test_islower(self, size):
        _ = self.from_latin1
        # Every latin-1 character that is not uppercase.
        chars = _(''.join(
            chr(c) for c in range(255) if not chr(c).isupper()))
        repeats = size // len(chars) + 2
        s = chars * repeats
        self.assertTrue(s.islower())
        s += _('A')
        self.assertFalse(s.islower())

    @bigmemtest(size=_2G, memuse=2)
    def test_isspace(self, size):
        _ = self.from_latin1
        whitespace = _(' \f\n\r\t\v')
        repeats = size // len(whitespace) + 2
        s = whitespace * repeats
        self.assertTrue(s.isspace())
        s += _('j')
        self.assertFalse(s.isspace())

    @bigmemtest(size=_2G, memuse=2)
    def test_istitle(self, size):
        _ = self.from_latin1
        SUBSTR = _('123456')
        s = _('').join([_('A'), _('a') * size, SUBSTR])
        self.assertTrue(s.istitle())
        s += _('A')
        self.assertTrue(s.istitle())
        s += _('aA')
        self.assertFalse(s.istitle())

    @bigmemtest(size=_2G, memuse=2)
    def test_isupper(self, size):
        _ = self.from_latin1
        chars = _(''.join(
            chr(c) for c in range(255) if not chr(c).islower()))
        repeats = size // len(chars) + 2
        s = chars * repeats
        self.assertTrue(s.isupper())
        s += _('a')
        self.assertFalse(s.isupper())

    @bigmemtest(size=_2G, memuse=2)
    def test_join(self, size):
        _ = self.from_latin1
        s = _('A') * size
        x = s.join([_('aaaaa'), _('bbbbb')])
        self.assertEqual(x.count(_('a')), 5)
        self.assertEqual(x.count(_('b')), 5)
        self.assertTrue(x.startswith(_('aaaaaA')))
        self.assertTrue(x.endswith(_('Abbbbb')))

    @bigmemtest(size=_2G + 10, memuse=1)
    def test_ljust(self, size):
        _ = self.from_latin1
        SUBSTR = _(' abc def ghi')
        s = SUBSTR.ljust(size)
        self.assertTrue(s.startswith(SUBSTR + _(' ')))
        self.assertEqual(len(s), size)
        self.assertEqual(s.strip(), SUBSTR.strip())

    @bigmemtest(size=_2G + 10, memuse=2)
    def test_lower(self, size):
        _ = self.from_latin1
        s = _('A') * size
        s = s.lower()
        self.assertEqual(len(s), size)
        self.assertEqual(s.count(_('a')), size)

    @bigmemtest(size=_2G + 10, memuse=1)
    def test_lstrip(self, size):
        _ = self.from_latin1
        SUBSTR = _('abc def ghi')
        s = SUBSTR.rjust(size)
        self.assertEqual(len(s), size)
        self.assertEqual(s.lstrip(), SUBSTR.lstrip())
        del s
        s = SUBSTR.ljust(size)
        self.assertEqual(len(s), size)
        # Type-specific optimization
        if isinstance(s, (str, bytes)):
            stripped = s.lstrip()
            self.assertTrue(stripped is s)

    @bigmemtest(size=_2G + 10, memuse=2)
    def test_replace(self, size):
        _ = self.from_latin1
        replacement = _('a')
        s = _(' ') * size
        s = s.replace(_(' '), replacement)
        self.assertEqual(len(s), size)
        self.assertEqual(s.count(replacement), size)
        s = s.replace(replacement, _(' '), size - 4)
        self.assertEqual(len(s), size)
        self.assertEqual(s.count(replacement), 4)
        self.assertEqual(s[-10:], _('      aaaa'))

    @bigmemtest(size=_2G, memuse=2)
    def test_rfind(self, size):
        _ = self.from_latin1
        SUBSTR = _(' abc def ghi')
        sublen = len(SUBSTR)
        s = _('').join([SUBSTR, _('-') * size, SUBSTR])
        self.assertEqual(s.rfind(_(' ')), sublen + size + SUBSTR.rfind(_(' ')))
        self.assertEqual(s.rfind(SUBSTR), sublen + size)
        self.assertEqual(s.rfind(_(' '), 0, size), SUBSTR.rfind(_(' ')))
        self.assertEqual(s.rfind(SUBSTR, 0, sublen + size), 0)
        self.assertEqual(s.rfind(_('i')), sublen + size + SUBSTR.rfind(_('i')))
        self.assertEqual(s.rfind(_('i'), 0, sublen), SUBSTR.rfind(_('i')))
        self.assertEqual(s.rfind(_('i'), 0, sublen + size),
                         SUBSTR.rfind(_('i')))
        self.assertEqual(s.rfind(_('j')), -1)

    @bigmemtest(size=_2G, memuse=2)
    def test_rindex(self, size):
        _ = self.from_latin1
        SUBSTR = _(' abc def ghi')
        sublen = len(SUBSTR)
        s = _('').join([SUBSTR, _('-') * size, SUBSTR])
        self.assertEqual(s.rindex(_(' ')),
                         sublen + size + SUBSTR.rindex(_(' ')))
        self.assertEqual(s.rindex(SUBSTR), sublen + size)
        self.assertEqual(s.rindex(_(' '), 0, sublen + size - 1),
                         SUBSTR.rindex(_(' ')))
        self.assertEqual(s.rindex(SUBSTR, 0, sublen + size), 0)
        self.assertEqual(s.rindex(_('i')),
                         sublen + size + SUBSTR.rindex(_('i')))
        self.assertEqual(s.rindex(_('i'), 0, sublen), SUBSTR.rindex(_('i')))
        self.assertEqual(s.rindex(_('i'), 0, sublen + size),
                         SUBSTR.rindex(_('i')))
        self.assertRaises(ValueError, s.rindex, _('j'))

    @bigmemtest(size=_2G + 10, memuse=1)
    def test_rjust(self, size):
        _ = self.from_latin1
        SUBSTR = _(' abc def ghi')
        s = SUBSTR.ljust(size)
        self.assertTrue(s.startswith(SUBSTR + _(' ')))
        self.assertEqual(len(s), size)
        self.assertEqual(s.strip(), SUBSTR.strip())

    @bigmemtest(size=_2G + 10, memuse=1)
    def test_rstrip(self, size):
        _ = self.from_latin1
        SUBSTR = _(' abc def ghi')
        s = SUBSTR.ljust(size)
        self.assertEqual(len(s), size)
        self.assertEqual(s.rstrip(), SUBSTR.rstrip())
        del s
        s = SUBSTR.rjust(size)
        self.assertEqual(len(s), size)
        # Type-specific optimization
        if isinstance(s, (str, bytes)):
            stripped = s.rstrip()
            self.assertTrue(stripped is s)

    # The test takes about size bytes to build a string, and then about
    # sqrt(size) substrings of sqrt(size) in size and a list to
    # hold sqrt(size) items. It's close but just over 2x size.
    @bigmemtest(size=_2G, memuse=2.1)
    def test_split_small(self, size):
        _ = self.from_latin1
        # Crudely calculate an estimate so that the result of s.split won't
        # take up an inordinate amount of memory
        chunksize = int(size ** 0.5 + 2)
        SUBSTR = _('a') + _(' ') * chunksize
        s = SUBSTR * chunksize
        l = s.split()
        self.assertEqual(len(l), chunksize)
        expected = _('a')
        for item in l:
            self.assertEqual(item, expected)
        del l
        l = s.split(_('a'))
        self.assertEqual(len(l), chunksize + 1)
        expected = _(' ') * chunksize
        for item in filter(None, l):
            self.assertEqual(item, expected)

    # Allocates a string of twice size (and briefly two) and a list of
    # size. Because of internal affairs, the s.split() call produces a
    # list of size times the same one-character string, so we only
    # suffer for the list size. (Otherwise, it'd cost another 48 times
    # size in bytes!) Nevertheless, a list of size takes
    # 8*size bytes.
    @bigmemtest(size=_2G + 5, memuse=2 * ascii_char_size + 8)
    def test_split_large(self, size):
        _ = self.from_latin1
        s = _(' a') * size + _(' ')
        l = s.split()
        self.assertEqual(len(l), size)
        self.assertEqual(set(l), set([_('a')]))
        del l
        l = s.split(_('a'))
        self.assertEqual(len(l), size + 1)
        self.assertEqual(set(l), set([_(' ')]))

    @bigmemtest(size=_2G, memuse=2.1)
    def test_splitlines(self, size):
        _ = self.from_latin1
        # Crudely calculate an estimate so that the result of s.split won't
        # take up an inordinate amount of memory
        chunksize = int(size ** 0.5 + 2) // 2
        SUBSTR = _(' ') * chunksize + _('\n') + _(' ') * chunksize + _('\r\n')
        s = SUBSTR * (chunksize * 2)
        l = s.splitlines()
        self.assertEqual(len(l), chunksize * 4)
        expected = _(' ') * chunksize
        for item in l:
            self.assertEqual(item, expected)

    @bigmemtest(size=_2G, memuse=2)
    def test_startswith(self, size):
        _ = self.from_latin1
        SUBSTR = _(' abc def ghi')
        s = _('-') * size + SUBSTR
        self.assertTrue(s.startswith(s))
        self.assertTrue(s.startswith(_('-') * size))
        self.assertFalse(s.startswith(SUBSTR))

    @bigmemtest(size=_2G, memuse=1)
    def test_strip(self, size):
        _ = self.from_latin1
        SUBSTR = _('  abc def ghi  ')
        s = SUBSTR.rjust(size)
        self.assertEqual(len(s), size)
        self.assertEqual(s.strip(), SUBSTR.strip())
        del s
        s = SUBSTR.ljust(size)
        self.assertEqual(len(s), size)
        self.assertEqual(s.strip(), SUBSTR.strip())

    def _test_swapcase(self, size):
        # Helper, wrapped by subclasses with the right memuse.
        _ = self.from_latin1
        SUBSTR = _("aBcDeFG12.'\xa9\x00")
        sublen = len(SUBSTR)
        repeats = size // sublen + 2
        s = SUBSTR * repeats
        s = s.swapcase()
        self.assertEqual(len(s), sublen * repeats)
        self.assertEqual(s[:sublen * 3], SUBSTR.swapcase() * 3)
        self.assertEqual(s[-sublen * 3:], SUBSTR.swapcase() * 3)

    def _test_title(self, size):
        # Helper, wrapped by subclasses with the right memuse.
        _ = self.from_latin1
        SUBSTR = _('SpaaHAaaAaham')
        s = SUBSTR * (size // len(SUBSTR) + 2)
        s = s.title()
        self.assertTrue(s.startswith((SUBSTR * 3).title()))
        self.assertTrue(s.endswith(SUBSTR.lower() * 3))

    @bigmemtest(size=_2G, memuse=2)
    def test_translate(self, size):
        _ = self.from_latin1
        SUBSTR = _('aZz.z.Aaz.')
        trans = bytes.maketrans(b'.aZ', b'-!$')
        sublen = len(SUBSTR)
        repeats = size // sublen + 2
        s = SUBSTR * repeats
        s = s.translate(trans)
        self.assertEqual(len(s), repeats * sublen)
        self.assertEqual(s[:sublen], SUBSTR.translate(trans))
        self.assertEqual(s[-sublen:], SUBSTR.translate(trans))
        self.assertEqual(s.count(_('.')), 0)
        self.assertEqual(s.count(_('!')), repeats * 2)
        self.assertEqual(s.count(_('z')), repeats * 3)

    @bigmemtest(size=_2G + 5, memuse=2)
    def test_upper(self, size):
        _ = self.from_latin1
        s = _('a') * size
        s = s.upper()
        self.assertEqual(len(s), size)
        self.assertEqual(s.count(_('A')), size)

    @bigmemtest(size=_2G + 20, memuse=1)
    def test_zfill(self, size):
        _ = self.from_latin1
        SUBSTR = _('-568324723598234')
        s = SUBSTR.zfill(size)
        self.assertTrue(s.endswith(_('0') + SUBSTR[1:]))
        self.assertTrue(s.startswith(_('-0')))
        self.assertEqual(len(s), size)
        self.assertEqual(s.count(_('0')), size - len(SUBSTR))

    # This test is meaningful even with size < 2G, as long as the
    # doubled string is > 2G (but it tests more if both are > 2G :)
    @bigmemtest(size=_1G + 2, memuse=3)
    def test_concat(self, size):
        _ = self.from_latin1
        s = _('.') * size
        self.assertEqual(len(s), size)
        s = s + s
        self.assertEqual(len(s), size * 2)
        self.assertEqual(s.count(_('.')), size * 2)

    # This test is meaningful even with size < 2G, as long as the
    # repeated string is > 2G (but it tests more if both are > 2G :)
    @bigmemtest(size=_1G + 2, memuse=3)
    def test_repeat(self, size):
        _ = self.from_latin1
        s = _('.') * size
        self.assertEqual(len(s), size)
        s = s * 2
        self.assertEqual(len(s), size * 2)
        self.assertEqual(s.count(_('.')), size * 2)

    @bigmemtest(size=_2G + 20, memuse=2)
    def test_slice_and_getitem(self, size):
        _ = self.from_latin1
        SUBSTR = _('0123456789')
        sublen = len(SUBSTR)
        s = SUBSTR * (size // sublen)
        stepsize = len(s) // 100
        stepsize = stepsize - (stepsize % sublen)
        for i in range(0, len(s) - stepsize, stepsize):
            self.assertEqual(s[i], SUBSTR[0])
            self.assertEqual(s[i:i + sublen], SUBSTR)
            self.assertEqual(s[i:i + sublen:2], SUBSTR[::2])
            if i > 0:
                self.assertEqual(s[i + sublen - 1:i - 1:-3],
                                 SUBSTR[sublen::-3])
        # Make sure we do some slicing and indexing near the end of the
        # string, too.
        self.assertEqual(s[len(s) - 1], SUBSTR[-1])
        self.assertEqual(s[-1], SUBSTR[-1])
        self.assertEqual(s[len(s) - 10], SUBSTR[0])
        self.assertEqual(s[-sublen], SUBSTR[0])
        self.assertEqual(s[len(s):], _(''))
        self.assertEqual(s[len(s) - 1:], SUBSTR[-1:])
        self.assertEqual(s[-1:], SUBSTR[-1:])
        self.assertEqual(s[len(s) - sublen:], SUBSTR)
        self.assertEqual(s[-sublen:], SUBSTR)
        self.assertEqual(len(s[:]), len(s))
        self.assertEqual(len(s[:len(s) - 5]), len(s) - 5)
        self.assertEqual(len(s[5:-5]), len(s) - 10)
        self.assertRaises(IndexError, operator.getitem, s, len(s))
        self.assertRaises(IndexError, operator.getitem, s, len(s) + 1)
        # NOTE(review): '+' binds tighter than '<<', so this index is
        # (len(s) + 1) << 31 — an enormous out-of-range value.  It still
        # raises IndexError either way; presumably intentional, but worth
        # confirming the intended expression.
        self.assertRaises(IndexError, operator.getitem, s, len(s) + 1<<31)

    @bigmemtest(size=_2G, memuse=2)
    def test_contains(self, size):
        _ = self.from_latin1
        SUBSTR = _('0123456789')
        edge = _('-') * (size // 2)
        s = _('').join([edge, SUBSTR, edge])
        del edge
        self.assertTrue(SUBSTR in s)
        self.assertFalse(SUBSTR * 2 in s)
        self.assertTrue(_('-') in s)
        self.assertFalse(_('a') in s)
        s += _('a')
        self.assertTrue(_('a') in s)

    @bigmemtest(size=_2G + 10, memuse=2)
    def test_compare(self, size):
        _ = self.from_latin1
        s1 = _('-') * size
        s2 = _('-') * size
        self.assertTrue(s1 == s2)
        del s2
        s2 = s1 + _('a')
        self.assertFalse(s1 == s2)
        del s2
        s2 = _('.') * size
        self.assertFalse(s1 == s2)

    @bigmemtest(size=_2G + 10, memuse=1)
    def test_hash(self, size):
        # Not sure if we can do any meaningful tests here... Even if we
        # start relying on the exact algorithm used, the result will be
        # different depending on the size of the C 'long int'. Even this
        # test is dodgy (there's no *guarantee* that the two things should
        # have a different hash, even if they, in the current
        # implementation, almost always do.)
        _ = self.from_latin1
        s = _('\x00') * size
        h1 = hash(s)
        del s
        s = _('\x00') * (size + 1)
        self.assertNotEqual(h1, hash(s))
class StrTest(unittest.TestCase, BaseStrTest):
    """Run the shared big-string tests against str, plus str-only tests
    (encoding, %-formatting, repr/ascii of non-ASCII data)."""

    def from_latin1(self, s):
        # str is tested directly; the latin-1 literal is already a str.
        return s

    def basic_encode_test(self, size, enc, c='.', expectedsize=None):
        # Encode a size-character string of `c` with codec `enc` and
        # check the encoded length (defaults to `size`).
        if expectedsize is None:
            expectedsize = size
        try:
            s = c * size
            self.assertEqual(len(s.encode(enc)), expectedsize)
        finally:
            s = None

    def setUp(self):
        # HACK: adjust memory use of tests inherited from BaseStrTest
        # according to character size.
        self._adjusted = {}
        for name in dir(BaseStrTest):
            if not name.startswith('test_'):
                continue
            meth = getattr(type(self), name)
            try:
                memuse = meth.memuse
            except AttributeError:
                continue
            meth.memuse = ascii_char_size * memuse
            self._adjusted[name] = memuse

    def tearDown(self):
        # Restore the original memuse values patched in setUp().
        for name, memuse in self._adjusted.items():
            getattr(type(self), name).memuse = memuse

    @bigmemtest(size=_2G, memuse=ucs4_char_size * 3)
    def test_capitalize(self, size):
        self._test_capitalize(size)

    @bigmemtest(size=_2G, memuse=ucs4_char_size * 3)
    def test_title(self, size):
        self._test_title(size)

    @bigmemtest(size=_2G, memuse=ucs4_char_size * 3)
    def test_swapcase(self, size):
        self._test_swapcase(size)

    # Many codecs convert to the legacy representation first, explaining
    # why we add 'ucs4_char_size' to the 'memuse' below.
    @bigmemtest(size=_2G + 2, memuse=ascii_char_size + 1)
    def test_encode(self, size):
        return self.basic_encode_test(size, 'utf-8')

    @bigmemtest(size=_4G // 6 + 2, memuse=ascii_char_size + ucs4_char_size + 1)
    def test_encode_raw_unicode_escape(self, size):
        try:
            return self.basic_encode_test(size, 'raw_unicode_escape')
        except MemoryError:
            pass # acceptable on 32-bit

    @bigmemtest(size=_4G // 5 + 70, memuse=ascii_char_size + ucs4_char_size + 1)
    def test_encode_utf7(self, size):
        try:
            return self.basic_encode_test(size, 'utf7')
        except MemoryError:
            pass # acceptable on 32-bit

    @bigmemtest(size=_4G // 4 + 5, memuse=ascii_char_size + ucs4_char_size + 4)
    def test_encode_utf32(self, size):
        try:
            # +4 in expectedsize accounts for the UTF-32 BOM.
            return self.basic_encode_test(size, 'utf32', expectedsize=4 * size + 4)
        except MemoryError:
            pass # acceptable on 32-bit

    @bigmemtest(size=_2G - 1, memuse=ascii_char_size + 1)
    def test_encode_ascii(self, size):
        return self.basic_encode_test(size, 'ascii', c='A')

    # str % (...) uses a Py_UCS4 intermediate representation
    @bigmemtest(size=_2G + 10, memuse=ascii_char_size * 2 + ucs4_char_size)
    def test_format(self, size):
        s = '-' * size
        sf = '%s' % (s,)
        self.assertTrue(s == sf)
        del sf
        sf = '..%s..' % (s,)
        self.assertEqual(len(sf), len(s) + 4)
        self.assertTrue(sf.startswith('..-'))
        self.assertTrue(sf.endswith('-..'))
        del s, sf
        size //= 2
        edge = '-' * size
        s = ''.join([edge, '%s', edge])
        del edge
        s = s % '...'
        self.assertEqual(len(s), size * 2 + 3)
        self.assertEqual(s.count('.'), 3)
        self.assertEqual(s.count('-'), size * 2)

    @bigmemtest(size=_2G + 10, memuse=ascii_char_size * 2)
    def test_repr_small(self, size):
        s = '-' * size
        s = repr(s)
        self.assertEqual(len(s), size + 2)
        self.assertEqual(s[0], "'")
        self.assertEqual(s[-1], "'")
        self.assertEqual(s.count('-'), size)
        del s
        # repr() will create a string four times as large as this 'binary
        # string', but we don't want to allocate much more than twice
        # size in total. (We do extra testing in test_repr_large())
        size = size // 5 * 2
        s = '\x00' * size
        s = repr(s)
        self.assertEqual(len(s), size * 4 + 2)
        self.assertEqual(s[0], "'")
        self.assertEqual(s[-1], "'")
        self.assertEqual(s.count('\\'), size)
        self.assertEqual(s.count('0'), size * 2)

    @bigmemtest(size=_2G + 10, memuse=ascii_char_size * 5)
    def test_repr_large(self, size):
        s = '\x00' * size
        s = repr(s)
        self.assertEqual(len(s), size * 4 + 2)
        self.assertEqual(s[0], "'")
        self.assertEqual(s[-1], "'")
        self.assertEqual(s.count('\\'), size)
        self.assertEqual(s.count('0'), size * 2)

    # ascii() calls encode('ascii', 'backslashreplace'), which itself
    # creates a temporary Py_UNICODE representation in addition to the
    # original (Py_UCS2) one
    # There's also some overallocation when resizing the ascii() result
    # that isn't taken into account here.
    @bigmemtest(size=_2G // 5 + 1, memuse=ucs2_char_size +
                ucs4_char_size + ascii_char_size * 6)
    def test_unicode_repr(self, size):
        # Use an assigned, but not printable code point.
        # It is in the range of the low surrogates \uDC00-\uDFFF.
        char = "\uDCBA"
        s = char * size
        try:
            for f in (repr, ascii):
                r = f(s)
                self.assertEqual(len(r), 2 + (len(f(char)) - 2) * size)
                self.assertTrue(r.endswith(r"\udcba'"), r[-10:])
                r = None
        finally:
            r = s = None

    @bigmemtest(size=_2G // 5 + 1, memuse=ucs4_char_size * 2 + ascii_char_size * 10)
    def test_unicode_repr_wide(self, size):
        # Same as test_unicode_repr, but with a non-BMP code point.
        char = "\U0001DCBA"
        s = char * size
        try:
            for f in (repr, ascii):
                r = f(s)
                self.assertEqual(len(r), 2 + (len(f(char)) - 2) * size)
                self.assertTrue(r.endswith(r"\U0001dcba'"), r[-12:])
                r = None
        finally:
            r = s = None

    # The original test_translate is overridden here, so as to get the
    # correct size estimate: str.translate() uses an intermediate Py_UCS4
    # representation.
    @bigmemtest(size=_2G, memuse=ascii_char_size * 2 + ucs4_char_size)
    def test_translate(self, size):
        _ = self.from_latin1
        SUBSTR = _('aZz.z.Aaz.')
        trans = {
            ord(_('.')): _('-'),
            ord(_('a')): _('!'),
            ord(_('Z')): _('$'),
        }
        sublen = len(SUBSTR)
        repeats = size // sublen + 2
        s = SUBSTR * repeats
        s = s.translate(trans)
        self.assertEqual(len(s), repeats * sublen)
        self.assertEqual(s[:sublen], SUBSTR.translate(trans))
        self.assertEqual(s[-sublen:], SUBSTR.translate(trans))
        self.assertEqual(s.count(_('.')), 0)
        self.assertEqual(s.count(_('!')), repeats * 2)
        self.assertEqual(s.count(_('z')), repeats * 3)
class BytesTest(unittest.TestCase, BaseStrTest):
    """Run the shared big-string tests against bytes objects."""

    def from_latin1(self, s):
        # The bytes under test are the latin-1 encoding of the literal.
        return s.encode("latin-1")

    @bigmemtest(size=_2G + 2, memuse=1 + ascii_char_size)
    def test_decode(self, size):
        data = self.from_latin1('.') * size
        decoded = data.decode('utf-8')
        self.assertEqual(len(decoded), size)

    @bigmemtest(size=_2G, memuse=2)
    def test_capitalize(self, size):
        self._test_capitalize(size)

    @bigmemtest(size=_2G, memuse=2)
    def test_title(self, size):
        self._test_title(size)

    @bigmemtest(size=_2G, memuse=2)
    def test_swapcase(self, size):
        self._test_swapcase(size)
class BytearrayTest(unittest.TestCase, BaseStrTest):
    """Run the shared big-string tests against bytearray objects."""

    def from_latin1(self, s):
        # The bytearray under test wraps the latin-1 encoding of the literal.
        encoded = s.encode("latin-1")
        return bytearray(encoded)

    @bigmemtest(size=_2G + 2, memuse=1 + ascii_char_size)
    def test_decode(self, size):
        buf = self.from_latin1('.') * size
        self.assertEqual(len(buf.decode('utf-8')), size)

    @bigmemtest(size=_2G, memuse=2)
    def test_capitalize(self, size):
        self._test_capitalize(size)

    @bigmemtest(size=_2G, memuse=2)
    def test_title(self, size):
        self._test_title(size)

    @bigmemtest(size=_2G, memuse=2)
    def test_swapcase(self, size):
        self._test_swapcase(size)

    # Mask out inherited tests that do not apply here (bytearray is
    # unhashable; the large split test is disabled for this type).
    test_hash = None
    test_split_large = None
class TupleTest(unittest.TestCase):
# Tuples have a small, fixed-sized head and an array of pointers to
# data. Since we're testing 64-bit addressing, we can assume that the
# pointers are 8 bytes, and that thus that the tuples take up 8 bytes
# per size.
# As a side-effect of testing long tuples, these tests happen to test
# having more than 2<<31 references to any given object. Hence the
# use of different types of objects as contents in different tests.
@bigmemtest(size=_2G + 2, memuse=16)
def test_compare(self, size):
t1 = ('',) * size
t2 = ('',) * size
self.assertTrue(t1 == t2)
del t2
t2 = ('',) * (size + 1)
self.assertFalse(t1 == t2)
del t2
t2 = (1,) * size
self.assertFalse(t1 == t2)
# Test concatenating into a single tuple of more than 2G in length,
# and concatenating a tuple of more than 2G in length separately, so
# the smaller test still gets run even if there isn't memory for the
# larger test (but we still let the tester know the larger test is
# skipped, in verbose mode.)
def basic_concat_test(self, size):
t = ((),) * size
self.assertEqual(len(t), size)
t = t + t
self.assertEqual(len(t), size * 2)
@bigmemtest(size=_2G // 2 + 2, memuse=24)
def test_concat_small(self, size):
return self.basic_concat_test(size)
@bigmemtest(size=_2G + 2, memuse=24)
def test_concat_large(self, size):
return self.basic_concat_test(size)
@bigmemtest(size=_2G // 5 + 10, memuse=8 * 5)
def test_contains(self, size):
t = (1, 2, 3, 4, 5) * size
self.assertEqual(len(t), size * 5)
self.assertTrue(5 in t)
self.assertFalse((1, 2, 3, 4, 5) in t)
self.assertFalse(0 in t)
@bigmemtest(size=_2G + 10, memuse=8)
def test_hash(self, size):
t1 = (0,) * size
h1 = hash(t1)
del t1
t2 = (0,) * (size + 1)
self.assertFalse(h1 == hash(t2))
@bigmemtest(size=_2G + 10, memuse=8)
def test_index_and_slice(self, size):
t = (None,) * size
self.assertEqual(len(t), size)
self.assertEqual(t[-1], None)
self.assertEqual(t[5], None)
self.assertEqual(t[size - 1], None)
self.assertRaises(IndexError, operator.getitem, t, size)
self.assertEqual(t[:5], (None,) * 5)
self.assertEqual(t[-5:], (None,) * 5)
self.assertEqual(t[20:25], (None,) * 5)
self.assertEqual(t[-25:-20], (None,) * 5)
self.assertEqual(t[size - 5:], (None,) * 5)
self.assertEqual(t[size - 5:size], (None,) * 5)
self.assertEqual(t[size - 6:size - 2], (None,) * 4)
self.assertEqual(t[size:size], ())
self.assertEqual(t[size:size+5], ())
# Like test_concat, split in two.
def basic_test_repeat(self, size):
t = ('',) * size
self.assertEqual(len(t), size)
t = t * 2
self.assertEqual(len(t), size * 2)
@bigmemtest(size=_2G // 2 + 2, memuse=24)
def test_repeat_small(self, size):
return self.basic_test_repeat(size)
@bigmemtest(size=_2G + 2, memuse=24)
def test_repeat_large(self, size):
return self.basic_test_repeat(size)
@bigmemtest(size=_1G - 1, memuse=12)
def test_repeat_large_2(self, size):
return self.basic_test_repeat(size)
@bigmemtest(size=_1G - 1, memuse=9)
def test_from_2G_generator(self, size):
self.skipTest("test needs much more memory than advertised, see issue5438")
try:
t = tuple(range(size))
except MemoryError:
pass # acceptable on 32-bit
else:
count = 0
for item in t:
self.assertEqual(item, count)
count += 1
self.assertEqual(count, size)
@bigmemtest(size=_1G - 25, memuse=9)
def test_from_almost_2G_generator(self, size):
    # NOTE: unconditionally skipped; dead code kept for when issue5438
    # (real memory use far exceeds the advertised memuse) is resolved.
    self.skipTest("test needs much more memory than advertised, see issue5438")
    try:
        t = tuple(range(size))
        # Verify the tuple really contains 0..size-1 in order.
        count = 0
        for item in t:
            self.assertEqual(item, count)
            count += 1
        self.assertEqual(count, size)
    except MemoryError:
        pass # acceptable, expected on 32-bit
# Like test_concat, split in two.
def basic_test_repr(self, size):
    """repr() of a huge all-zeros tuple has an exactly predictable shape."""
    tup = (0,) * size
    text = repr(tup)
    # Each element contributes exactly "0, " (three characters); the two
    # parentheses make up for the missing trailing ", ".
    self.assertEqual(len(text), size * 3)
    self.assertEqual(text[:5], '(0, 0')
    self.assertEqual(text[-5:], '0, 0)')
    self.assertEqual(text.count('0'), size)
@bigmemtest(size=_2G // 3 + 2, memuse=8 + 3 * ascii_char_size)
def test_repr_small(self, size):
    # Size just over _2G // 3, so the 3-chars-per-item repr crosses _2G.
    return self.basic_test_repr(size)
@bigmemtest(size=_2G + 2, memuse=8 + 3 * ascii_char_size)
def test_repr_large(self, size):
    # Source tuple itself already exceeds _2G items.
    return self.basic_test_repr(size)
class ListTest(unittest.TestCase):
    """Bigmem tests for list: operations on lists of more than _2G items.

    Like tuples, lists have a small, fixed-sized head and an array of
    pointers to data, so 8 bytes per size.  Also like tuples, we make the
    lists hold references to various objects to test their refcount
    limits.

    The only code changes relative to the historical version are assertion
    idioms: assertIn/assertNotIn, assertIs/assertIsNot and
    assertEqual/assertNotEqual replace the weaker assertTrue/assertFalse
    forms so failures produce informative messages.  Allocation patterns
    (which the memuse= figures depend on) are untouched.
    """

    @bigmemtest(size=_2G + 2, memuse=16)
    def test_compare(self, size):
        l1 = [''] * size
        l2 = [''] * size
        # assertEqual/assertNotEqual report operands on failure, unlike
        # assertTrue(l1 == l2) / assertFalse(l1 == l2).
        self.assertEqual(l1, l2)
        del l2
        l2 = [''] * (size + 1)
        self.assertNotEqual(l1, l2)
        del l2
        l2 = [2] * size
        self.assertNotEqual(l1, l2)

    # Test concatenating into a single list of more than 2G in length,
    # and concatenating a list of more than 2G in length separately, so
    # the smaller test still gets run even if there isn't memory for the
    # larger test (but we still let the tester know the larger test is
    # skipped, in verbose mode.)
    def basic_test_concat(self, size):
        l = [[]] * size
        self.assertEqual(len(l), size)
        l = l + l
        self.assertEqual(len(l), size * 2)

    @bigmemtest(size=_2G // 2 + 2, memuse=24)
    def test_concat_small(self, size):
        return self.basic_test_concat(size)

    @bigmemtest(size=_2G + 2, memuse=24)
    def test_concat_large(self, size):
        return self.basic_test_concat(size)

    def basic_test_inplace_concat(self, size):
        l = [sys.stdout] * size
        l += l
        self.assertEqual(len(l), size * 2)
        # Every slot holds the very same object.
        self.assertIs(l[0], l[-1])
        self.assertIs(l[size - 1], l[size + 1])

    @bigmemtest(size=_2G // 2 + 2, memuse=24)
    def test_inplace_concat_small(self, size):
        return self.basic_test_inplace_concat(size)

    @bigmemtest(size=_2G + 2, memuse=24)
    def test_inplace_concat_large(self, size):
        return self.basic_test_inplace_concat(size)

    @bigmemtest(size=_2G // 5 + 10, memuse=8 * 5)
    def test_contains(self, size):
        l = [1, 2, 3, 4, 5] * size
        self.assertEqual(len(l), size * 5)
        self.assertIn(5, l)
        # 'in' tests item equality, not sublist containment.
        self.assertNotIn([1, 2, 3, 4, 5], l)
        self.assertNotIn(0, l)

    @bigmemtest(size=_2G + 10, memuse=8)
    def test_hash(self, size):
        # Lists are unhashable regardless of length.
        l = [0] * size
        self.assertRaises(TypeError, hash, l)

    @bigmemtest(size=_2G + 10, memuse=8)
    def test_index_and_slice(self, size):
        # Reads: 64-bit indexing and slicing near both ends.
        l = [None] * size
        self.assertEqual(len(l), size)
        self.assertEqual(l[-1], None)
        self.assertEqual(l[5], None)
        self.assertEqual(l[size - 1], None)
        self.assertRaises(IndexError, operator.getitem, l, size)
        self.assertEqual(l[:5], [None] * 5)
        self.assertEqual(l[-5:], [None] * 5)
        self.assertEqual(l[20:25], [None] * 5)
        self.assertEqual(l[-25:-20], [None] * 5)
        self.assertEqual(l[size - 5:], [None] * 5)
        self.assertEqual(l[size - 5:size], [None] * 5)
        self.assertEqual(l[size - 6:size - 2], [None] * 4)
        self.assertEqual(l[size:size], [])
        self.assertEqual(l[size:size+5], [])

        # Writes: item and slice assignment with 64-bit indexes.
        l[size - 2] = 5
        self.assertEqual(len(l), size)
        self.assertEqual(l[-3:], [None, 5, None])
        self.assertEqual(l.count(5), 1)
        self.assertRaises(IndexError, operator.setitem, l, size, 6)
        self.assertEqual(len(l), size)

        # Slice assignment of mismatched length shrinks the list.
        l[size - 7:] = [1, 2, 3, 4, 5]
        size -= 2
        self.assertEqual(len(l), size)
        self.assertEqual(l[-7:], [None, None, 1, 2, 3, 4, 5])

        l[:7] = [1, 2, 3, 4, 5]
        size -= 2
        self.assertEqual(len(l), size)
        self.assertEqual(l[:7], [1, 2, 3, 4, 5, None, None])

        # Deletion: single items and slices at both ends.
        del l[size - 1]
        size -= 1
        self.assertEqual(len(l), size)
        self.assertEqual(l[-1], 4)

        del l[-2:]
        size -= 2
        self.assertEqual(len(l), size)
        self.assertEqual(l[-1], 2)

        del l[0]
        size -= 1
        self.assertEqual(len(l), size)
        self.assertEqual(l[0], 2)

        del l[:2]
        size -= 2
        self.assertEqual(len(l), size)
        self.assertEqual(l[0], 4)

    # Like test_concat, split in two.
    def basic_test_repeat(self, size):
        # Repeating an empty list stays empty, whatever the count.
        l = [] * size
        self.assertFalse(l)
        l = [''] * size
        self.assertEqual(len(l), size)
        l = l * 2
        self.assertEqual(len(l), size * 2)

    @bigmemtest(size=_2G // 2 + 2, memuse=24)
    def test_repeat_small(self, size):
        return self.basic_test_repeat(size)

    @bigmemtest(size=_2G + 2, memuse=24)
    def test_repeat_large(self, size):
        return self.basic_test_repeat(size)

    def basic_test_inplace_repeat(self, size):
        l = ['']
        l *= size
        self.assertEqual(len(l), size)
        self.assertIs(l[0], l[-1])
        del l

        l = [''] * size
        l *= 2
        self.assertEqual(len(l), size * 2)
        self.assertIs(l[size - 1], l[-1])

    @bigmemtest(size=_2G // 2 + 2, memuse=16)
    def test_inplace_repeat_small(self, size):
        return self.basic_test_inplace_repeat(size)

    @bigmemtest(size=_2G + 2, memuse=16)
    def test_inplace_repeat_large(self, size):
        return self.basic_test_inplace_repeat(size)

    def basic_test_repr(self, size):
        l = [0] * size
        s = repr(l)
        # The repr of a list of 0's is exactly three times the list length.
        self.assertEqual(len(s), size * 3)
        self.assertEqual(s[:5], '[0, 0')
        self.assertEqual(s[-5:], '0, 0]')
        self.assertEqual(s.count('0'), size)

    @bigmemtest(size=_2G // 3 + 2, memuse=8 + 3 * ascii_char_size)
    def test_repr_small(self, size):
        return self.basic_test_repr(size)

    @bigmemtest(size=_2G + 2, memuse=8 + 3 * ascii_char_size)
    def test_repr_large(self, size):
        return self.basic_test_repr(size)

    # list overallocates ~1/8th of the total size (on first expansion) so
    # the single list.append call puts memuse at 9 bytes per size.
    @bigmemtest(size=_2G, memuse=9)
    def test_append(self, size):
        l = [object()] * size
        l.append(object())
        self.assertEqual(len(l), size + 1)
        # The repeated slots share one object; the appended one is new.
        self.assertIs(l[-3], l[-2])
        self.assertIsNot(l[-2], l[-1])

    @bigmemtest(size=_2G // 5 + 2, memuse=8 * 5)
    def test_count(self, size):
        l = [1, 2, 3, 4, 5] * size
        self.assertEqual(l.count(1), size)
        # count() compares with ==, and int 1 never equals str "1".
        self.assertEqual(l.count("1"), 0)

    def basic_test_extend(self, size):
        l = [object] * size
        l.extend(l)
        self.assertEqual(len(l), size * 2)
        self.assertIs(l[0], l[-1])
        self.assertIs(l[size - 1], l[size + 1])

    @bigmemtest(size=_2G // 2 + 2, memuse=16)
    def test_extend_small(self, size):
        return self.basic_test_extend(size)

    @bigmemtest(size=_2G + 2, memuse=16)
    def test_extend_large(self, size):
        return self.basic_test_extend(size)

    @bigmemtest(size=_2G // 5 + 2, memuse=8 * 5)
    def test_index(self, size):
        l = [1, 2, 3, 4, 5] * size
        size *= 5
        self.assertEqual(l.index(1), 0)
        self.assertEqual(l.index(5, size - 5), size - 1)
        self.assertEqual(l.index(5, size - 5, size), size - 1)
        self.assertRaises(ValueError, l.index, 1, size - 4, size)
        self.assertRaises(ValueError, l.index, 6)

    # This tests suffers from overallocation, just like test_append.
    @bigmemtest(size=_2G + 10, memuse=9)
    def test_insert(self, size):
        l = [1.0] * size
        l.insert(size - 1, "A")
        size += 1
        self.assertEqual(len(l), size)
        self.assertEqual(l[-3:], [1.0, "A", 1.0])

        # Inserting past the end appends.
        l.insert(size + 1, "B")
        size += 1
        self.assertEqual(len(l), size)
        self.assertEqual(l[-3:], ["A", 1.0, "B"])

        l.insert(1, "C")
        size += 1
        self.assertEqual(len(l), size)
        self.assertEqual(l[:3], [1.0, "C", 1.0])
        self.assertEqual(l[size - 3:], ["A", 1.0, "B"])

    @bigmemtest(size=_2G // 5 + 4, memuse=8 * 5)
    def test_pop(self, size):
        l = ["a", "b", "c", "d", "e"] * size
        size *= 5
        self.assertEqual(len(l), size)

        # pop() from the end, the start, and an interior 64-bit index.
        item = l.pop()
        size -= 1
        self.assertEqual(len(l), size)
        self.assertEqual(item, "e")
        self.assertEqual(l[-2:], ["c", "d"])

        item = l.pop(0)
        size -= 1
        self.assertEqual(len(l), size)
        self.assertEqual(item, "a")
        self.assertEqual(l[:2], ["b", "c"])

        item = l.pop(size - 2)
        size -= 1
        self.assertEqual(len(l), size)
        self.assertEqual(item, "c")
        self.assertEqual(l[-2:], ["b", "d"])

    @bigmemtest(size=_2G + 10, memuse=8)
    def test_remove(self, size):
        l = [10] * size
        self.assertEqual(len(l), size)

        l.remove(10)
        size -= 1
        self.assertEqual(len(l), size)

        # Because of the earlier l.remove(), this append doesn't trigger
        # a resize.
        l.append(5)
        size += 1
        self.assertEqual(len(l), size)
        self.assertEqual(l[-2:], [10, 5])
        l.remove(5)
        size -= 1
        self.assertEqual(len(l), size)
        self.assertEqual(l[-2:], [10, 10])

    @bigmemtest(size=_2G // 5 + 2, memuse=8 * 5)
    def test_reverse(self, size):
        l = [1, 2, 3, 4, 5] * size
        l.reverse()
        self.assertEqual(len(l), size * 5)
        self.assertEqual(l[-5:], [5, 4, 3, 2, 1])
        self.assertEqual(l[:5], [5, 4, 3, 2, 1])

    @bigmemtest(size=_2G // 5 + 2, memuse=8 * 5)
    def test_sort(self, size):
        l = [1, 2, 3, 4, 5] * size
        l.sort()
        self.assertEqual(len(l), size * 5)
        self.assertEqual(l.count(1), size)
        self.assertEqual(l[:10], [1] * 10)
        self.assertEqual(l[-10:], [5] * 10)
def test_main():
    # regrtest entry point: run every bigmem test class in this module.
    support.run_unittest(StrTest, BytesTest, BytearrayTest,
                         TupleTest, ListTest)
if __name__ == '__main__':
    # Optional argv[1] sets the bigmem memory limit (e.g. "4G") before
    # the tests run; without it, support's default limit applies.
    if len(sys.argv) > 1:
        support.set_memlimit(sys.argv[1])
    test_main()
| lgpl-3.0 |
ronan22/obs-service-git-buildpackage | setup.py | 1 | 1718 | #!/usr/bin/python
# vim:fileencoding=utf-8:et:ts=4:sw=4:sts=4
#
# Copyright (C) 2013 Intel Corporation <markus.lehtonen@linux.intel.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
"""Setup script"""
from setuptools import setup
def get_version():
    """Get version from the spec file"""
    spec_path = 'packaging/obs-service-git-buildpackage.spec'
    with open(spec_path, 'r') as spec:
        # Iterate the file lazily rather than slurping it with readlines().
        for line in spec:
            if line.lower().startswith('version:'):
                return line.split(':', 1)[1].strip()
    raise Exception('ERROR: unable to parse version from spec file')
# Register the package with setuptools; the version is read from the RPM
# spec file so it is maintained in a single place.
setup(name='obs_service_gbp',
      version=get_version(),
      description='OBS source service utilizing git-buildpackage',
      author='Markus Lehtonen',
      author_email='markus.lehtonen@linux.intel.com',
      packages=['obs_service_gbp', 'gbp_repocache'],
      # Install the OBS service executable and its .service definition,
      # plus the system-wide service configuration file.
      data_files=[('/usr/lib/obs/service', ['service/git-buildpackage',
                                            'service/git-buildpackage.service']),
                  ('/etc/obs/services', ['config/git-buildpackage'])],
      )
| gpl-2.0 |
timoschwarzer/blendworks | BlendWorks Server/python/Lib/test/test_generators.py | 72 | 50910 | import gc
import sys
import unittest
import weakref
from test import support
class FinalizationTest(unittest.TestCase):

    def test_frame_resurrect(self):
        # A generator frame can be resurrected by a generator's finalization.
        def gen():
            nonlocal frame
            try:
                yield
            finally:
                # Runs when the suspended generator is finalized; storing
                # the frame object keeps it alive past the generator's death.
                frame = sys._getframe()

        g = gen()
        wr = weakref.ref(g)
        next(g)  # advance into the try block so the finally clause will run
        del g
        support.gc_collect()
        self.assertIs(wr(), None)  # the generator object itself is gone...
        self.assertTrue(frame)     # ...but its frame was resurrected
        del frame
        support.gc_collect()

    def test_refcycle(self):
        # A generator caught in a refcycle gets finalized anyway.
        old_garbage = gc.garbage[:]
        finalized = False
        def gen():
            nonlocal finalized
            try:
                g = yield
                yield 1
            finally:
                finalized = True

        g = gen()
        next(g)
        g.send(g)  # close the cycle: the generator now references itself
        self.assertGreater(sys.getrefcount(g), 2)
        self.assertFalse(finalized)
        del g
        support.gc_collect()
        # Cyclic GC must still have run the finally block...
        self.assertTrue(finalized)
        # ...without adding anything to gc.garbage.
        self.assertEqual(gc.garbage, old_garbage)
# Doctest battery: generator tutorial walk-through (basic yield,
# termination via fall-off / return / StopIteration, nested generators).
tutorial_tests = """
Let's try a simple generator:
>>> def f():
... yield 1
... yield 2
>>> for i in f():
... print(i)
1
2
>>> g = f()
>>> next(g)
1
>>> next(g)
2
"Falling off the end" stops the generator:
>>> next(g)
Traceback (most recent call last):
File "<stdin>", line 1, in ?
File "<stdin>", line 2, in g
StopIteration
"return" also stops the generator:
>>> def f():
... yield 1
... return
... yield 2 # never reached
...
>>> g = f()
>>> next(g)
1
>>> next(g)
Traceback (most recent call last):
File "<stdin>", line 1, in ?
File "<stdin>", line 3, in f
StopIteration
>>> next(g) # once stopped, can't be resumed
Traceback (most recent call last):
File "<stdin>", line 1, in ?
StopIteration
"raise StopIteration" stops the generator too:
>>> def f():
... yield 1
... raise StopIteration
... yield 2 # never reached
...
>>> g = f()
>>> next(g)
1
>>> next(g)
Traceback (most recent call last):
File "<stdin>", line 1, in ?
StopIteration
>>> next(g)
Traceback (most recent call last):
File "<stdin>", line 1, in ?
StopIteration
However, they are not exactly equivalent:
>>> def g1():
... try:
... return
... except:
... yield 1
...
>>> list(g1())
[]
>>> def g2():
... try:
... raise StopIteration
... except:
... yield 42
>>> print(list(g2()))
[42]
This may be surprising at first:
>>> def g3():
... try:
... return
... finally:
... yield 1
...
>>> list(g3())
[1]
Let's create an alternate range() function implemented as a generator:
>>> def yrange(n):
... for i in range(n):
... yield i
...
>>> list(yrange(5))
[0, 1, 2, 3, 4]
Generators always return to the most recent caller:
>>> def creator():
... r = yrange(5)
... print("creator", next(r))
... return r
...
>>> def caller():
... r = creator()
... for i in r:
... print("caller", i)
...
>>> caller()
creator 0
caller 1
caller 2
caller 3
caller 4
Generators can call other generators:
>>> def zrange(n):
... for i in yrange(n):
... yield i
...
>>> list(zrange(5))
[0, 1, 2, 3, 4]
"""
# The examples from PEP 255.
# Doctest battery: yield/return/exception semantics and the binary-tree
# example, verbatim from PEP 255.
pep_tests = """
Specification: Yield
Restriction: A generator cannot be resumed while it is actively
running:
>>> def g():
... i = next(me)
... yield i
>>> me = g()
>>> next(me)
Traceback (most recent call last):
...
File "<string>", line 2, in g
ValueError: generator already executing
Specification: Return
Note that return isn't always equivalent to raising StopIteration: the
difference lies in how enclosing try/except constructs are treated.
For example,
>>> def f1():
... try:
... return
... except:
... yield 1
>>> print(list(f1()))
[]
because, as in any function, return simply exits, but
>>> def f2():
... try:
... raise StopIteration
... except:
... yield 42
>>> print(list(f2()))
[42]
because StopIteration is captured by a bare "except", as is any
exception.
Specification: Generators and Exception Propagation
>>> def f():
... return 1//0
>>> def g():
... yield f() # the zero division exception propagates
... yield 42 # and we'll never get here
>>> k = g()
>>> next(k)
Traceback (most recent call last):
File "<stdin>", line 1, in ?
File "<stdin>", line 2, in g
File "<stdin>", line 2, in f
ZeroDivisionError: integer division or modulo by zero
>>> next(k) # and the generator cannot be resumed
Traceback (most recent call last):
File "<stdin>", line 1, in ?
StopIteration
>>>
Specification: Try/Except/Finally
>>> def f():
... try:
... yield 1
... try:
... yield 2
... 1//0
... yield 3 # never get here
... except ZeroDivisionError:
... yield 4
... yield 5
... raise
... except:
... yield 6
... yield 7 # the "raise" above stops this
... except:
... yield 8
... yield 9
... try:
... x = 12
... finally:
... yield 10
... yield 11
>>> print(list(f()))
[1, 2, 4, 5, 8, 9, 10, 11]
>>>
Guido's binary tree example.
>>> # A binary tree class.
>>> class Tree:
...
... def __init__(self, label, left=None, right=None):
... self.label = label
... self.left = left
... self.right = right
...
... def __repr__(self, level=0, indent=" "):
... s = level*indent + repr(self.label)
... if self.left:
... s = s + "\\n" + self.left.__repr__(level+1, indent)
... if self.right:
... s = s + "\\n" + self.right.__repr__(level+1, indent)
... return s
...
... def __iter__(self):
... return inorder(self)
>>> # Create a Tree from a list.
>>> def tree(list):
... n = len(list)
... if n == 0:
... return []
... i = n // 2
... return Tree(list[i], tree(list[:i]), tree(list[i+1:]))
>>> # Show it off: create a tree.
>>> t = tree("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
>>> # A recursive generator that generates Tree labels in in-order.
>>> def inorder(t):
... if t:
... for x in inorder(t.left):
... yield x
... yield t.label
... for x in inorder(t.right):
... yield x
>>> # Show it off: create a tree.
>>> t = tree("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
>>> # Print the nodes of the tree in in-order.
>>> for x in t:
... print(' '+x, end='')
A B C D E F G H I J K L M N O P Q R S T U V W X Y Z
>>> # A non-recursive generator.
>>> def inorder(node):
... stack = []
... while node:
... while node.left:
... stack.append(node)
... node = node.left
... yield node.label
... while not node.right:
... try:
... node = stack.pop()
... except IndexError:
... return
... yield node.label
... node = node.right
>>> # Exercise the non-recursive generator.
>>> for x in t:
... print(' '+x, end='')
A B C D E F G H I J K L M N O P Q R S T U V W X Y Z
"""
# Examples from Iterator-List and Python-Dev and c.l.py.
# Doctest battery: examples collected from the Iterators list, python-dev
# and comp.lang.python postings.
email_tests = """
The difference between yielding None and returning it.
>>> def g():
... for i in range(3):
... yield None
... yield None
... return
>>> list(g())
[None, None, None, None]
Ensure that explicitly raising StopIteration acts like any other exception
in try/except, not like a return.
>>> def g():
... yield 1
... try:
... raise StopIteration
... except:
... yield 2
... yield 3
>>> list(g())
[1, 2, 3]
Next one was posted to c.l.py.
>>> def gcomb(x, k):
... "Generate all combinations of k elements from list x."
...
... if k > len(x):
... return
... if k == 0:
... yield []
... else:
... first, rest = x[0], x[1:]
... # A combination does or doesn't contain first.
... # If it does, the remainder is a k-1 comb of rest.
... for c in gcomb(rest, k-1):
... c.insert(0, first)
... yield c
... # If it doesn't contain first, it's a k comb of rest.
... for c in gcomb(rest, k):
... yield c
>>> seq = list(range(1, 5))
>>> for k in range(len(seq) + 2):
... print("%d-combs of %s:" % (k, seq))
... for c in gcomb(seq, k):
... print(" ", c)
0-combs of [1, 2, 3, 4]:
[]
1-combs of [1, 2, 3, 4]:
[1]
[2]
[3]
[4]
2-combs of [1, 2, 3, 4]:
[1, 2]
[1, 3]
[1, 4]
[2, 3]
[2, 4]
[3, 4]
3-combs of [1, 2, 3, 4]:
[1, 2, 3]
[1, 2, 4]
[1, 3, 4]
[2, 3, 4]
4-combs of [1, 2, 3, 4]:
[1, 2, 3, 4]
5-combs of [1, 2, 3, 4]:
From the Iterators list, about the types of these things.
>>> def g():
... yield 1
...
>>> type(g)
<class 'function'>
>>> i = g()
>>> type(i)
<class 'generator'>
>>> [s for s in dir(i) if not s.startswith('_')]
['close', 'gi_code', 'gi_frame', 'gi_running', 'send', 'throw']
>>> from test.support import HAVE_DOCSTRINGS
>>> print(i.__next__.__doc__ if HAVE_DOCSTRINGS else 'Implement next(self).')
Implement next(self).
>>> iter(i) is i
True
>>> import types
>>> isinstance(i, types.GeneratorType)
True
And more, added later.
>>> i.gi_running
0
>>> type(i.gi_frame)
<class 'frame'>
>>> i.gi_running = 42
Traceback (most recent call last):
...
AttributeError: readonly attribute
>>> def g():
... yield me.gi_running
>>> me = g()
>>> me.gi_running
0
>>> next(me)
1
>>> me.gi_running
0
A clever union-find implementation from c.l.py, due to David Eppstein.
Sent: Friday, June 29, 2001 12:16 PM
To: python-list@python.org
Subject: Re: PEP 255: Simple Generators
>>> class disjointSet:
... def __init__(self, name):
... self.name = name
... self.parent = None
... self.generator = self.generate()
...
... def generate(self):
... while not self.parent:
... yield self
... for x in self.parent.generator:
... yield x
...
... def find(self):
... return next(self.generator)
...
... def union(self, parent):
... if self.parent:
... raise ValueError("Sorry, I'm not a root!")
... self.parent = parent
...
... def __str__(self):
... return self.name
>>> names = "ABCDEFGHIJKLM"
>>> sets = [disjointSet(name) for name in names]
>>> roots = sets[:]
>>> import random
>>> gen = random.Random(42)
>>> while 1:
... for s in sets:
... print(" %s->%s" % (s, s.find()), end='')
... print()
... if len(roots) > 1:
... s1 = gen.choice(roots)
... roots.remove(s1)
... s2 = gen.choice(roots)
... s1.union(s2)
... print("merged", s1, "into", s2)
... else:
... break
A->A B->B C->C D->D E->E F->F G->G H->H I->I J->J K->K L->L M->M
merged K into B
A->A B->B C->C D->D E->E F->F G->G H->H I->I J->J K->B L->L M->M
merged A into F
A->F B->B C->C D->D E->E F->F G->G H->H I->I J->J K->B L->L M->M
merged E into F
A->F B->B C->C D->D E->F F->F G->G H->H I->I J->J K->B L->L M->M
merged D into C
A->F B->B C->C D->C E->F F->F G->G H->H I->I J->J K->B L->L M->M
merged M into C
A->F B->B C->C D->C E->F F->F G->G H->H I->I J->J K->B L->L M->C
merged J into B
A->F B->B C->C D->C E->F F->F G->G H->H I->I J->B K->B L->L M->C
merged B into C
A->F B->C C->C D->C E->F F->F G->G H->H I->I J->C K->C L->L M->C
merged F into G
A->G B->C C->C D->C E->G F->G G->G H->H I->I J->C K->C L->L M->C
merged L into C
A->G B->C C->C D->C E->G F->G G->G H->H I->I J->C K->C L->C M->C
merged G into I
A->I B->C C->C D->C E->I F->I G->I H->H I->I J->C K->C L->C M->C
merged I into H
A->H B->C C->C D->C E->H F->H G->H H->H I->H J->C K->C L->C M->C
merged C into H
A->H B->H C->H D->H E->H F->H G->H H->H I->H J->H K->H L->H M->H
"""
# Emacs turd '
# Fun tests (for sufficiently warped notions of "fun").
# Recursive Sieve of Eratosthenes, Hamming numbers and Fibonacci, first
# via a LazyList class and then via itertools.tee.
fun_tests = """
Build up to a recursive Sieve of Eratosthenes generator.
>>> def firstn(g, n):
... return [next(g) for i in range(n)]
>>> def intsfrom(i):
... while 1:
... yield i
... i += 1
>>> firstn(intsfrom(5), 7)
[5, 6, 7, 8, 9, 10, 11]
>>> def exclude_multiples(n, ints):
... for i in ints:
... if i % n:
... yield i
>>> firstn(exclude_multiples(3, intsfrom(1)), 6)
[1, 2, 4, 5, 7, 8]
>>> def sieve(ints):
... prime = next(ints)
... yield prime
... not_divisible_by_prime = exclude_multiples(prime, ints)
... for p in sieve(not_divisible_by_prime):
... yield p
>>> primes = sieve(intsfrom(2))
>>> firstn(primes, 20)
[2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71]
Another famous problem: generate all integers of the form
2**i * 3**j * 5**k
in increasing order, where i,j,k >= 0. Trickier than it may look at first!
Try writing it without generators, and correctly, and without generating
3 internal results for each result output.
>>> def times(n, g):
... for i in g:
... yield n * i
>>> firstn(times(10, intsfrom(1)), 10)
[10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
>>> def merge(g, h):
... ng = next(g)
... nh = next(h)
... while 1:
... if ng < nh:
... yield ng
... ng = next(g)
... elif ng > nh:
... yield nh
... nh = next(h)
... else:
... yield ng
... ng = next(g)
... nh = next(h)
The following works, but is doing a whale of a lot of redundant work --
it's not clear how to get the internal uses of m235 to share a single
generator. Note that me_times2 (etc) each need to see every element in the
result sequence. So this is an example where lazy lists are more natural
(you can look at the head of a lazy list any number of times).
>>> def m235():
... yield 1
... me_times2 = times(2, m235())
... me_times3 = times(3, m235())
... me_times5 = times(5, m235())
... for i in merge(merge(me_times2,
... me_times3),
... me_times5):
... yield i
Don't print "too many" of these -- the implementation above is extremely
inefficient: each call of m235() leads to 3 recursive calls, and in
turn each of those 3 more, and so on, and so on, until we've descended
enough levels to satisfy the print stmts. Very odd: when I printed 5
lines of results below, this managed to screw up Win98's malloc in "the
usual" way, i.e. the heap grew over 4Mb so Win98 started fragmenting
address space, and it *looked* like a very slow leak.
>>> result = m235()
>>> for i in range(3):
... print(firstn(result, 15))
[1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, 16, 18, 20, 24]
[25, 27, 30, 32, 36, 40, 45, 48, 50, 54, 60, 64, 72, 75, 80]
[81, 90, 96, 100, 108, 120, 125, 128, 135, 144, 150, 160, 162, 180, 192]
Heh. Here's one way to get a shared list, complete with an excruciating
namespace renaming trick. The *pretty* part is that the times() and merge()
functions can be reused as-is, because they only assume their stream
arguments are iterable -- a LazyList is the same as a generator to times().
>>> class LazyList:
... def __init__(self, g):
... self.sofar = []
... self.fetch = g.__next__
...
... def __getitem__(self, i):
... sofar, fetch = self.sofar, self.fetch
... while i >= len(sofar):
... sofar.append(fetch())
... return sofar[i]
>>> def m235():
... yield 1
... # Gack: m235 below actually refers to a LazyList.
... me_times2 = times(2, m235)
... me_times3 = times(3, m235)
... me_times5 = times(5, m235)
... for i in merge(merge(me_times2,
... me_times3),
... me_times5):
... yield i
Print as many of these as you like -- *this* implementation is memory-
efficient.
>>> m235 = LazyList(m235())
>>> for i in range(5):
... print([m235[j] for j in range(15*i, 15*(i+1))])
[1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, 16, 18, 20, 24]
[25, 27, 30, 32, 36, 40, 45, 48, 50, 54, 60, 64, 72, 75, 80]
[81, 90, 96, 100, 108, 120, 125, 128, 135, 144, 150, 160, 162, 180, 192]
[200, 216, 225, 240, 243, 250, 256, 270, 288, 300, 320, 324, 360, 375, 384]
[400, 405, 432, 450, 480, 486, 500, 512, 540, 576, 600, 625, 640, 648, 675]
Ye olde Fibonacci generator, LazyList style.
>>> def fibgen(a, b):
...
... def sum(g, h):
... while 1:
... yield next(g) + next(h)
...
... def tail(g):
... next(g) # throw first away
... for x in g:
... yield x
...
... yield a
... yield b
... for s in sum(iter(fib),
... tail(iter(fib))):
... yield s
>>> fib = LazyList(fibgen(1, 2))
>>> firstn(iter(fib), 17)
[1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610, 987, 1597, 2584]
Running after your tail with itertools.tee (new in version 2.4)
The algorithms "m235" (Hamming) and Fibonacci presented above are both
examples of a whole family of FP (functional programming) algorithms
where a function produces and returns a list while the production algorithm
suppose the list as already produced by recursively calling itself.
For these algorithms to work, they must:
- produce at least a first element without presupposing the existence of
the rest of the list
- produce their elements in a lazy manner
To work efficiently, the beginning of the list must not be recomputed over
and over again. This is ensured in most FP languages as a built-in feature.
In python, we have to explicitly maintain a list of already computed results
and abandon genuine recursivity.
This is what had been attempted above with the LazyList class. One problem
with that class is that it keeps a list of all of the generated results and
therefore continually grows. This partially defeats the goal of the generator
concept, viz. produce the results only as needed instead of producing them
all and thereby wasting memory.
Thanks to itertools.tee, it is now clear "how to get the internal uses of
m235 to share a single generator".
>>> from itertools import tee
>>> def m235():
... def _m235():
... yield 1
... for n in merge(times(2, m2),
... merge(times(3, m3),
... times(5, m5))):
... yield n
... m1 = _m235()
... m2, m3, m5, mRes = tee(m1, 4)
... return mRes
>>> it = m235()
>>> for i in range(5):
... print(firstn(it, 15))
[1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, 16, 18, 20, 24]
[25, 27, 30, 32, 36, 40, 45, 48, 50, 54, 60, 64, 72, 75, 80]
[81, 90, 96, 100, 108, 120, 125, 128, 135, 144, 150, 160, 162, 180, 192]
[200, 216, 225, 240, 243, 250, 256, 270, 288, 300, 320, 324, 360, 375, 384]
[400, 405, 432, 450, 480, 486, 500, 512, 540, 576, 600, 625, 640, 648, 675]
The "tee" function does just what we want. It internally keeps a generated
result for as long as it has not been "consumed" from all of the duplicated
iterators, whereupon it is deleted. You can therefore print the hamming
sequence during hours without increasing memory usage, or very little.
The beauty of it is that recursive running-after-their-tail FP algorithms
are quite straightforwardly expressed with this Python idiom.
Ye olde Fibonacci generator, tee style.
>>> def fib():
...
... def _isum(g, h):
... while 1:
... yield next(g) + next(h)
...
... def _fib():
... yield 1
... yield 2
... next(fibTail) # throw first away
... for res in _isum(fibHead, fibTail):
... yield res
...
... realfib = _fib()
... fibHead, fibTail, fibRes = tee(realfib, 3)
... return fibRes
>>> firstn(fib(), 17)
[1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610, 987, 1597, 2584]
"""
# syntax_tests mostly provokes SyntaxErrors. Also fiddling with #if 0
# hackery.
# Which function bodies do and do not count as generators, plus gi_code,
# __name__/repr() and yield-in-lambda behavior.
syntax_tests = """
These are fine:
>>> def f():
... yield 1
... return
>>> def f():
... try:
... yield 1
... finally:
... pass
>>> def f():
... try:
... try:
... 1//0
... except ZeroDivisionError:
... yield 666
... except:
... pass
... finally:
... pass
>>> def f():
... try:
... try:
... yield 12
... 1//0
... except ZeroDivisionError:
... yield 666
... except:
... try:
... x = 12
... finally:
... yield 12
... except:
... return
>>> list(f())
[12, 666]
>>> def f():
... yield
>>> type(f())
<class 'generator'>
>>> def f():
... if 0:
... yield
>>> type(f())
<class 'generator'>
>>> def f():
... if 0:
... yield 1
>>> type(f())
<class 'generator'>
>>> def f():
... if "":
... yield None
>>> type(f())
<class 'generator'>
>>> def f():
... return
... try:
... if x==4:
... pass
... elif 0:
... try:
... 1//0
... except SyntaxError:
... pass
... else:
... if 0:
... while 12:
... x += 1
... yield 2 # don't blink
... f(a, b, c, d, e)
... else:
... pass
... except:
... x = 1
... return
>>> type(f())
<class 'generator'>
>>> def f():
... if 0:
... def g():
... yield 1
...
>>> type(f())
<class 'NoneType'>
>>> def f():
... if 0:
... class C:
... def __init__(self):
... yield 1
... def f(self):
... yield 2
>>> type(f())
<class 'NoneType'>
>>> def f():
... if 0:
... return
... if 0:
... yield 2
>>> type(f())
<class 'generator'>
This one caused a crash (see SF bug 567538):
>>> def f():
... for i in range(3):
... try:
... continue
... finally:
... yield i
...
>>> g = f()
>>> print(next(g))
0
>>> print(next(g))
1
>>> print(next(g))
2
>>> print(next(g))
Traceback (most recent call last):
StopIteration
Test the gi_code attribute
>>> def f():
... yield 5
...
>>> g = f()
>>> g.gi_code is f.__code__
True
>>> next(g)
5
>>> next(g)
Traceback (most recent call last):
StopIteration
>>> g.gi_code is f.__code__
True
Test the __name__ attribute and the repr()
>>> def f():
... yield 5
...
>>> g = f()
>>> g.__name__
'f'
>>> repr(g) # doctest: +ELLIPSIS
'<generator object f at ...>'
Lambdas shouldn't have their usual return behavior.
>>> x = lambda: (yield 1)
>>> list(x())
[1]
>>> x = lambda: ((yield 1), (yield 2))
>>> list(x())
[1, 2]
"""
# conjoin is a simple backtracking generator, named in honor of Icon's
# "conjunction" control structure. Pass a list of no-argument functions
# that return iterable objects. Easiest to explain by example: assume the
# function list [x, y, z] is passed. Then conjoin acts like:
#
# def g():
# values = [None] * 3
# for values[0] in x():
# for values[1] in y():
# for values[2] in z():
# yield values
#
# So some 3-lists of values *may* be generated, each time we successfully
# get into the innermost loop. If an iterator fails (is exhausted) before
# then, it "backtracks" to get the next value from the nearest enclosing
# iterator (the one "to the left"), and starts all over again at the next
# slot (pumps a fresh iterator). Of course this is most useful when the
# iterators have side-effects, so that which values *can* be generated at
# each slot depend on the values iterated at previous slots.
def simple_conjoin(gs):
    """Backtracking cross-product over the iterables the callables in gs
    produce.

    Each result is yielded as the one shared list object, mutated in
    place; callers that want to keep a result must copy it.
    """
    slots = [None] * len(gs)

    def _backtrack(depth):
        # All slots filled: emit the (shared) result list.
        if depth >= len(gs):
            yield slots
            return
        # Pump a fresh iterator for this slot, recursing for the rest.
        for slots[depth] in gs[depth]():
            for result in _backtrack(depth + 1):
                yield result

    for result in _backtrack(0):
        yield result
# That works fine, but recursing a level and checking i against len(gs) for
# each item produced is inefficient. By doing manual loop unrolling across
# generator boundaries, it's possible to eliminate most of that overhead.
# This isn't worth the bother *in general* for generators, but conjoin() is
# a core building block for some CPU-intensive generator applications.
def conjoin(gs):
    """Unrolled backtracking cross-product generator.

    Same contract as simple_conjoin -- yields one shared `values` list per
    complete assignment -- but peels off three loop nests at a time to cut
    the cost of delegating each yielded item up a chain of generators.
    """
    n = len(gs)
    values = [None] * n

    # Do one loop nest at time recursively, until the # of loop nests
    # remaining is divisible by 3.
    def gen(i):
        if i >= n:
            yield values
        elif (n-i) % 3:
            ip1 = i+1
            for values[i] in gs[i]():
                for x in gen(ip1):
                    yield x
        else:
            # Remaining depth is a multiple of 3: hand off to the unrolled
            # three-at-a-time worker.
            for x in _gen3(i):
                yield x

    # Do three loop nests at a time, recursing only if at least three more
    # remain.  Don't call directly: this is an internal optimization for
    # gen's use.
    def _gen3(i):
        assert i < n and (n-i) % 3 == 0
        ip1, ip2, ip3 = i+1, i+2, i+3
        g, g1, g2 = gs[i : ip3]

        if ip3 >= n:
            # These are the last three, so we can yield values directly.
            for values[i] in g():
                for values[ip1] in g1():
                    for values[ip2] in g2():
                        yield values
        else:
            # At least 6 loop nests remain; peel off 3 and recurse for the
            # rest.
            for values[i] in g():
                for values[ip1] in g1():
                    for values[ip2] in g2():
                        for x in _gen3(ip3):
                            yield x

    for x in gen(0):
        yield x
# And one more approach: For backtracking apps like the Knight's Tour
# solver below, the number of backtracking levels can be enormous (one
# level per square, for the Knight's Tour, so that e.g. a 100x100 board
# needs 10,000 levels). In such cases Python is likely to run out of
# stack space due to recursion. So here's a recursion-free version of
# conjoin too.
# NOTE WELL: This allows large problems to be solved with only trivial
# demands on stack space. Without explicitly resumable generators, this is
# much harder to achieve. OTOH, this is much slower (up to a factor of 2)
# than the fancy unrolled recursive conjoin.
def flat_conjoin(gs):  # rename to conjoin to run tests with this instead
    """Iterative (recursion-free) conjoin.

    Keeps one live iterator per slot and walks an explicit descend /
    backtrack loop, so arbitrarily many backtracking levels use constant
    Python stack depth (at the cost of speed vs. the unrolled conjoin).
    """
    n = len(gs)
    values = [None] * n
    iters = [None] * n
    _StopIteration = StopIteration  # make local because caught a *lot*
    i = 0
    while 1:
        # Descend: pump a fresh iterator into each slot from i to the end.
        try:
            while i < n:
                it = iters[i] = gs[i]().__next__
                values[i] = it()
                i += 1
        except _StopIteration:
            # Some slot was exhausted immediately; fall through to backtrack.
            pass
        else:
            assert i == n
            yield values

        # Backtrack until an older iterator can be resumed.
        i -= 1
        while i >= 0:
            try:
                values[i] = iters[i]()
                # Success!  Start fresh at next level.
                i += 1
                break
            except _StopIteration:
                # Continue backtracking.
                i -= 1
        else:
            # Even slot 0 is exhausted: the whole product is done.
            assert i < 0
            break
# A conjoin-based N-Queens solver.
class Queens:
    """N-Queens solver built on conjoin: one move generator per board row."""

    def __init__(self, n):
        self.n = n
        rangen = range(n)

        # Assign a unique int to each column and diagonal.
        # columns: n of those, range(n).
        # NW-SE diagonals: 2n-1 of these, i-j unique and invariant along
        # each, smallest i-j is 0-(n-1) = 1-n, so add n-1 to shift to 0-
        # based.
        # NE-SW diagonals: 2n-1 of these, i+j unique and invariant along
        # each, smallest i+j is 0, largest is 2n-2.

        # For each square, compute a bit vector of the columns and
        # diagonals it covers, and for each row compute a function that
        # generates the possibilities for the columns in that row.
        self.rowgenerators = []
        for i in rangen:
            rowuses = [(1 << j) |                  # column ordinal
                       (1 << (n + i-j + n-1)) |    # NW-SE ordinal
                       (1 << (n + 2*n-1 + i+j))    # NE-SW ordinal
                       for j in rangen]

            def rowgen(rowuses=rowuses):
                # rowuses is bound as a default argument so each closure
                # keeps its own row's bit vectors (late-binding pitfall).
                # Claims/releases of bits in self.used give the backtracking
                # side effects conjoin relies on.
                for j in rangen:
                    uses = rowuses[j]
                    if uses & self.used == 0:
                        self.used |= uses
                        yield j
                        self.used &= ~uses

            self.rowgenerators.append(rowgen)

    # Generate solutions.
    def solve(self):
        self.used = 0
        for row2col in conjoin(self.rowgenerators):
            yield row2col

    def printsolution(self, row2col):
        # Render one solution (row -> column mapping) as an ASCII board.
        n = self.n
        assert n == len(row2col)
        sep = "+" + "-+" * n
        print(sep)
        for i in range(n):
            squares = [" " for j in range(n)]
            squares[row2col[i]] = "Q"
            print("|" + "|".join(squares) + "|")
        print(sep)
# A conjoin-based Knight's Tour solver. This is pretty sophisticated
# (e.g., when used with flat_conjoin above, and passing hard=1 to the
# constructor, a 200x200 Knight's Tour was found quickly -- note that we're
# creating 10s of thousands of generators then!), and is lengthy.
class Knights:
    """Conjoin-based Knight's Tour (closed tour) solver for an m x n board.

    Builds one move generator per square; pass hard=1 to use a stronger
    but more expensive successor-ordering heuristic for large boards.
    """

    def __init__(self, m, n, hard=0):
        self.m, self.n = m, n

        # solve() will set up succs[i] to be a list of square #i's
        # successors.
        succs = self.succs = []

        # Remove i0 from each of its successor's successor lists, i.e.
        # successors can't go back to i0 again.  Return 0 if we can
        # detect this makes a solution impossible, else return 1.
        def remove_from_successors(i0, len=len):
            # If we remove all exits from a free square, we're dead:
            # even if we move to it next, we can't leave it again.
            # If we create a square with one exit, we must visit it next;
            # else somebody else will have to visit it, and since there's
            # only one adjacent, there won't be a way to leave it again.
            # Finally, if we create more than one free square with a
            # single exit, we can only move to one of them next, leaving
            # the other one a dead end.
            ne0 = ne1 = 0
            for i in succs[i0]:
                s = succs[i]
                s.remove(i0)
                e = len(s)
                if e == 0:
                    ne0 += 1
                elif e == 1:
                    ne1 += 1
            return ne0 == 0 and ne1 < 2

        # Put i0 back in each of its successor's successor lists.
        def add_to_successors(i0):
            for i in succs[i0]:
                succs[i].append(i0)

        # Generate the first move.
        def first():
            if m < 1 or n < 1:
                return

            # Since we're looking for a cycle, it doesn't matter where we
            # start.  Starting in a corner makes the 2nd move easy.
            corner = self.coords2index(0, 0)
            remove_from_successors(corner)
            self.lastij = corner
            yield corner
            add_to_successors(corner)

        # Generate the second moves.
        def second():
            corner = self.coords2index(0, 0)
            assert self.lastij == corner  # i.e., we started in the corner
            if m < 3 or n < 3:
                return
            assert len(succs[corner]) == 2
            assert self.coords2index(1, 2) in succs[corner]
            assert self.coords2index(2, 1) in succs[corner]
            # Only two choices.  Whichever we pick, the other must be the
            # square picked on move m*n, as it's the only way to get back
            # to (0, 0).  Save its index in self.final so that moves before
            # the last know it must be kept free.
            for i, j in (1, 2), (2, 1):
                this = self.coords2index(i, j)
                final = self.coords2index(3-i, 3-j)
                self.final = final

                remove_from_successors(this)
                succs[final].append(corner)
                self.lastij = this
                yield this
                succs[final].remove(corner)
                add_to_successors(this)

        # Generate moves 3 thru m*n-1.
        def advance(len=len):
            # If some successor has only one exit, must take it.
            # Else favor successors with fewer exits.
            candidates = []
            for i in succs[self.lastij]:
                e = len(succs[i])
                assert e > 0, "else remove_from_successors() pruning flawed"
                if e == 1:
                    candidates = [(e, i)]
                    break
                candidates.append((e, i))
            else:
                candidates.sort()

            for e, i in candidates:
                if i != self.final:
                    if remove_from_successors(i):
                        self.lastij = i
                        yield i
                        add_to_successors(i)

        # Generate moves 3 thru m*n-1.  Alternative version using a
        # stronger (but more expensive) heuristic to order successors.
        # Since the # of backtracking levels is m*n, a poor move early on
        # can take eons to undo.  Smallest square board for which this
        # matters a lot is 52x52.
        def advance_hard(vmid=(m-1)/2.0, hmid=(n-1)/2.0, len=len):
            # If some successor has only one exit, must take it.
            # Else favor successors with fewer exits.
            # Break ties via max distance from board centerpoint (favor
            # corners and edges whenever possible).
            candidates = []
            for i in succs[self.lastij]:
                e = len(succs[i])
                assert e > 0, "else remove_from_successors() pruning flawed"
                if e == 1:
                    candidates = [(e, 0, i)]
                    break
                i1, j1 = self.index2coords(i)
                d = (i1 - vmid)**2 + (j1 - hmid)**2
                candidates.append((e, -d, i))
            else:
                candidates.sort()

            for e, d, i in candidates:
                if i != self.final:
                    if remove_from_successors(i):
                        self.lastij = i
                        yield i
                        add_to_successors(i)

        # Generate the last move.
        def last():
            assert self.final in succs[self.lastij]
            yield self.final

        # One generator per move; moves 3..m*n-1 all share the chosen
        # advance heuristic.
        if m*n < 4:
            self.squaregenerators = [first]
        else:
            self.squaregenerators = [first, second] + \
                [hard and advance_hard or advance] * (m*n - 3) + \
                [last]

    def coords2index(self, i, j):
        """Map 0-based (row, col) coordinates to a flat square index."""
        assert 0 <= i < self.m
        assert 0 <= j < self.n
        return i * self.n + j

    def index2coords(self, index):
        """Inverse of coords2index: flat index -> (row, col)."""
        assert 0 <= index < self.m * self.n
        return divmod(index, self.n)

    def _init_board(self):
        # (Re)build the knight-move successor list for every square.
        succs = self.succs
        del succs[:]
        m, n = self.m, self.n
        c2i = self.coords2index
        offsets = [( 1,  2), ( 2,  1), ( 2, -1), ( 1, -2),
                   (-1, -2), (-2, -1), (-2,  1), (-1,  2)]
        rangen = range(n)
        for i in range(m):
            for j in rangen:
                s = [c2i(i+io, j+jo) for io, jo in offsets
                     if 0 <= i+io < m and
                        0 <= j+jo < n]
                succs.append(s)

    # Generate solutions.
    def solve(self):
        self._init_board()
        for x in conjoin(self.squaregenerators):
            yield x

    def printsolution(self, x):
        # Render one tour (sequence of square indices) as an ASCII board
        # showing each square's visit order.
        m, n = self.m, self.n
        assert len(x) == m*n
        w = len(str(m*n))
        format = "%" + str(w) + "d"

        squares = [[None] * n for i in range(m)]
        k = 1
        for i in x:
            i1, j1 = self.index2coords(i)
            squares[i1][j1] = format % k
            k += 1

        sep = "+" + ("-" * w + "+") * n
        print(sep)
        for i in range(m):
            row = squares[i]
            print("|" + "|".join(row) + "|")
        print(sep)
conjoin_tests = """
Generate the 3-bit binary numbers in order. This illustrates dumbest-
possible use of conjoin, just to generate the full cross-product.
>>> for c in conjoin([lambda: iter((0, 1))] * 3):
... print(c)
[0, 0, 0]
[0, 0, 1]
[0, 1, 0]
[0, 1, 1]
[1, 0, 0]
[1, 0, 1]
[1, 1, 0]
[1, 1, 1]
For efficiency in typical backtracking apps, conjoin() yields the same list
object each time. So if you want to save away a full account of its
generated sequence, you need to copy its results.
>>> def gencopy(iterator):
... for x in iterator:
... yield x[:]
>>> for n in range(10):
... all = list(gencopy(conjoin([lambda: iter((0, 1))] * n)))
... print(n, len(all), all[0] == [0] * n, all[-1] == [1] * n)
0 1 True True
1 2 True True
2 4 True True
3 8 True True
4 16 True True
5 32 True True
6 64 True True
7 128 True True
8 256 True True
9 512 True True
And run an 8-queens solver.
>>> q = Queens(8)
>>> LIMIT = 2
>>> count = 0
>>> for row2col in q.solve():
... count += 1
... if count <= LIMIT:
... print("Solution", count)
... q.printsolution(row2col)
Solution 1
+-+-+-+-+-+-+-+-+
|Q| | | | | | | |
+-+-+-+-+-+-+-+-+
| | | | |Q| | | |
+-+-+-+-+-+-+-+-+
| | | | | | | |Q|
+-+-+-+-+-+-+-+-+
| | | | | |Q| | |
+-+-+-+-+-+-+-+-+
| | |Q| | | | | |
+-+-+-+-+-+-+-+-+
| | | | | | |Q| |
+-+-+-+-+-+-+-+-+
| |Q| | | | | | |
+-+-+-+-+-+-+-+-+
| | | |Q| | | | |
+-+-+-+-+-+-+-+-+
Solution 2
+-+-+-+-+-+-+-+-+
|Q| | | | | | | |
+-+-+-+-+-+-+-+-+
| | | | | |Q| | |
+-+-+-+-+-+-+-+-+
| | | | | | | |Q|
+-+-+-+-+-+-+-+-+
| | |Q| | | | | |
+-+-+-+-+-+-+-+-+
| | | | | | |Q| |
+-+-+-+-+-+-+-+-+
| | | |Q| | | | |
+-+-+-+-+-+-+-+-+
| |Q| | | | | | |
+-+-+-+-+-+-+-+-+
| | | | |Q| | | |
+-+-+-+-+-+-+-+-+
>>> print(count, "solutions in all.")
92 solutions in all.
And run a Knight's Tour on a 10x10 board. Note that there are about
20,000 solutions even on a 6x6 board, so don't dare run this to exhaustion.
>>> k = Knights(10, 10)
>>> LIMIT = 2
>>> count = 0
>>> for x in k.solve():
... count += 1
... if count <= LIMIT:
... print("Solution", count)
... k.printsolution(x)
... else:
... break
Solution 1
+---+---+---+---+---+---+---+---+---+---+
| 1| 58| 27| 34| 3| 40| 29| 10| 5| 8|
+---+---+---+---+---+---+---+---+---+---+
| 26| 35| 2| 57| 28| 33| 4| 7| 30| 11|
+---+---+---+---+---+---+---+---+---+---+
| 59|100| 73| 36| 41| 56| 39| 32| 9| 6|
+---+---+---+---+---+---+---+---+---+---+
| 74| 25| 60| 55| 72| 37| 42| 49| 12| 31|
+---+---+---+---+---+---+---+---+---+---+
| 61| 86| 99| 76| 63| 52| 47| 38| 43| 50|
+---+---+---+---+---+---+---+---+---+---+
| 24| 75| 62| 85| 54| 71| 64| 51| 48| 13|
+---+---+---+---+---+---+---+---+---+---+
| 87| 98| 91| 80| 77| 84| 53| 46| 65| 44|
+---+---+---+---+---+---+---+---+---+---+
| 90| 23| 88| 95| 70| 79| 68| 83| 14| 17|
+---+---+---+---+---+---+---+---+---+---+
| 97| 92| 21| 78| 81| 94| 19| 16| 45| 66|
+---+---+---+---+---+---+---+---+---+---+
| 22| 89| 96| 93| 20| 69| 82| 67| 18| 15|
+---+---+---+---+---+---+---+---+---+---+
Solution 2
+---+---+---+---+---+---+---+---+---+---+
| 1| 58| 27| 34| 3| 40| 29| 10| 5| 8|
+---+---+---+---+---+---+---+---+---+---+
| 26| 35| 2| 57| 28| 33| 4| 7| 30| 11|
+---+---+---+---+---+---+---+---+---+---+
| 59|100| 73| 36| 41| 56| 39| 32| 9| 6|
+---+---+---+---+---+---+---+---+---+---+
| 74| 25| 60| 55| 72| 37| 42| 49| 12| 31|
+---+---+---+---+---+---+---+---+---+---+
| 61| 86| 99| 76| 63| 52| 47| 38| 43| 50|
+---+---+---+---+---+---+---+---+---+---+
| 24| 75| 62| 85| 54| 71| 64| 51| 48| 13|
+---+---+---+---+---+---+---+---+---+---+
| 87| 98| 89| 80| 77| 84| 53| 46| 65| 44|
+---+---+---+---+---+---+---+---+---+---+
| 90| 23| 92| 95| 70| 79| 68| 83| 14| 17|
+---+---+---+---+---+---+---+---+---+---+
| 97| 88| 21| 78| 81| 94| 19| 16| 45| 66|
+---+---+---+---+---+---+---+---+---+---+
| 22| 91| 96| 93| 20| 69| 82| 67| 18| 15|
+---+---+---+---+---+---+---+---+---+---+
"""
weakref_tests = """\
Generators are weakly referencable:
>>> import weakref
>>> def gen():
... yield 'foo!'
...
>>> wr = weakref.ref(gen)
>>> wr() is gen
True
>>> p = weakref.proxy(gen)
Generator-iterators are weakly referencable as well:
>>> gi = gen()
>>> wr = weakref.ref(gi)
>>> wr() is gi
True
>>> p = weakref.proxy(gi)
>>> list(p)
['foo!']
"""
coroutine_tests = """\
Sending a value into a started generator:
>>> def f():
... print((yield 1))
... yield 2
>>> g = f()
>>> next(g)
1
>>> g.send(42)
42
2
Sending a value into a new generator produces a TypeError:
>>> f().send("foo")
Traceback (most recent call last):
...
TypeError: can't send non-None value to a just-started generator
Yield by itself yields None:
>>> def f(): yield
>>> list(f())
[None]
An obscene abuse of a yield expression within a generator expression:
>>> list((yield 21) for i in range(4))
[21, None, 21, None, 21, None, 21, None]
And a more sane, but still weird usage:
>>> def f(): list(i for i in [(yield 26)])
>>> type(f())
<class 'generator'>
A yield expression with augmented assignment.
>>> def coroutine(seq):
... count = 0
... while count < 200:
... count += yield
... seq.append(count)
>>> seq = []
>>> c = coroutine(seq)
>>> next(c)
>>> print(seq)
[]
>>> c.send(10)
>>> print(seq)
[10]
>>> c.send(10)
>>> print(seq)
[10, 20]
>>> c.send(10)
>>> print(seq)
[10, 20, 30]
Check some syntax errors for yield expressions:
>>> f=lambda: (yield 1),(yield 2)
Traceback (most recent call last):
...
SyntaxError: 'yield' outside function
>>> def f(): x = yield = y
Traceback (most recent call last):
...
SyntaxError: assignment to yield expression not possible
>>> def f(): (yield bar) = y
Traceback (most recent call last):
...
SyntaxError: can't assign to yield expression
>>> def f(): (yield bar) += y
Traceback (most recent call last):
...
SyntaxError: can't assign to yield expression
Now check some throw() conditions:
>>> def f():
... while True:
... try:
... print((yield))
... except ValueError as v:
... print("caught ValueError (%s)" % (v))
>>> import sys
>>> g = f()
>>> next(g)
>>> g.throw(ValueError) # type only
caught ValueError ()
>>> g.throw(ValueError("xyz")) # value only
caught ValueError (xyz)
>>> g.throw(ValueError, ValueError(1)) # value+matching type
caught ValueError (1)
>>> g.throw(ValueError, TypeError(1)) # mismatched type, rewrapped
caught ValueError (1)
>>> g.throw(ValueError, ValueError(1), None) # explicit None traceback
caught ValueError (1)
>>> g.throw(ValueError(1), "foo") # bad args
Traceback (most recent call last):
...
TypeError: instance exception may not have a separate value
>>> g.throw(ValueError, "foo", 23) # bad args
Traceback (most recent call last):
...
TypeError: throw() third argument must be a traceback object
>>> g.throw("abc")
Traceback (most recent call last):
...
TypeError: exceptions must be classes or instances deriving from BaseException, not str
>>> g.throw(0)
Traceback (most recent call last):
...
TypeError: exceptions must be classes or instances deriving from BaseException, not int
>>> g.throw(list)
Traceback (most recent call last):
...
TypeError: exceptions must be classes or instances deriving from BaseException, not type
>>> def throw(g,exc):
... try:
... raise exc
... except:
... g.throw(*sys.exc_info())
>>> throw(g,ValueError) # do it with traceback included
caught ValueError ()
>>> g.send(1)
1
>>> throw(g,TypeError) # terminate the generator
Traceback (most recent call last):
...
TypeError
>>> print(g.gi_frame)
None
>>> g.send(2)
Traceback (most recent call last):
...
StopIteration
>>> g.throw(ValueError,6) # throw on closed generator
Traceback (most recent call last):
...
ValueError: 6
>>> f().throw(ValueError,7) # throw on just-opened generator
Traceback (most recent call last):
...
ValueError: 7
Plain "raise" inside a generator should preserve the traceback (#13188).
The traceback should have 3 levels:
- g.throw()
- f()
- 1/0
>>> def f():
... try:
... yield
... except:
... raise
>>> g = f()
>>> try:
... 1/0
... except ZeroDivisionError as v:
... try:
... g.throw(v)
... except Exception as w:
... tb = w.__traceback__
>>> levels = 0
>>> while tb:
... levels += 1
... tb = tb.tb_next
>>> levels
3
Now let's try closing a generator:
>>> def f():
... try: yield
... except GeneratorExit:
... print("exiting")
>>> g = f()
>>> next(g)
>>> g.close()
exiting
>>> g.close() # should be no-op now
>>> f().close() # close on just-opened generator should be fine
>>> def f(): yield # an even simpler generator
>>> f().close() # close before opening
>>> g = f()
>>> next(g)
>>> g.close() # close normally
And finalization:
>>> def f():
... try: yield
... finally:
... print("exiting")
>>> g = f()
>>> next(g)
>>> del g
exiting
GeneratorExit is not caught by except Exception:
>>> def f():
... try: yield
... except Exception:
... print('except')
... finally:
... print('finally')
>>> g = f()
>>> next(g)
>>> del g
finally
Now let's try some ill-behaved generators:
>>> def f():
... try: yield
... except GeneratorExit:
... yield "foo!"
>>> g = f()
>>> next(g)
>>> g.close()
Traceback (most recent call last):
...
RuntimeError: generator ignored GeneratorExit
>>> g.close()
Our ill-behaved code should be invoked during GC:
>>> import sys, io
>>> old, sys.stderr = sys.stderr, io.StringIO()
>>> g = f()
>>> next(g)
>>> del g
>>> "RuntimeError: generator ignored GeneratorExit" in sys.stderr.getvalue()
True
>>> sys.stderr = old
And errors thrown during closing should propagate:
>>> def f():
... try: yield
... except GeneratorExit:
... raise TypeError("fie!")
>>> g = f()
>>> next(g)
>>> g.close()
Traceback (most recent call last):
...
TypeError: fie!
Ensure that various yield expression constructs make their
enclosing function a generator:
>>> def f(): x += yield
>>> type(f())
<class 'generator'>
>>> def f(): x = yield
>>> type(f())
<class 'generator'>
>>> def f(): lambda x=(yield): 1
>>> type(f())
<class 'generator'>
>>> def f(): x=(i for i in (yield) if (yield))
>>> type(f())
<class 'generator'>
>>> def f(d): d[(yield "a")] = d[(yield "b")] = 27
>>> data = [1,2]
>>> g = f(data)
>>> type(g)
<class 'generator'>
>>> g.send(None)
'a'
>>> data
[1, 2]
>>> g.send(0)
'b'
>>> data
[27, 2]
>>> try: g.send(1)
... except StopIteration: pass
>>> data
[27, 27]
"""
refleaks_tests = """
Prior to adding cycle-GC support to itertools.tee, this code would leak
references. We add it to the standard suite so the routine refleak-tests
would trigger if it starts being uncleanable again.
>>> import itertools
>>> def leak():
... class gen:
... def __iter__(self):
... return self
... def __next__(self):
... return self.item
... g = gen()
... head, tail = itertools.tee(g)
... g.item = head
... return head
>>> it = leak()
Make sure to also test the involvement of the tee-internal teedataobject,
which stores returned items.
>>> item = next(it)
This test leaked at one point due to generator finalization/destruction.
It was copied from Lib/test/leakers/test_generator_cycle.py before the file
was removed.
>>> def leak():
... def gen():
... while True:
... yield g
... g = gen()
>>> leak()
This test isn't really generator related, but rather exception-in-cleanup
related. The coroutine tests (above) just happen to cause an exception in
the generator's __del__ (tp_del) method. We can also test for this
explicitly, without generators. We do have to redirect stderr to avoid
printing warnings and to doublecheck that we actually tested what we wanted
to test.
>>> import sys, io
>>> old = sys.stderr
>>> try:
... sys.stderr = io.StringIO()
... class Leaker:
... def __del__(self):
... def invoke(message):
... raise RuntimeError(message)
... invoke("test")
...
... l = Leaker()
... del l
... err = sys.stderr.getvalue().strip()
... "Exception ignored in" in err
... "RuntimeError: test" in err
... "Traceback" in err
... "in invoke" in err
... finally:
... sys.stderr = old
True
True
True
True
These refleak tests should perhaps be in a testfile of their own,
test_generators just happened to be the test that drew these out.
"""
# Doctest suites keyed by short names; doctest/regrtest discover and run
# everything registered here.
__test__ = {"tut": tutorial_tests,
            "pep": pep_tests,
            "email": email_tests,
            "fun": fun_tests,
            "syntax": syntax_tests,
            "conjoin": conjoin_tests,
            "weakref": weakref_tests,
            "coroutine": coroutine_tests,
            "refleaks": refleaks_tests,
            }
# Magic test name that regrtest.py invokes *after* importing this module.
# This worms around a bootstrap problem.
# Note that doctest and regrtest both look in sys.argv for a "-v" argument,
# so this works as expected in both ways of running regrtest.
def test_main(verbose=None):
    """regrtest entry point: run unittest cases, then the __test__ doctests."""
    from test import support, test_generators
    support.run_unittest(__name__)
    support.run_doctest(test_generators, verbose)
# This part isn't needed for regrtest, but for running the test directly.
if __name__ == "__main__":
test_main(1)
| gpl-2.0 |
damien-dg/horizon | openstack_dashboard/contrib/sahara/content/data_processing/data_image_registry/views.py | 25 | 4526 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import tables
from horizon.utils import memoized
from openstack_dashboard.contrib.sahara.api import sahara as saharaclient
from openstack_dashboard.contrib.sahara.content. \
data_processing.data_image_registry.forms import EditTagsForm
from openstack_dashboard.contrib.sahara.content. \
data_processing.data_image_registry.forms import RegisterImageForm
from openstack_dashboard.contrib.sahara.content. \
data_processing.data_image_registry.tables import ImageRegistryTable
LOG = logging.getLogger(__name__)
class ImageRegistryView(tables.DataTableView):
    """Table view listing the images registered with Sahara."""
    table_class = ImageRegistryTable
    template_name = (
        'project/data_processing.data_image_registry/image_registry.html')
    page_title = _("Image Registry")

    def get_data(self):
        """Return the registered images; empty list (with a user-facing
        error) when the Sahara API call fails."""
        try:
            return saharaclient.image_list(self.request)
        except Exception:
            exceptions.handle(self.request,
                              _('Unable to retrieve image list'))
            return []
def update_context_with_plugin_tags(request, context):
    """Store each plugin version's required image tags in context["plugins"].

    Resulting shape: {plugin_name: {version: required_image_tags}}.
    API failures are reported to the user and the affected entry skipped.
    """
    try:
        plugin_list = saharaclient.plugin_list(request)
    except Exception:
        plugin_list = []
        exceptions.handle(request, _("Unable to process plugin tags"))

    tags_by_plugin = {}
    for plugin in plugin_list:
        tags_by_plugin[plugin.name] = {}
        for version in plugin.versions:
            try:
                details = saharaclient.plugin_get_version_details(
                    request, plugin.name, version)
                tags_by_plugin[plugin.name][version] = (
                    details.required_image_tags)
            except Exception:
                exceptions.handle(request, _("Unable to process plugin tags"))

    context["plugins"] = tags_by_plugin
class EditTagsView(forms.ModalFormView):
    """Modal form for editing the tags of an already-registered image."""
    form_class = EditTagsForm
    template_name = (
        'project/data_processing.data_image_registry/edit_tags.html')
    success_url = reverse_lazy(
        'horizon:project:data_processing.data_image_registry:index')
    page_title = _("Edit Image Tags")

    def get_context_data(self, **kwargs):
        context = super(EditTagsView, self).get_context_data(**kwargs)
        context['image'] = self.get_object()
        # Expose every plugin version's required tags to the template.
        update_context_with_plugin_tags(self.request, context)
        return context

    @memoized.memoized_method
    def get_object(self):
        # Cached per request.  Returns None (after user-facing error
        # handling) if the image cannot be fetched.
        try:
            image = saharaclient.image_get(self.request,
                                           self.kwargs["image_id"])
        except Exception:
            image = None
            msg = _("Unable to fetch the image details")
            exceptions.handle(self.request, msg)
        return image

    def get_initial(self):
        # NOTE(review): assumes get_object() succeeded; a None image here
        # would raise AttributeError -- confirm upstream error handling.
        image = self.get_object()
        return {"image_id": image.id,
                "tags_list": json.dumps(image.tags),
                "user_name": image.username,
                "description": image.description}
class RegisterImageView(forms.ModalFormView):
    """Modal form for registering a new image with Sahara."""
    form_class = RegisterImageForm
    template_name = (
        'project/data_processing.data_image_registry/register_image.html')
    success_url = reverse_lazy(
        'horizon:project:data_processing.data_image_registry:index')
    page_title = _("Register Image")

    def get_context_data(self, **kwargs):
        context = super(RegisterImageView, self).get_context_data(**kwargs)
        # Expose every plugin version's required tags to the template.
        update_context_with_plugin_tags(self.request, context)
        return context

    def get_initial(self):
        # need this initialization to allow registration
        # of images without tags
        return {"tags_list": json.dumps([])}
| apache-2.0 |
ICT4H/dcs-mangrove | mangrove/transport/contract/survey_response.py | 1 | 9191 | from copy import deepcopy
from datetime import datetime
from mangrove.datastore.database import DataObject
from mangrove.datastore.documents import SurveyResponseDocument, DataRecordDocument
from mangrove.datastore.entity import DataRecord
from mangrove.utils.types import is_string, sequence_to_str, is_sequence, is_empty
from mangrove.utils.dates import utcnow
WEB = u"web"
SMS = u"sms"
SMART_PHONE = u"smartPhone"
TEST_USER = u"test"
#todo: put it in utils and use it while returning SurveyResponse values itself
def convert_dict_keys_to_lowercase(dictionary):
    """Lower-case every key of *dictionary* in place and return it.

    Iterates over a snapshot of the keys: popping entries while iterating
    the live ``keys()`` view raises RuntimeError on Python 3 (on Python 2
    ``keys()`` happened to return a list, masking the bug).
    """
    for key in list(dictionary.keys()):
        dictionary[key.lower()] = dictionary.pop(key)
    return dictionary
class SurveyResponse(DataObject):
    """Persisted record of a single form submission.

    Wraps a SurveyResponseDocument and links it to the DataRecord that is
    created when the submission is accepted; superseded data records are
    voided and kept in the document's history list.
    """
    __document_class__ = SurveyResponseDocument

    def __init__(self, dbm, transport_info=None, form_model_id=None, form_model_revision=None, values=None, owner_uid=None,
                 admin_id=None, response=None):
        # A fresh backing document is built only when transport_info is
        # supplied; otherwise the instance is hydrated later (e.g. DataObject
        # loading, or copy()).
        DataObject.__init__(self, dbm)
        self.response = response
        if transport_info is not None:
            doc = SurveyResponseDocument(channel=transport_info.transport,
                                         destination=transport_info.destination,
                                         form_model_id=form_model_id,
                                         form_model_revision=form_model_revision,
                                         values=values, status=False,
                                         error_message="", owner_uid=owner_uid, modified_by_id=admin_id)
            DataObject._set_document(self, doc)

    @property
    def version(self):
        # Revision of the underlying document.
        return self._doc.rev

    @property
    def data_record(self):
        # The DataRecord produced from this response, or None if none linked.
        return DataRecord.get(self._dbm, self._doc.data_record_id) if self._doc.data_record_id is not None else None

    @property
    def data_record_id(self):
        return self._doc.data_record_id

    @property
    def destination(self):
        return self._doc.destination

    @property
    def owner_uid(self):
        return self._doc.owner_uid

    @owner_uid.setter
    def owner_uid(self, owner_uid):
        self._doc.owner_uid = owner_uid

    @property
    def modified_by(self):
        return self._doc.modified_by

    @modified_by.setter
    def modified_by(self, modified_by):
        self._doc.modified_by = modified_by

    @property
    def created_by(self):
        return self._doc.created_by

    @created_by.setter
    def created_by(self, created_by):
        self._doc.created_by = created_by

    @property
    def uuid(self):
        # Alias for the document id.
        return self.id

    @property
    def status(self):
        return self._doc.status

    @property
    def channel(self):
        return self._doc.channel

    @channel.setter
    def channel(self, channel):
        self._doc.channel = channel

    @property
    def form_model_id(self):
        return self._doc.form_model_id

    @property
    def form_model_revision(self):
        return self._doc.form_model_revision

    @form_model_revision.setter
    def form_model_revision(self, form_model_revision):
        self._doc.form_model_revision = form_model_revision

    @form_model_id.setter
    def form_model_id(self, form_model_id):
        self._doc.form_model_id = form_model_id

    @property
    def values(self):
        return self._doc.values

    @property
    def errors(self):
        return self._doc.error_message

    @property
    def event_time(self):
        return self._doc.event_time

    @property
    def submitted_on(self):
        return self._doc.submitted_on

    @property
    def modified(self):
        return self._doc.modified

    def set_form(self, form_model):
        # Record which revision of the form model validated this response.
        self._doc.form_model_revision = form_model.revision

    def set_answers(self, values):
        # Falsy values (None, empty dict) leave the stored answers untouched.
        if values:
            self._doc.values = values

    def set_status(self, errors):
        """Mark the response successful only when *errors* is empty and no
        pending response text was set at construction time."""
        if errors.__len__() == 0 and self.response is None:
            self._doc.status = True
            self._doc.error_message = ''
        else:
            self._doc.status = False
            self._doc.error_message = self._to_string(errors)

    def _void_existing_data_record(self, void=True):
        # Void (or un-void) the currently linked data record and append its
        # id to the document's history list.
        data_record_id = self._doc.data_record_id
        if data_record_id is not None:
            data_record = DataRecord.get(self._dbm, data_record_id)
            data_record.void(void)
            self._doc.data_record_history.append(data_record_id)

    def create(self, data_record_id):
        """Link an already-saved data record and persist this response."""
        self._doc.data_record_id = data_record_id
        self.save()

    def update(self, bound_form_model, data, entity=None):
        """Replace the linked data record with a new one built from *data*.

        The previous record is voided and kept in the history list.
        Precondition: the response validated cleanly (no errors).
        """
        assert self.errors == ''
        submission_information = dict(form_code=bound_form_model.form_code)
        data_record_id = self.add_data(data=data,
                                       submission=submission_information)
        self._void_existing_data_record()
        self._doc.data_record_id = data_record_id
        self.save()

    def void(self, void=True):
        # Voiding a response also voids its linked data record.
        self._void_existing_data_record(void)
        super(SurveyResponse, self).void(void)

    def add_data(self, entity=None, data=(), event_time=None, submission=None, multiple_records=False):
        """
        Add a new datarecord to this Entity and return a UUID for the datarecord.

        Arguments:
            data: a sequence of ordered tuples, (label, value, type)
            event_time: the time at which the event occured rather than
                when it was reported
            submission_id: an id to a 'submission' document in the
                submission log from which this data came
            multiple_records: when True, save one DataRecordDocument per
                (label, value) pair instead of a single combined document
        """
        assert is_sequence(data)
        assert event_time is None or isinstance(event_time, datetime)
        assert self.id is not None, u"id should never be none, even if haven't been saved,an entity should have a UUID."
        if event_time is None:
            event_time = utcnow()
        for (label, value) in data:
            if is_empty(label):
                raise ValueError(u'Empty label')
        if multiple_records:
            data_list = []
            for (label, value) in data:
                data_record = DataRecordDocument(
                    event_time=event_time,
                    data=[(label, value)],
                    submission=submission
                )
                data_list.append(data_record)
            return self._dbm._save_documents(data_list)
        else:
            data_record_doc = DataRecordDocument(
                event_time=event_time,
                data=data,
                submission=submission
            )
            return self._dbm._save_document(data_record_doc)

    def _to_string(self, errors):
        # Normalize str / dict / sequence error containers to one string;
        # None for anything else.
        if is_string(errors):
            return errors
        if isinstance(errors, dict):
            return sequence_to_str(errors.values())
        if is_sequence(errors):
            return sequence_to_str(errors)
        return None

    def differs_from(self, older_response):
        """Build a SurveyResponseDifference against an older response.

        Keys are compared case-insensitively.  NOTE(review):
        convert_dict_keys_to_lowercase mutates the dicts in place, so both
        responses' `values` end up lower-cased as a side effect -- confirm
        callers do not rely on the original key case afterwards.
        """
        difference = SurveyResponseDifference(older_response.submitted_on, self.status != older_response.status)
        older_response_values = convert_dict_keys_to_lowercase(older_response.values)
        new_response_values = convert_dict_keys_to_lowercase(self.values)

        for key in new_response_values.keys():
            if key in older_response_values.keys():
                if new_response_values[key] != older_response_values[key]:
                    difference.add(key, older_response_values[key], new_response_values[key])
            else:
                # Answer present only in the newer response.
                difference.add(key, '', new_response_values[key])
        return difference

    def copy(self):
        # Detached copy: constructed with dbm=None, so it cannot be saved
        # as-is; values and event_time are deep-copied.
        survey_copy = SurveyResponse(None)
        survey_copy._doc = SurveyResponseDocument(self._doc.channel, self._doc.destination,
                                                  deepcopy(self.values), self.id, self.status, self.errors,
                                                  self.form_model_id, self.form_model_revision,
                                                  self.data_record.id if self.data_record else None,
                                                  deepcopy(self.event_time))
        return survey_copy

    @property
    def is_anonymous_submission(self):
        return self._doc.is_anonymous_submission

    @is_anonymous_submission.setter
    def is_anonymous_submission(self, value):
        self._doc.is_anonymous_submission = value

    @property
    def is_guest_submission(self):
        return self._doc.is_guest_submission

    @is_guest_submission.setter
    def is_guest_submission(self, value):
        self._doc.is_guest_submission = value
class SurveyResponseDifference(object):
    """Diff between two survey responses: creation time, status flip, and
    the per-question answer changes."""

    def __init__(self, submitted_on, status_changed):
        self.created = submitted_on
        self.status_changed = status_changed
        # question key -> {'old': previous answer, 'new': current answer}
        self.changed_answers = {}

    def add(self, key, old_value, new_value):
        """Record one changed answer."""
        self.changed_answers[key] = {'old': old_value, 'new': new_value}

    def __eq__(self, other):
        # Bug fix: the original returned None (not False) for unequal
        # objects because the function fell off the end when the combined
        # condition was false.  Both are falsy, so this is backward
        # compatible, but `(a == b) is False` now holds as expected.
        assert isinstance(other, SurveyResponseDifference)
        return (self.created == other.created
                and self.status_changed == other.status_changed
                and self.changed_answers == other.changed_answers)
| bsd-3-clause |
baloo/shinken | test/test_properties.py | 1 | 1876 | #!/usr/bin/env python2.6
#Copyright (C) 2009-2010 :
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
#This file is part of Shinken.
#
#Shinken is free software: you can redistribute it and/or modify
#it under the terms of the GNU Affero General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#Shinken is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU Affero General Public License for more details.
#
#You should have received a copy of the GNU Affero General Public License
#along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
#It's ugly I know....
from shinken_test import *
from shinken.property import UnusedProp, BoolProp, IntegerProp, FloatProp, CharProp, StringProp
class TestConfig(ShinkenTest):
    """Unit tests for shinken.property classes (Python 2 style asserts)."""
    # Uncomment this if you want to use a specific configuration
    # for your test
    def setUp(self):
        # No fixture needed: each test builds its own property objects.
        pass
    # Test the bool property class
    def test_bool_property(self):
        """BoolProp.pythonize() must map the strings "1"/"0" to True/False."""
        p = BoolProp(default='1', class_inherit=[('Host', 'accept_passive_checks')])
        print p.__dict__
        s = "1"
        val = p.pythonize(s)
        print s, val
        self.assert_(val == True)
        s = "0"
        val = p.pythonize(s)
        print s, val
        self.assert_(val == False)
        # Now a service one
        p = BoolProp(default='0', fill_brok=['full_status'])
        print p.__dict__
        s = "1"
        val = p.pythonize(s)
        print s, val
        self.assert_(val == True)
        # The fill_brok list passed to the constructor must be preserved.
        self.assert_('full_status' in p.fill_brok)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| agpl-3.0 |
shurihell/testasia | openedx/core/djangoapps/util/forms.py | 70 | 2424 | """
Custom forms-related types
"""
from django.core.exceptions import ValidationError
from django.forms import Field, MultipleHiddenInput, NullBooleanField, Select
class MultiValueField(Field):
    """
    Form field that accepts a set of values for a single field name.

    Input may arrive as:
    1. a comma-separated-list (foo:bar1,bar2,bar3), or
    2. a repeated field in a MultiValueDict (foo:bar1, foo:bar2, foo:bar3)
    3. a combination of the above (foo:bar1,bar2, foo:bar3)
    There is currently no way to pass a value containing a comma.
    The cleaned value is a python set of the values as strings.
    """
    widget = MultipleHiddenInput
    def to_python(self, list_of_string_values):
        """
        Convert the raw form input into a set of strings.
        """
        raw = super(MultiValueField, self).to_python(list_of_string_values)
        if not raw:
            return set()
        # Flatten repeated fields and comma-separated entries alike by
        # joining everything, then splitting on commas.
        combined = ','.join(raw)
        if not combined:
            return set()
        return set(combined.split(','))
    def validate(self, values):
        """
        Reject inputs that contain an empty-string entry.
        """
        if values and "" in values:
            raise ValidationError("This field cannot be empty.")
class ExtendedNullBooleanField(NullBooleanField):
    """
    Nullable boolean field that also accepts the string spellings
    'True'/'true'/'1' and 'False'/'false'/'0'.
    """
    NULL_BOOLEAN_CHOICES = (
        (None, ""),
        (True, True),
        (True, "True"),
        (True, "true"),
        (True, "1"),
        (False, False),
        (False, "False"),
        (False, "false"),
        (False, "0"),
    )
    widget = Select(choices=NULL_BOOLEAN_CHOICES)
    def to_python(self, value):
        """
        Map the accepted literals onto True/False/None.

        Missing/empty values yield None; anything unrecognized raises
        ValidationError.
        """
        truthy = (True, 'True', 'true', '1')
        falsy = (False, 'False', 'false', '0')
        if value in truthy:
            return True
        if value in falsy:
            return False
        # Note: 0/False are already handled above, so this only catches
        # genuinely missing values (None, '', etc.).
        if not value:
            return None
        raise ValidationError("Invalid Boolean Value.")
| agpl-3.0 |
CraigKelly/ted-youtube-data | ytscrape/ytcrawl.py | 1 | 2823 | #!/usr/bin/env python3
"""Extracting data from ytcrawl.tar.gz.
See the YTCrawl directory for details on creating it.
"""
# pylama:ignore=E501,D213
import json
import os
import sys
import inspect
import tarfile
from bs4 import BeautifulSoup
ALL_BRAG_BAR = set()
def log(msg, *args):
    """Write a message to stderr, prefixed with the caller's file name.

    Positional arguments, when given, are %-interpolated into *msg*.
    Both streams are flushed so log lines interleave sanely with stdout.
    """
    text = msg % args if args else msg
    caller_file = inspect.getfile(sys._getframe(1))
    sys.stderr.write("%s: %s\n" % (caller_file, text))
    sys.stderr.flush()
    sys.stdout.flush()
def extract(buffer, start_tag, end_tag):
    """Return the stripped text between *start_tag* and *end_tag* in *buffer*.

    Returns "" when start_tag is absent; raises ValueError when start_tag is
    present but end_tag cannot be found after it.
    """
    begin = buffer.find(start_tag)
    if begin == -1:
        return ""
    payload_start = begin + len(start_tag)
    finish = buffer.find(end_tag, payload_start)
    if finish < begin:
        raise ValueError("Invalid buffer found - found '%s' but not '%s'" % (start_tag, end_tag))
    return buffer[payload_start:finish].strip()
def process(tarinfo, reader):
    """Extract stats for one crawled video and print them as a JSON line.

    Returns True when a record was emitted, False when the crawl entry
    contained an error message and was skipped.
    """
    # The YouTube video ID is the file-name component of the tar entry.
    ytid = os.path.split(tarinfo.name)[-1]
    # NOTE(review): reader.read() returns bytes, so str() yields the
    # "b'...'" repr rather than decoded text; extract() still locates the
    # tags inside that repr, but a .decode() was presumably intended - confirm.
    data = str(reader.read()).strip()
    log("Processing: [%s] len=%12d %s", ytid, len(data), tarinfo.name)
    err_msg = extract(data, "<error_message><![CDATA[", "]]></error_message>")
    if err_msg:
        log("  Skipping: %s", err_msg)
        return False
    daily_stats = json.loads(extract(data, "<graph_data><![CDATA[", "]]></graph_data>"))
    brag_bar_html = extract(data, "<html_content><![CDATA[", "]]></html_content>")
    # Both sections must be present in a non-error crawl result.
    assert daily_stats
    assert brag_bar_html
    brag_bar = {}
    soup = BeautifulSoup(brag_bar_html, 'html.parser')
    for td in soup.find_all("td"):
        # Only "brag bar" table cells carry the label/value metric pairs.
        if "stats-bragbar" not in td.get('class'):
            continue
        label, value = "", ""
        for ch in td.children:
            if ch.name == "span" and "metric-label" in ch.get('class'):
                label = ch.text
            elif ch.name == "div" and "bragbar-metric" in ch.get('class'):
                value = ch.text
        if label and value:
            brag_bar[label] = value
            ALL_BRAG_BAR.add(label)
    daily_stats["YTID"] = ytid
    daily_stats["BragBar"] = brag_bar
    # One JSON object per line on stdout (logging goes to stderr).
    print(json.dumps(daily_stats))
    return True
def main():
    """Entry point: walk ytcrawl.tar.gz and emit one JSON record per video."""
    # Bookkeeping entries produced by the crawler, not video data.
    skip_names = set(["key.done", "log"])
    count, processed = 0, 0
    with tarfile.open("ytcrawl.tar.gz") as tf:
        for tarinfo in tf:
            # Skip directories and empty members.
            if not tarinfo.isfile() or tarinfo.size < 1:
                continue
            # Only members under data/ hold per-video crawl results.
            if tarinfo.name in skip_names or "data/" not in tarinfo.name:
                continue
            count += 1
            if process(tarinfo, tf.extractfile(tarinfo)):
                processed += 1
    log("Processed %d out of %d", processed, count)
    log("All brag bar labels: %s", repr(ALL_BRAG_BAR))
# Script entry point.
if __name__ == '__main__':
    main()
| mit |
ltcmelo/uaiso | Scripts/GenTokens.py | 1 | 7978 | #!/usr/bin/python
# -----------------------------------------------------------------------------
# Copyright (c) 2014-2016 Leandro T. C. Melo (ltcmelo@gmail.com)
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
# USA
# -----------------------------------------------------------------------------
# -------------------------- #
# --- The UaiSo! Project --- #
# -------------------------- #
import operator
import GenLib
# Shared state populated by build_token_map() and consumed by the writers:
# token kind -> [token number (as a string), spelling].
_token_map = {}
# _token_map items sorted by kind, for stable generated output.
_sorted_tokens = []
# Token kinds that are aliases for another token's number.
_alias_tokens = set()
def write_bison_token_spec(bison_file):
    """Rewrite the %token declarations in *bison_file*.

    Only the region between the generated-section marker comments is
    replaced; the rest of the grammar file is kept intact.
    """
    print "Writing to %s" % bison_file
    token_decls = ""
    for token in _sorted_tokens:
        # TODO: Filter tokens only relevant to this parser.
        token_num = token[1][0]
        if int(token_num) < 258:
            # A single character ASCII token, skip it and let Bison define it.
            continue
        # Assemble the %token declarations.
        if token[1][1]:
            if token[0] in _alias_tokens:
                # We don't want the aliases in Bison's declarations.
                continue
            token_decl = "%%token %s %s %s\n" % (token[0], token_num, token[1][1])
        else:
            token_decl = "%%token %s %s\n" % (token[0], token_num)
        token_decls += token_decl
    with open(bison_file, "r") as f:
        content = f.read()
    # Marker comments delimiting the auto-generated region in the .y file.
    mark = (
        "    /*--------------------------------------------------*/\n"
        "    /*--- Token declarations                         ---*/\n"
        "    /*---                                            ---*/\n"
        "    /*--- This section is AUTOMATICALLY GENERATED.   ---*/\n"
        "    /*--- Do NOT edit manually, changes will be lost ---*/\n"
        "    /*--- Please refer to Tokens.def                 ---*/\n"
        "    /*--------------------------------------------------*/\n"
    )
    begin = content.find(mark)
    end = content.find(
        "    /*------------------------------------------*/\n"
        "    /*--- Tokens AUTOMATICALLY GENERATED end ---*/\n"
        "    /*------------------------------------------*/\n"
    )
    if begin == -1 or end == -1:
        raise Exception("%s got messed up" % bison_file)
    # Keep everything up to (and including) the opening marker, splice in
    # the fresh declarations, then keep everything from the closing marker.
    new_content = content[:begin + len(mark)]
    new_content += token_decls
    new_content += content[end:]
    with open(bison_file, "w") as f:
        f.write(new_content)
def write_token_names():
    """Generate Parsing/TokenName.cpp: a token-number -> spelling map."""
    token_names_file = "Parsing/TokenName.cpp"
    print "Creating %s" % token_names_file
    content = GenLib.cpp_file_header()
    content += (
        "#include \"Parsing/Token.h\"\n"
        "\n"
        "namespace uaiso {\n"
        "\n"
        "std::unordered_map<std::uint16_t, const char*> tokenName {\n"
    )
    # Put tokens and names in a map, following the naming convention from the enumeration file.
    for token in _sorted_tokens:
        if token[0].startswith("BEGIN_") or token[0].startswith("END_"):
            content += "    { %s, %s },\n" % (token[0], token[1][1])
        else:
            content += "    { TK_%s, %s },\n" % (token[0], token[1][1])
    content += (
        "};\n"
        "\n"
        "} // namespace uaiso\n"
    )
    with open(token_names_file, "w") as f:
        f.write(content)
def write_tokens():
    """Generate Parsing/Token.h: the unified Token enumeration header."""
    token_file = "Parsing/Token.h"
    print "Creating %s" % token_file
    content = GenLib.cpp_file_header()
    content += (
        "#ifndef UAISO_TOKEN_H__\n"
        "#define UAISO_TOKEN_H__\n"
        "\n"
        "#include \"Common/Config.h\"\n"
        "#include <cstdint>\n"
        "#include <iostream>\n"
        "#include <unordered_map>\n"
        "\n"
        "namespace uaiso {\n"
        "\n"
        "/* Tokens are unified, it's reponsibility of a lexer to provide only\n"
        "   the relevant tokens for a particular language. */\n"
        "\n"
        "enum Token : std::uint16_t\n"
        "{\n"
    )
    # Declare enum items.
    for token in _sorted_tokens:
        if token[0].startswith("BEGIN_") or token[0].startswith("END_"):
            # It's to indicate a token range, not a token itself.
            enum_item = "    %s = %s,\n" % (token[0], token[1][0])
        else:
            enum_item = "    TK_%s = %s,\n" % (token[0], token[1][0])
        content += enum_item
    content += (
        "};\n"
        "\n"
        "UAISO_API std::ostream& operator<<(std::ostream& os, Token tk);\n"
        "\n"
        "} // namespace uaiso\n"
        "\n"
        "#endif"
    )
    with open(token_file, "w") as f:
        f.write(content)
def build_token_map():
    """ The token specification is expected to have the following format:

            TOKEN KIND SPELLING DATA

        Where DATA is optional and might be the token's number or an alias
        to another token. When DATA is not present, the token's number is
        taken from an incremental counter, starting from the token
        IDENT that begins at 258 (where bison starts numbering non-
        single-character ASCII tokens).
    """
    file_path = "Parsing/Tokens.def"
    token_num_counter = 259 # Bison starts from 258, which matches our IDENT.
    print "Generating tokens from %s" % file_path
    # First thing is to build the token map.
    with open(file_path, "r") as f:
        for line in f:
            line = line.strip() # Get rid of line endings.
            if line.startswith("TOKEN"):
                line = line.split(' ')[1:] # Discard the TOKEN part.
                # The KIND part exists for every token and must not be duplicate.
                token_kind = line[0]
                if token_kind in _token_map:
                    raise Exception("Duplicate token kind %s" % token_kind)
                # The SPELLING must also be specified for every token.
                token_spelling = line[1]
                if len(line) == 3:
                    # This token has explicit DATA, but we need to check whether it's
                    # actually a number, a spelling, or just a reference to another token.
                    # In the latter case, the referenced code must have already been seen.
                    token_data = line[2]
                    if not token_data.isdigit():
                        # It's a reference to another token (an alias).
                        if not token_data in _token_map:
                            raise Exception("Could not find matching reference for %s" % token_data)
                        token_data = _token_map[token_data][0]
                        assert(token_data.isdigit())
                        _alias_tokens.add(token_kind)
                    _token_map[token_kind] = [token_data, token_spelling]
                else:
                    # This is a "regular" token, with an incrementing code number.
                    assert(len(line) == 2)
                    _token_map[token_kind] = [str(token_num_counter), token_spelling]
                    token_num_counter += 1
    # Sort the tokens for better reading.
    global _sorted_tokens
    _sorted_tokens = sorted(_token_map.items(), key=operator.itemgetter(0))
def run():
    """Regenerate all token artifacts: C++ header, name map, and grammars."""
    build_token_map()
    write_tokens()
    write_token_names()
    write_bison_token_spec("D/D.y")
    write_bison_token_spec("Go/Go.y")
# Script entry point.
if __name__ == "__main__":
    run()
| lgpl-2.1 |
sparkslabs/kamaelia_ | Code/Python/Kamaelia/Kamaelia/Device/DVB/Receiver.py | 3 | 8056 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
"""\
=======================================================
DVB-T (Digital Terrestrial TV) Tuner & Demuxing Service
=======================================================
Tunes to the specified frequency, using the specified parameters, using a DVB
tuner device; then demultiplexes packets by packet ID (PID) from DVB/MPEG
transport streams. Provides this as a service, to which other components can
subscribe as clients, requesting to receive packets with certain PIDs.
This is a prefab component built out of a Tuner and DemuxerService component.
Example Usage
-------------
(Using experimental Kamaelia.Experimental.Services components)
Set up receiver as a named public service, then subscribe to specific PIDs for
recording a stream and some event information::
feparams = {
"inversion" : dvb3.frontend.INVERSION_AUTO,
"constellation" : dvb3.frontend.QAM_16,
"code_rate_HP" : dvb3.frontend.FEC_3_4,
"code_rate_LP" : dvb3.frontend.FEC_3_4,
}
RegisterService( Receiver(505.833330, feparams),
{"DEMUXER":"inbox"}
).activate()
Pipeline( Subscribe("DEMUXER", [600,601]),
SimpleFileWriter("recording_of_600_and_601.ts"),
).activate()
Pipeline( Subscribe("DEMUXER", [18]),
SimpleFileWriter("event_information_data.ts")
).run()
How does it work?
-----------------
This component is a prefab combining a Tuner and a DemuxerService component.
Use this component in exactly the same way as you would use the
Kamaelia.Device.DVB.DemuxerService component. The only difference is that
requests should be sent to the "inbox" inbox, instead of a different one.
To request to be sent packets with particular PIDs, send messages of the form:
("ADD", (dest_component, dest_inboxname), [pid, pid, ...])
("REMOVE", (dest_component, dest_inboxname), [pid, pid, ...])
For more details, see Kamaelia.Device.DVB.DemuxerService.
Internally, the DemuxerService component is wired so that its requests for PIDs
go straight back to the Tuner component. When a client makes a request, the
DemuxerService therefore automatically asks the Tuner to give it only the
packets it needs to satisfy all its current clients.
This component will terminate if a shutdownMicroprocess or producerFinished
message is sent to the "control" inbox. The message will be forwarded on out of
the "signal" outbox just before termination.
"""
import os
import dvb3.frontend
import dvb3.dmx
import time
import struct
from Axon.ThreadedComponent import threadedcomponent
from Axon.Ipc import shutdownMicroprocess,producerFinished
from Kamaelia.Chassis.Graphline import Graphline
from Tuner import Tuner
from DemuxerService import DemuxerService
def Receiver(frequency, feparams, card=0):
    """Prefab wiring a DVB Tuner and a DemuxerService into one service.

    Client PID requests arriving on "inbox" go to the demuxer, which in
    turn asks the tuner for exactly the PIDs its clients need; shutdown
    messages flow tuner -> demuxer -> out of "signal".
    """
    return Graphline( TUNER = Tuner(frequency, feparams, card),
                      DEMUX = DemuxerService(),
                      linkages = {
                          ("self", "inbox") : ("DEMUX","request"),
                          ("DEMUX","pid_request") : ("TUNER","inbox"),
                          ("TUNER","outbox") : ("DEMUX","inbox"),
                          # propagate shutdown messages
                          ("self", "control") : ("TUNER", "control"),
                          ("TUNER", "signal") : ("DEMUX", "control"),
                          ("DEMUX", "signal") : ("self", "signal"),
                      }
                    )
__kamaelia_prefabs__ = ( Receiver, )
if __name__=="__main__":
import random
from Axon.Component import component
from Axon.CoordinatingAssistantTracker import coordinatingassistanttracker as CAT
from Axon.AxonExceptions import ServiceAlreadyExists
    class Subscriber(component):
        """Demo client that randomly (un)subscribes to PIDs of a named service.

        Prints a column of status lines indented by *spacing* so several
        subscribers can be told apart on one terminal.
        """
        def __init__(self, servicename, spacing, *pids):
            super(Subscriber,self).__init__()
            # PIDs we could subscribe to but currently haven't.
            self.notsubscribed = list(pids)
            self.subscribed = []
            self.spacing = " "*spacing
            self.servicename = servicename
        def takesomefrom(self,source):
            """Remove and return a random non-empty subset of *source*."""
            items = []
            if len(source):
                qty = 1+random.randrange(0,len(source))
                for _ in range(0,qty):
                    i = random.randrange(0,len(source))
                    items.append(source[i])
                    del source[i]
            return items
        def changeSubscription(self):
            """Randomly ADD some unsubscribed PIDs or REMOVE some subscribed ones."""
            if random.randrange(0,2) == 1:
                pids = self.takesomefrom(self.notsubscribed)
                self.send( ("ADD",pids,(self,"inbox")), "outbox")
                self.subscribed.extend(pids)
            else:
                pids = self.takesomefrom(self.subscribed)
                self.send( ("REMOVE",pids,(self,"inbox")), "outbox")
                self.notsubscribed.extend(pids)
            print (self.spacing,"Now subscribed to pids:")
            print (self.spacing,"   ",self.subscribed)
        def main(self):
            """Generator mainloop: verify received packets match our subscription."""
            cat = CAT.getcat()
            service = cat.retrieveService(self.servicename)
            self.link((self,"outbox"),service)
            nextchangetime = self.scheduler.time + random.randrange(5,10)
            self.notyetreceived = self.subscribed[:]
            while 1:
                while self.dataReady("inbox"):
                    packet = self.recv("inbox")
                    # 13-bit PID lives in bytes 1-2 of a transport stream packet.
                    pid = ((ord(packet[1]) << 8) + ord(packet[2])) & 0x1fff
                    if pid not in self.subscribed:
                        print (self.spacing,"Shouldn't have received pid:",pid)
                    else:
                        if pid in self.notyetreceived:
                            print (self.spacing,"Received 1st of pid:",pid)
                            self.notyetreceived.remove(pid)
                if self.scheduler.time >= nextchangetime:
                    # Periodically churn the subscription to exercise ADD/REMOVE.
                    nextchangetime = self.scheduler.time + random.randrange(10,20)
                    self.changeSubscription()
                    self.notyetreceived = self.subscribed[:]
                if self.subscribed:
                    self.pause()
                yield 1
from Kamaelia.Chassis.Pipeline import Pipeline
from Kamaelia.Experimental.Services import RegisterService
feparams = {
"inversion" : dvb3.frontend.INVERSION_AUTO,
"constellation" : dvb3.frontend.QAM_16,
"code_rate_HP" : dvb3.frontend.FEC_3_4,
"code_rate_LP" : dvb3.frontend.FEC_3_4,
}
print ("Tunes to UK Crystal palace transmitter MUX 1")
print ("Subscribers subscribe to PIDs that should contain data")
print ("May take several seconds before you see any activity...")
print ("---1st subscriber:------|---2nd subscriber:------")
Subscriber("MUX1", 0, 0,0x11,0x12,600,601).activate()
Subscriber("MUX1", 25, 0,0x11,0x12,600,601).activate()
demux = Receiver(505833330.0/1000000.0, feparams)
RegisterService(demux,{"MUX1":"inbox"}).run()
| apache-2.0 |
carlos-jenkins/ops-topology-lib-vtysh | setup.py | 1 | 2794 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 Hewlett Packard Enterprise Development LP <asicapi@hp.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from setuptools import setup, find_packages
def read(filename):
    """Return the contents of *filename*, resolved relative to setup.py."""
    import os
    base_dir = os.path.dirname(os.path.abspath(__file__))
    full_path = os.path.join(base_dir, filename)
    with open(full_path) as handle:
        return handle.read()
def find_version(filename):
    """Extract the __version__ string from *filename*.

    Raises RuntimeError when no version assignment is found.
    """
    import re
    match = re.search(
        r"^__version__ = ['\"]([^'\"]*)['\"]", read(filename), re.M
    )
    if match is None:
        raise RuntimeError('Unable to find version string.')
    return match.group(1)
def find_requirements(filename):
    """List requirement lines from *filename*.

    A requirement line is any stripped line whose first character is an
    ASCII letter (comments, options and blank lines are skipped).
    """
    import string
    stripped = (line.strip() for line in read(filename).splitlines())
    return [line for line in stripped
            if line and line[:1] in string.ascii_letters]
# Package manifest: the version is read from the package's __init__.py and
# the dependency list from requirements.txt, so there is a single source of
# truth for each.
setup(
    name='topology_lib_vtysh',
    version=find_version('lib/topology_lib_vtysh/__init__.py'),
    package_dir={'': 'lib'},
    packages=find_packages('lib'),
    # Dependencies
    install_requires=find_requirements('requirements.txt'),
    # Metadata
    author='Hewlett Packard Enterprise Development LP',
    author_email='asicapi@hp.com',
    description=(
        'Vtysh Communication Library for Topology'
    ),
    long_description=read('README.rst'),
    url='https://github-sc-p.corp.hp.com/hpe-networking/topology_lib_vtysh',
    keywords='topology_lib_vtysh',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Natural Language :: English',
        'Operating System :: POSIX :: Linux',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
    ],
    # Entry points
    entry_points={
        'topology_library_10': [
            'vtysh = topology_lib_vtysh.library'
        ]
    }
)
| apache-2.0 |
protron/namebench | libnamebench/base_ui.py | 2 | 7465 | # Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A base user-interface workflow, to be inherited by UI modules."""
import tempfile
import benchmark
import better_webbrowser
import config
import data_sources
import geoip
import nameserver_list
import reporter
import site_connector
import util
__author__ = 'tstromberg@google.com (Thomas Stromberg)'
class BaseUI(object):
"""Common methods for all UI implementations."""
def __init__(self):
self.SetupDataStructures()
def SetupDataStructures(self):
"""Instead of requiring users to inherit __init__(), this sets up structures."""
self.reporter = None
self.nameservers = None
self.bmark = None
self.report_path = None
self.csv_path = None
self.geodata = None
self.country = None
self.sources = {}
self.url = None
self.share_state = None
self.test_records = []
def UpdateStatus(self, msg, **kwargs):
"""Update the little status message on the bottom of the window."""
if hasattr(self, 'status_callback') and self.status_callback:
self.status_callback(msg, **kwargs)
else:
print msg
def DebugMsg(self, message):
self.UpdateStatus(message, debug=True)
def LoadDataSources(self):
self.data_src = data_sources.DataSources(status_callback=self.UpdateStatus)
def PrepareTestRecords(self):
"""Figure out what data source a user wants, and create test_records."""
if self.options.input_source:
src_type = self.options.input_source
else:
src_type = self.data_src.GetBestSourceDetails()[0]
self.options.input_source = src_type
self.test_records = self.data_src.GetTestsFromSource(
src_type,
self.options.query_count,
select_mode=self.options.select_mode
)
def PrepareNameServers(self):
"""Setup self.nameservers to have a list of healthy fast servers."""
self.nameservers = nameserver_list.NameServers(
self.supplied_ns,
global_servers=self.global_ns,
regional_servers=self.regional_ns,
include_internal=self.include_internal,
num_servers=self.options.num_servers,
timeout=self.options.timeout,
ping_timeout=self.options.ping_timeout,
health_timeout=self.options.health_timeout,
ipv6_only=self.options.ipv6_only,
status_callback=self.UpdateStatus
)
if self.options.invalidate_cache:
self.nameservers.InvalidateSecondaryCache()
self.nameservers.cache_dir = tempfile.gettempdir()
# Don't waste time checking the health of the only nameserver in the list.
if len(self.nameservers) > 1:
self.nameservers.thread_count = int(self.options.health_thread_count)
self.nameservers.cache_dir = tempfile.gettempdir()
self.UpdateStatus('Checking latest sanity reference')
(primary_checks, secondary_checks, censor_tests) = config.GetLatestSanityChecks()
if not self.options.enable_censorship_checks:
censor_tests = []
else:
self.UpdateStatus('Censorship checks enabled: %s found.' % len(censor_tests))
self.nameservers.CheckHealth(primary_checks, secondary_checks, censor_tests=censor_tests)
def PrepareBenchmark(self):
"""Setup the benchmark object with the appropriate dataset."""
if len(self.nameservers) == 1:
thread_count = 1
else:
thread_count = self.options.benchmark_thread_count
self.bmark = benchmark.Benchmark(self.nameservers,
query_count=self.options.query_count,
run_count=self.options.run_count,
thread_count=thread_count,
status_callback=self.UpdateStatus)
def RunBenchmark(self):
"""Run the benchmark."""
results = self.bmark.Run(self.test_records)
index = []
if self.options.upload_results in (1, True):
connector = site_connector.SiteConnector(self.options, status_callback=self.UpdateStatus)
index_hosts = connector.GetIndexHosts()
if index_hosts:
index = self.bmark.RunIndex(index_hosts)
else:
index = []
self.DiscoverLocation()
if len(self.nameservers) > 1:
self.nameservers.RunPortBehaviorThreads()
self.reporter = reporter.ReportGenerator(self.options, self.nameservers,
results, index=index, geodata=self.geodata)
def DiscoverLocation(self):
if not getattr(self, 'geodata', None):
self.geodata = geoip.GetGeoData()
self.country = self.geodata.get('country_name', None)
return self.geodata
def RunAndOpenReports(self):
"""Run the benchmark and open up the report on completion."""
self.RunBenchmark()
best = self.reporter.BestOverallNameServer()
self.CreateReports()
if self.options.template == 'html':
self.DisplayHtmlReport()
if self.url:
self.UpdateStatus('Complete! Your results: %s' % self.url)
else:
self.UpdateStatus('Complete! %s [%s] is the best.' % (best.name, best.ip))
def CreateReports(self):
"""Create CSV & HTML reports for the latest run."""
if self.options.output_file:
self.report_path = self.options.output_file
else:
self.report_path = util.GenerateOutputFilename(self.options.template)
if self.options.csv_file:
self.csv_path = self.options_csv_file
else:
self.csv_path = util.GenerateOutputFilename('csv')
if self.options.upload_results in (1, True):
# This is for debugging and transparency only.
self.json_path = util.GenerateOutputFilename('js')
self.UpdateStatus('Saving anonymized JSON to %s' % self.json_path)
json_data = self.reporter.CreateJsonData()
f = open(self.json_path, 'w')
f.write(json_data)
f.close()
self.UpdateStatus('Uploading results to %s' % self.options.site_url)
connector = site_connector.SiteConnector(self.options, status_callback=self.UpdateStatus)
self.url, self.share_state = connector.UploadJsonResults(
json_data,
hide_results=self.options.hide_results
)
if self.url:
self.UpdateStatus('Your sharing URL: %s (%s)' % (self.url, self.share_state))
self.UpdateStatus('Saving report to %s' % self.report_path)
f = open(self.report_path, 'w')
self.reporter.CreateReport(format=self.options.template,
output_fp=f,
csv_path=self.csv_path,
sharing_url=self.url,
sharing_state=self.share_state)
f.close()
self.UpdateStatus('Saving detailed results to %s' % self.csv_path)
self.reporter.SaveResultsToCsv(self.csv_path)
def DisplayHtmlReport(self):
self.UpdateStatus('Opening %s' % self.report_path)
better_webbrowser.output = self.DebugMsg
better_webbrowser.open(self.report_path)
| apache-2.0 |
ioc32/ripe-atlas-tools | tests/commands/probe_search.py | 1 | 22250 | # Copyright (c) 2015 RIPE NCC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
import requests
try:
from unittest import mock # Python 3.4+
except ImportError:
import mock
from ripe.atlas.tools.commands.probe_search import Command
from ripe.atlas.tools.exceptions import RipeAtlasToolsException
from ripe.atlas.cousteau import Probe
from ripe.atlas.tools.aggregators import ValueKeyAggregator
from ..base import capture_sys_output
COMMAND_MODULE = "ripe.atlas.tools.commands.probe_search"
class FakeGen(object):
    """Iterator standing in for the cousteau probe result stream in tests.

    Yields five canned Probe objects in declaration order, then raises
    StopIteration.
    """
    def __init__(self,):
        self.probes = [
            Probe(id=1, meta_data={
                "country_code": "GR", "asn_v4": 3333, "prefix_v4": "193.0/22"}),
            Probe(id=2, meta_data={
                "country_code": "DE", "asn_v4": 3333, "prefix_v4": "193.0/22"}),
            Probe(id=3, meta_data={
                "country_code": "DE", "asn_v4": 3332, "prefix_v4": "193.0/22"}),
            Probe(id=4, meta_data={
                "country_code": "NL", "asn_v4": 3333, "prefix_v4": "193.0/22"}),
            Probe(id=5, meta_data={
                "country_code": "GR", "asn_v4": 3333, "prefix_v4": "193.0/22"}),
        ]
        # NOTE(review): total_count is 4 although 5 probes are listed -
        # presumably deliberate to exercise count handling; confirm.
        self.total_count = 4
    def __iter__(self):
        return self
    # Python 3 compatibility
    def __next__(self):
        return self.next()
    def next(self):
        # Pop from the front so probes come out in declaration order.
        if not self.probes:
            raise StopIteration()
        else:
            return self.probes.pop(0)
class TestProbesCommand(unittest.TestCase):
    def setUp(self):
        # Show full diffs when dict/list assertions fail.
        self.maxDiff = None
    def test_with_empty_args(self):
        """User passes no args, should fail with RipeAtlasToolsException"""
        # Construction, arg parsing and run all happen inside the context
        # manager on purpose: the exception may come from any of them.
        with self.assertRaises(RipeAtlasToolsException):
            cmd = Command()
            cmd.init_args([])
            cmd.run()
    def test_with_random_args(self):
        """User passes random args, should fail with SystemExit"""
        # capture_sys_output() swallows argparse's usage message on stderr.
        with capture_sys_output():
            with self.assertRaises(SystemExit):
                cmd = Command()
                cmd.init_args(["blaaaaaaa"])
                cmd.run()
    def test_arg_with_no_value(self):
        """User passed not boolean arg but no value"""
        # argparse exits (SystemExit) when a value-taking flag has no value.
        with capture_sys_output():
            with self.assertRaises(SystemExit):
                cmd = Command()
                cmd.init_args(["--asn"])
                cmd.run()
def test_arg_with_wrong_type(self):
"""User passed arg with wrong type. e.g string for asn"""
with capture_sys_output():
with self.assertRaises(SystemExit):
cmd = Command()
cmd.init_args(["--asn", "blaaaaa"])
cmd.run()
with self.assertRaises(SystemExit):
cmd = Command()
cmd.init_args(["--asnv4", "blaaaaa"])
cmd.run()
with self.assertRaises(SystemExit):
cmd = Command()
cmd.init_args(["--asnv6", "blaaaaa"])
cmd.run()
with self.assertRaises(SystemExit):
cmd = Command()
cmd.init_args(["--limit", "blaaaaa"])
cmd.run()
with self.assertRaises(SystemExit):
cmd = Command()
cmd.init_args(["--radius", "blaaaaa"])
cmd.run()
with self.assertRaises(SystemExit):
cmd = Command()
cmd.init_args(["--tag", "inv@lid"])
cmd.run()
def test_location_google_breaks(self):
"""User passed location arg but google api gave error"""
caught_exceptions = [
requests.ConnectionError, requests.HTTPError, requests.Timeout]
with mock.patch('requests.get') as mock_get:
for exception in caught_exceptions:
mock_get.side_effect = exception
with capture_sys_output():
with self.assertRaises(RipeAtlasToolsException):
cmd = Command()
cmd.init_args(["--location", "blaaaa"])
cmd.run()
mock_get.side_effect = Exception()
with self.assertRaises(Exception):
cmd = Command()
cmd.init_args(["--location", "blaaaa"])
cmd.run()
def test_location_google_wrong_output(self):
"""User passed location arg but google api gave not expected format"""
with mock.patch('requests.get') as mock_get:
mock_get.return_value = requests.Response()
with mock.patch('requests.Response.json') as mock_json:
mock_json.return_value = {"blaaa": "bla"}
with self.assertRaises(RipeAtlasToolsException):
cmd = Command()
cmd.init_args(["--location", "blaaaa"])
cmd.run()
def test_location_arg(self):
"""User passed location arg"""
with mock.patch('requests.get') as mock_get:
mock_get.return_value = requests.Response()
with mock.patch('requests.Response.json') as mock_json:
mock_json.return_value = {"results": [
{"geometry": {"location": {"lat": 1, "lng": 2}}}]}
cmd = Command()
cmd.init_args(["--location", "blaaaa"])
self.assertEquals(cmd.build_request_args(), {'radius': '1,2:15'})
def test_location_arg_with_radius(self):
"""User passed location arg"""
with mock.patch('requests.get') as mock_get:
mock_get.return_value = requests.Response()
with mock.patch('requests.Response.json') as mock_json:
mock_json.return_value = {"results": [
{"geometry": {"location": {"lat": 1, "lng": 2}}}
]}
cmd = Command()
cmd.init_args(["--location", "blaaaa", "--radius", "4"])
self.assertEquals(
cmd.build_request_args(),
{"radius": "1,2:4"}
)
def test_asn_args(self):
"""User passed asn arg together with asnv4 or asnv6"""
with self.assertRaises(RipeAtlasToolsException):
cmd = Command()
cmd.init_args(["--asn", "3333", "--asnv4", "3333"])
cmd.run()
with self.assertRaises(RipeAtlasToolsException):
cmd = Command()
cmd.init_args(["--asn", "3333", "--asnv6", "3333"])
cmd.run()
def test_prefix_args(self):
"""User passed prefix arg together with prefixv4 or prefixv6"""
with self.assertRaises(RipeAtlasToolsException):
cmd = Command()
cmd.init_args([
"--prefix", "193.0.0.0/21",
"--prefixv4", "193.0.0.0/21"
])
cmd.run()
with self.assertRaises(RipeAtlasToolsException):
cmd = Command()
cmd.init_args([
"--prefix", "2001:67c:2e8::/48",
"--prefixv6", "2001:67c:2e8::/48"
])
cmd.run()
def test_all_args(self):
"""User passed all arguments"""
cmd = Command()
cmd.init_args(["--all"])
self.assertEquals(cmd.build_request_args(), {})
def test_center_arg_wrong_value(self):
"""User passed center arg with wrong value"""
with self.assertRaises(RipeAtlasToolsException):
cmd = Command()
cmd.init_args(["--center", "blaaaa"])
cmd.run()
def test_center_arg(self):
"""User passed center arg"""
cmd = Command()
cmd.init_args(["--center", "1,2"])
self.assertEquals(
cmd.build_request_args(),
{"radius": "1,2:15"}
)
def test_center_arg_with_radius(self):
"""User passed center and radius arg"""
cmd = Command()
cmd.init_args(["--center", "1,2", "--radius", "4"])
self.assertEquals(cmd.build_request_args(), {"radius": "1,2:4"})
def test_country_arg(self):
"""User passed country code arg"""
cmd = Command()
cmd.init_args(["--country", "GR"])
self.assertEquals(cmd.build_request_args(), {"country_code": "GR"})
def test_country_arg_with_radius(self):
"""User passed country code arg together with radius"""
cmd = Command()
cmd.init_args(["--country", "GR", "--radius", "4"])
self.assertEquals(cmd.build_request_args(), {"country_code": "GR"})
def test_status_arg(self):
"""User passed valid status arg."""
for status in range(0, 3):
cmd = Command()
cmd.init_args(["--status", str(status)])
self.assertEquals(cmd.build_request_args(), {"status": status})
def test_status_arg_wrong_value(self):
"""User passed status arg with wrong value"""
with capture_sys_output():
with self.assertRaises(SystemExit):
cmd = Command()
cmd.init_args(["--status", "4"])
cmd.run()
def test_sane_tags(self):
"""Sane tags"""
cmd = Command()
cmd.init_args(["--tag", "native-ipv6"])
self.assertEquals(
cmd.build_request_args(),
{"tags": "native-ipv6"}
)
cmd = Command()
cmd.init_args(["--tag", "native-ipv6", "--tag", "system-ipv4-works"])
self.assertEquals(
cmd.build_request_args(),
{"tags": "native-ipv6,system-ipv4-works"}
)
def test_sane_args1(self):
"""User passed several arguments (1)"""
cmd = Command()
cmd.init_args([
"--center", "1,2",
"--radius", "4",
"--asnv4", "3333",
"--prefix", "193.0.0.0/21"
])
self.assertEquals(
cmd.build_request_args(),
{'asn_v4': 3333, 'prefix': '193.0.0.0/21', 'radius': '1,2:4'}
)
def test_sane_args2(self):
"""User passed several arguments (2)"""
cmd = Command()
cmd.init_args([
"--location", "Amsterdam",
"--asn", "3333",
"--prefixv4", "193.0.0.0/21"
])
path = '{}.Command.location2degrees'.format(COMMAND_MODULE)
with mock.patch(path) as mock_get:
mock_get.return_value = (1, 2)
self.assertEquals(cmd.build_request_args(), {
'asn': 3333,
'prefix_v4': '193.0.0.0/21',
'radius': '1,2:15'
})
def test_sane_args3(self):
"""User passed several arguments (3)"""
cmd = Command()
cmd.init_args([
"--center", "1,2",
"--asnv6", "3333",
"--prefixv6", "2001:67c:2e8::/48"
])
self.assertEquals(cmd.build_request_args(), {
'asn_v6': 3333,
'prefix_v6': '2001:67c:2e8::/48',
'radius': '1,2:15'
})
def test_render_ids_only(self):
"""User passed ids_only arg, testing rendiring"""
cmd = Command()
cmd.init_args([
"--ids-only", "--country", "GR"
])
with capture_sys_output() as (stdout, stderr):
path = '{}.ProbeRequest'.format(COMMAND_MODULE)
with mock.patch(path) as mock_get:
mock_get.return_value = FakeGen()
cmd.run()
self.assertEquals(stdout.getvalue(), "1\n2\n3\n4\n5\n")
def test_render_ids_only_with_limit(self):
"""User passed ids_only arg together with limit, testing rendering"""
cmd = Command()
cmd.init_args([
"--ids-only",
"--country", "GR",
"--limit", "2"
])
with capture_sys_output() as (stdout, stderr):
path = '{}.ProbeRequest'.format(COMMAND_MODULE)
with mock.patch(path) as mock_get:
mock_get.return_value = FakeGen()
cmd.run()
self.assertEquals(stdout.getvalue(), "1\n2\n")
def test_render_ids_only_with_aggr(self):
"""
User passed ids_only arg together with aggregate, testing rendering
"""
cmd = Command()
cmd.init_args([
"--ids-only",
"--country", "GR",
"--aggregate-by", "country"
])
with capture_sys_output() as (stdout, stderr):
path = '{}.ProbeRequest'.format(COMMAND_MODULE)
with mock.patch(path) as mock_get:
mock_get.return_value = FakeGen()
cmd.run()
self.assertEquals(stdout.getvalue(), "1\n2\n3\n4\n5\n")
def test_get_aggregators(self):
"""User passed --aggregate-by args"""
cmd = Command()
cmd.init_args([
"--aggregate-by", "asn_v4",
"--aggregate-by", "country",
"--aggregate-by", "prefix_v4"
])
expected_output = [
ValueKeyAggregator(key="asn_v4"),
ValueKeyAggregator(key="country_code"),
ValueKeyAggregator(key="prefix_v4")
]
cmd.set_aggregators()
for index, v in enumerate(cmd.aggregators):
self.assertTrue(isinstance(v, ValueKeyAggregator))
self.assertEquals(
v.aggregation_keys,
expected_output[index].aggregation_keys
)
def test_render_without_aggregation(self):
"""Tests rendering of results without aggregation"""
cmd = Command()
cmd.init_args([
"--country", "GR"
])
with capture_sys_output() as (stdout, stderr):
path = '{}.ProbeRequest'.format(COMMAND_MODULE)
with mock.patch(path) as mock_get:
mock_get.return_value = FakeGen()
cmd.run()
expected_output = [
"",
"Filters:",
" Country: GR",
"",
"ID Asn_v4 Asn_v6 Country Status ",
"===========================================",
str(b"1 3333 gr None "),
str(b"2 3333 de None "),
str(b"3 3332 de None "),
str(b"4 3333 nl None "),
str(b"5 3333 gr None "),
"===========================================",
" Showing 4 of 4 total probes",
"",
]
self.assertEquals(set(stdout.getvalue().split("\n")), set(expected_output))
def test_render_without_aggregation_with_limit(self):
"""Tests rendering of results without aggregation but with limit"""
cmd = Command()
cmd.init_args([
"--country", "GR",
"--limit", "2"
])
with capture_sys_output() as (stdout, stderr):
path = 'ripe.atlas.tools.commands.probe_search.ProbeRequest'
with mock.patch(path) as mock_get:
mock_get.return_value = FakeGen()
cmd.run()
self.maxDiff = None
expected_output = [
"",
"Filters:",
" Country: GR",
"",
"ID Asn_v4 Asn_v6 Country Status ",
"===========================================",
str(b"1 3333 gr None "),
str(b"2 3333 de None "),
"===========================================",
" Showing 2 of 4 total probes",
""
]
self.assertEquals(set(stdout.getvalue().split("\n")), set(expected_output))
def test_render_with_aggregation(self):
"""Tests rendering of results with aggregation"""
cmd = Command()
cmd.init_args([
"--country", "GR",
"--aggregate-by", "country",
"--aggregate-by", "asn_v4",
"--aggregate-by", "prefix_v4"
])
with capture_sys_output() as (stdout, stderr):
path = '{}.ProbeRequest'.format(COMMAND_MODULE)
with mock.patch(path) as mock_get:
mock_get.return_value = FakeGen()
cmd.run()
expected_blob = [
"",
"Filters:",
" Country: GR",
"",
" ID Asn_v4 Asn_v6 Country Status ",
"==============================================",
"Country: DE",
" ASN_V4: 3332",
" PREFIX_V4: 193.0/22",
str(b" 3 3332 de None "),
" ASN_V4: 3333",
" PREFIX_V4: 193.0/22",
str(b" 2 3333 de None "),
"",
"Country: GR",
" ASN_V4: 3333",
" PREFIX_V4: 193.0/22",
str(b" 1 3333 gr None "),
str(b" 5 3333 gr None "),
"",
"Country: NL",
" ASN_V4: 3333",
" PREFIX_V4: 193.0/22",
str(b" 4 3333 nl None "),
"==============================================",
" Showing 4 of 4 total probes",
""
]
self.maxDiff = None
out = stdout.getvalue()
expected_set = set(expected_blob)
returned_set = set(out.split("\n"))
self.assertEquals(returned_set, expected_set)
def test_render_with_aggregation_with_limit(self):
"""Tests rendering of results with aggregation with limit"""
cmd = Command()
cmd.init_args([
"--country", "GR",
"--aggregate-by", "country",
"--aggregate-by", "asn_v4",
"--aggregate-by", "prefix_v4",
"--limit", "1"
])
with capture_sys_output() as (stdout, stderr):
path = '{}.ProbeRequest'.format(COMMAND_MODULE)
with mock.patch(path) as mock_get:
mock_get.return_value = FakeGen()
cmd.run()
expected_output = [
"",
"Filters:",
" Country: GR",
"",
" ID Asn_v4 Asn_v6 Country Status ",
"==============================================",
"Country: GR",
" ASN_V4: 3333",
" PREFIX_V4: 193.0/22",
str(b" 1 3333 gr None "),
"==============================================",
" Showing 1 of 4 total probes",
"",
]
expected_set = set(expected_output)
returned_set = set(stdout.getvalue().split("\n"))
self.assertEquals(returned_set, expected_set)
def test_render_with_aggregation_with_max_per_aggr(self):
"""
Tests rendering of results with aggregation with max per aggr option
"""
cmd = Command()
cmd.init_args([
"--country", "GR",
"--aggregate-by", "country",
"--aggregate-by", "asn_v4",
"--aggregate-by", "prefix_v4",
"--max-per-aggregation", "1"
])
with capture_sys_output() as (stdout, stderr):
path = '{}.ProbeRequest'.format(COMMAND_MODULE)
with mock.patch(path) as mock_get:
mock_get.return_value = FakeGen()
cmd.run()
expected_output = [
"",
"Filters:",
" Country: GR",
"",
" ID Asn_v4 Asn_v6 Country Status ",
"==============================================",
"Country: DE",
" ASN_V4: 3332",
" PREFIX_V4: 193.0/22",
str(b" 3 3332 de None "),
" ASN_V4: 3333",
" PREFIX_V4: 193.0/22",
str(b" 2 3333 de None "),
"",
"Country: GR",
" ASN_V4: 3333",
" PREFIX_V4: 193.0/22",
str(b" 1 3333 gr None "),
"",
"Country: NL",
" ASN_V4: 3333",
" PREFIX_V4: 193.0/22",
str(b" 4 3333 nl None "),
"==============================================",
" Showing 4 of 4 total probes",
"",
]
expected_set = set(expected_output)
returned_set = set(stdout.getvalue().split("\n"))
self.assertEquals(returned_set, expected_set)
| gpl-3.0 |
sunlightlabs/tcamp | tcamp/middleware.py | 1 | 2252 | import re
from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from django.contrib.auth.middleware import AuthenticationMiddleware
from django.contrib.messages.middleware import MessageMiddleware
from django.contrib.sessions.middleware import SessionMiddleware
from django.middleware.csrf import CsrfViewMiddleware
class PathRestrictedProxy(object):
    """A mixin for creating middleware that restricts the wrapped
    middleware's behavior to only those URL fragments declared in settings.

    Subclasses set ``proxy_class`` to the middleware class to wrap; requests
    whose path does not match ``SESSIONABLE_URL_PATTERNS`` bypass the proxy.
    """

    path_rexp = r''        # alternation regex built from the settings patterns
    proxy_class = object   # middleware class to instantiate and delegate to
    proxy = None           # instance of proxy_class, created in __init__

    def __init__(self, *args, **kwargs):
        # BUG FIX: the fallback must be a one-element tuple; ``(r'.')`` is
        # just the string '.', which only worked by accident because joining
        # a single-character string yields itself.
        patterns = getattr(settings, 'SESSIONABLE_URL_PATTERNS', (r'.',))
        self.path_rexp = r'(%s)' % r'|'.join(patterns)
        self.proxy = self.proxy_class(*args, **kwargs)

    def is_restricted(self, request):
        """Return True when the proxied middleware should NOT run."""
        if re.search(self.path_rexp, request.path):
            return False
        return True

    def process_request(self, request):
        if self.is_restricted(request):
            return
        # Look the hook up explicitly instead of catching AttributeError,
        # which would also have swallowed AttributeErrors raised *inside*
        # the proxied middleware.
        handler = getattr(self.proxy, 'process_request', None)
        if handler is not None:
            handler(request)

    def process_response(self, request, response):
        if self.is_restricted(request):
            return response
        handler = getattr(self.proxy, 'process_response', None)
        if handler is None:
            return response
        return handler(request, response)
class ConditionalSessionMiddleware(PathRestrictedProxy):
    # SessionMiddleware applied only to SESSIONABLE_URL_PATTERNS paths.
    proxy_class = SessionMiddleware
class ConditionalAuthenticationMiddleware(PathRestrictedProxy):
    """AuthenticationMiddleware limited to the sessionable URL patterns."""

    proxy_class = AuthenticationMiddleware

    def process_request(self, request):
        # Matching paths get real authentication; everything else still
        # needs *some* user object, so fall back to AnonymousUser.
        if not self.is_restricted(request):
            self.proxy.process_request(request)
            return
        request.user = AnonymousUser()
class ConditionalMessageMiddleware(PathRestrictedProxy):
    # MessageMiddleware applied only to SESSIONABLE_URL_PATTERNS paths.
    proxy_class = MessageMiddleware
class ConditionalCsrfViewMiddleware(PathRestrictedProxy):
    # CsrfViewMiddleware wrapper; process_view runs on every request,
    # regardless of path restriction.
    proxy_class = CsrfViewMiddleware
    # always process on request
    def process_view(self, request, callback, callback_args, callback_kwargs):
        # NOTE(review): the proxy's return value is discarded, so a CSRF
        # failure response from CsrfViewMiddleware is never returned to the
        # client — confirm this is intentional, as it disables CSRF rejection.
        self.proxy.process_view(request, callback, callback_args, callback_kwargs)
| bsd-3-clause |
inspirehep/inspire-dojson | inspire_dojson/common/rules.py | 1 | 36037 | # -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2014-2017 CERN.
#
# INSPIRE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE. If not, see <http://www.gnu.org/licenses/>.
#
# In applying this license, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
"""DoJSON common rules."""
from __future__ import absolute_import, division, print_function
import re
from datetime import datetime
from flask import current_app
from six.moves import urllib
from dojson import utils
from inspire_schemas.api import load_schema
from inspire_schemas.utils import classify_field
from inspire_utils.date import PartialDate, earliest_date
from inspire_utils.helpers import force_list, maybe_int
from ..conferences.model import conferences
from ..data.model import data
from ..experiments.model import experiments
from ..hep.model import hep, hep2marc
from ..hepnames.model import hepnames, hepnames2marc
from ..institutions.model import institutions
from ..journals.model import journals
from ..utils import (
force_single_element,
get_recid_from_ref,
get_record_ref,
)
# Internal INSPIRE user id: optional "inspire:uid:" prefix plus exactly 5 digits.
IS_INTERNAL_UID = re.compile(r'^(inspire:uid:)?\d{5}$')
# ORCID identifier: optional "orcid:" prefix plus four 4-character groups of
# digits; the final character may be the "X" checksum letter.
IS_ORCID = re.compile(r'^(orcid:)?\d{4}-\d{4}-\d{4}-\d{3}[0-9X]$')
WEBLINKS = {
'00070': 'International Journal of Mathematics Server',
'00108': 'J.Knot Theor.Ramifications',
'00140': 'Heavy Ion Physics Server',
'00145': 'Int. J. Math. Math. Sci. Server',
'00154': 'J. Nonlin. Math. Phys. Server',
'00181': 'Spektrum der Wissenschaft Server',
'00182': 'SLAC Beam Line Server',
'00201': 'ICFA Instrum.Bull. Server',
'00203': 'Adv. Theor. Math. Phys. Server',
'00211': 'KFZ Nachrichten Server',
'00222': 'Living Reviews in Relativity Server',
'00228': 'Nonlin. Phenom. Complex Syst. Server',
'00238': 'Math. Phys. Anal. Geom. Server',
'00256': 'Geometry and Topology Server',
'00257': 'Electron.J.Diff.Eq. Server',
'00264': 'Entropy Server',
'00286': 'HEP Lib.Web. Server',
'00327': 'World Sci.Lect.Notes Phys. Server',
'00357': 'Cent. Eur. J. Phys. Server',
'00372': 'Romanian Astronomical Journal Server',
'00376': 'ICFA Server',
'00411': 'Les Houches Lect. Notes Server',
'00436': 'Int. J. Geom. Meth. Mod. Phys. Server',
'00438': 'Phys.Part.Nucl.Lett. Server',
'00462': 'Journal of Physics Conference Series Server',
'00466': 'Prog.Phys. Server',
'00480': 'SIGMA Server',
'00484': 'Electron. J. Theor. Phys. Server',
'AAEJA': 'Astronomy and Astrophysics Server',
'AASRE': 'Astronomy and Astrophysics Review Server',
'ADSABS': 'ADS Abstract Service',
'ADSABS2': 'ADS Abstract Service',
'AFLBD': 'Annales De La Fondation Louis De Broglie Server',
'AHPAA': 'Ann. Henri Poincare Server',
'AIPCONF': 'AIP Conference Server',
'AJPIA': 'Am.J.Phys. Server',
'ALETE': 'Astronomy Letters Server',
'ANJOA': 'Astron.J. Server',
'ANPYA': 'Annalen der Physik Server',
'APCPC': 'AIP Conference Server',
'APHYE': 'Astroparticle Physics Server',
'APNYA': 'Ann. Phys. Server',
'APPLA': 'Applied Physics Letter Server',
'APPOA': 'Acta Physica Polonica Server',
'APSSB': 'Astrophys. Space Sci. Server',
'APSVC': 'Acta Physica Slovaca Server',
'ARAAA': 'Annual Review of Astronomy and Astrophysics Server',
'ARNUA': 'Annual Review of Nuclear and Particle Science',
'ARTHF': 'Algebras and Representation Theory Server',
'ARXERR': 'Erratum from arXiv.org server',
'ASJOA': 'Astrophysical Journal Server',
'ATLAS': 'ATLAS Notes from CERN server',
'ATMP': 'ATMP Server',
'ATPYA': 'Astrophysics Server',
'ATROE': 'Astronomy Reports Server',
'AUJPA': 'Austral.J.Phys. Server',
'BABAR': 'Postscript from BABAR [BABAR Collaborators Only]',
'BABARWEB': 'BaBar Publications Database',
'BABARWEBP': 'BaBar Password Protected Publications Database',
'BJPHE': 'Braz. J. Phys. Server',
'BNL': 'Brookhaven Document Server',
'BOOK-REVIEW': 'Review of this book',
'BOOKCL': 'SLAC BOOKS Database',
'BOOKREVIEW': 'Book review/article',
'BOOKS': 'SLAC BOOKS Database',
'BROOKHAVEN': 'Brookhaven Document Server',
'C00-02-07': 'CHEP2000 Server',
'C00-06-26.2': 'EPAC 2000 Server',
'C00-09-14': 'QFTHEP00 Server',
'C00-10-09.4': 'PCAPAC00 Server',
'C01-01-20.1': 'Moriond Server',
'C01-06-18.1': 'PAC01 Server',
'C01-06-25.8': 'BeamBeam01 Server',
'C01-07-26': 'MENU2001 Server',
'C01-08-07': 'ICRC01 Server',
'C01-08-20.1': 'FEL 2001 Server',
'C01-09-06.1': 'QFTHEP01 Server',
'C01-10-02.1': 'DESY Conference Server',
'C02-03-18.1': 'Durham Conference Server',
'C02-06-17.2': 'SUSY02 Server',
'C02-09-22': 'Hadron Structure 2002 Server',
'C02-10-15.1': 'Aguas de Lindoia 2002 Server',
'C03-07-31': 'ICRC Server',
'C04-04-14': 'DIS04 Server',
'C04-04-19.4': 'ECLOUD04 Server',
'C04-11-08.2': 'HHH-2004 Server',
'C05-03-22': 'Phystat05 Conference Server',
'C05-09-12': 'Phystat05 Conference Server',
'C05-09-12.10': 'CERN Conference Server',
'C05-10-17': 'ICATPP9 Conference Server',
'C06-09-16.1': 'Yalta06 Conference Server',
'C07-04-16': 'DIS2007 Conference Server',
'C07-05-21.2': 'Blois07 Server',
'C91-08-05': 'SLAC Summer Inst. 1991 Server',
'C92-07-13.1': 'SLAC Summer Inst. 1992 Server',
'C93-07-26': 'SLAC Summer Inst. 1993 Server',
'C95-07-10.2': 'SLAC Summer Inst 1995 Server',
'C95-10-29': 'ICALEPCS\'95 Server',
'C97-09-04.1': 'QFTHEP\'97 Server',
'C97-10-13': 'IWAA\'97 Server',
'C98-06-22': 'EPAC98 Server',
'C99-03-29': 'JACoW PAC99 Server',
'C99-04-06': 'ANL Conference Server',
'C99-04-12.3': 'LHC 99 JACoW Server',
'C99-08-17': 'Conference Server',
'C99-10-04': 'ICALEPCS99 Server',
'CBPF': 'Rio de Janeiro, CBPF Server',
'CDF93': 'CDF Document Server',
'CDF94': 'CDF Document Server',
'CDF95': 'CDF Document Server',
'CDF96': 'CDF Document Server',
'CDF97': 'CDF Document Server',
'CDF98': 'CDF Document Server',
'CDFCONF94': 'CDF Document Server',
'CDFCONF95': 'CDF Document Server',
'CDFCONF96': 'CDF Document Server',
'CDFCONF97': 'CDF Document Server',
'CDFCONF98': 'CDF Document Server',
'CDS': 'CERN Document Server',
'CDSWEB': 'CERN Library Record',
'CECOA': 'CERN Courier Server',
'CERN': 'CERN Document Server',
'CERN-ATLAS': 'CERN ATLAS Server',
'CERN-ATLAS-THESIS': 'ATLAS Thesis Server',
'CERN9401': 'CERN Document Server',
'CERN9402': 'CERN Document Server',
'CERN9403': 'CERN Document Server',
'CERN9404': 'CERN Document Server',
'CERN9405': 'CERN Document Server',
'CERN9406': 'CERN Document Server',
'CERN9407': 'CERN Document Server',
'CERN9408': 'CERN Document Server',
'CERN9409': 'CERN Document Server',
'CERN9410': 'CERN Document Server',
'CERN9411': 'CERN Document Server',
'CERN9412': 'CERN Document Server',
'CERN9501': 'CERN Document Server',
'CERN9502': 'CERN Document Server',
'CERN9503': 'CERN Document Server',
'CERN9504': 'CERN Document Server',
'CERN9505': 'CERN Document Server',
'CERN9506': 'CERN Document Server',
'CERN9507': 'CERN Document Server',
'CERN9508': 'CERN Document Server',
'CERN9509': 'CERN Document Server',
'CERN9510': 'CERN Document Server',
'CERN9511': 'CERN Document Server',
'CERN9512': 'CERN Document Server',
'CERN9601': 'CERN Document Server',
'CERN9602': 'CERN Document Server',
'CERN9603': 'CERN Document Server',
'CERN9604': 'CERN Document Server',
'CERN9605': 'CERN Document Server',
'CERN9606': 'CERN Document Server',
'CERN9607': 'CERN Document Server',
'CERN9608': 'CERN Document Server',
'CERN9609': 'CERN Document Server',
'CERN9610': 'CERN Document Server',
'CERN9611': 'CERN Document Server',
'CERN9612': 'CERN Document Server',
'CERNAB': 'CERN AB Server',
'CERNKEY': 'CERN Library Record',
'CERNNO': 'CERN Library Record',
'CERNREC': 'Cern Document Server',
'CERNREP': 'CERN Server',
'CERNSL': 'CERN SL Server',
'CERNWEBCAST': 'CERN Web Lecture Archive',
'CERNYEL': 'CERN Yellow Reports Server',
'CERNYELLOW': 'CERN Yellow Reports Server',
'CHACD': 'J. Korean Astron. Soc. Server',
'CHAOE': 'Chaos Server',
'CHEP97': 'Postscript from CHEP97 Server',
'CHPHD': 'Chinese Phys. Server',
'CITESEER': 'CiteSeer Server',
'CJAAA': 'Chin. J. Astron. Astrophys. Server',
'CJOPA': 'Chin.J.Phys. Server',
'CJPHA': 'Canadian Journal of Physics Server',
'CLNS93': 'Cornell Document Server',
'CLNS94': 'Cornell Document Server',
'CLNS95': 'Cornell Document Server',
'CLNS96': 'Cornell Document Server',
'CLNS97': 'Cornell Document Server',
'CLNS98': 'Cornell Document Server',
'CLNSCONF97': 'Cornell Document Server',
'CLNSCONF98': 'Cornell Document Server',
'CMMPE': 'Cambridge Monogr.Math.Phys. Server',
'CMP': 'Commun. Math. Phys. Server',
'CMPHF': 'Condensed Matter Phys. Server',
'CNRS': 'CNRS Server',
'COGPRINTS': 'Cogprints Server',
'COLUMBIA-THESIS': 'Columbia U. Thesis Server',
'CORNELL-LNS': 'Cornell U., LNS Server',
'CPHCB': 'Comput. Phys. Commun. Server',
'CPLEE': 'Chinese Physics Letters Server',
'CQG': 'Class. Quant. Grav. server',
'CQGRD': 'Class. Quantum Grav. Server',
'CSENF': 'Comput. Sci. Engin. Server',
'CT-THESIS': 'Caltech Thesis Server',
'CTPHA': 'Contemp. Phys. Server',
'CTPMD': 'Commun. Theor. Phys. Server',
'CUP': 'Cambridge University Press Server',
'CZYPA': 'Czech. J. Phys. Server',
'D0': 'Postscript_Version from D0 Server',
'DANKA': 'Doklady Physics Server',
'DAPN': 'DAPNIA, Saclay Server',
'DAPN-THESIS': 'DAPNIA, Saclay Thesis Server',
'DARENET': 'DARENET Server',
'DELPHITHESIS': 'DELPHI Thesis Server',
'DESY': 'DESY Document Server',
'DESY91': 'DESY Document Server',
'DESY92': 'DESY Document Server',
'DESY93': 'DESY Document Server',
'DESY94': 'DESY Document Server',
'DESY95': 'DESY Document Server',
'DESY96': 'DESY Document Server',
'DESYPROC': 'DESY Proceedings Server',
'DGA': 'Diff. Geom. Appl. Document Server',
'DMSEE': 'Turkish Journal of Math Server',
'DOI': 'Journal Server',
'DOPHF': 'Doklady Physics Server',
'DSPACE': 'eCommons Digital Repository Server',
'DUBNA': 'JINR DUBNA Preprint Server',
'DUBNA2': 'JINR DUBNA Preprint Server',
'DURHAM': 'HepData',
'ECONF': 'Proceedings write-up on eConf',
'ECONFPDF': 'pdf from eConf',
'ECONFS': 'Slides on eConf',
'EJP': 'Europ. J. Phys. server',
'EJPHD': 'Eur. J. Phys. Server',
'ELJOUR1': 'EIPL Particle Physics Server',
'ELJOUR2': 'JHEP Server',
'EPHJA-A': 'Euro.Phys.J.A Server',
'EPHJA-B': 'Eur.Phys.J.B Server',
'EPHJA-C': 'Eur.Phys.J.C Server',
'EPHJA-D': 'Eur.Phys.J.D Server',
'EPHJA-E': 'Eur.Phys.J.direct Server',
'EPHJD': 'Eur.Phys.J.direct Server',
'EPJA': 'Europ. Phys. Jour. A Server',
'EPJB': 'Euro. Phys. Jour. B Server',
'EPJC': 'Eur.Phys.J. C. Server',
'EPJCD': 'Europ. Phys. Jour. direct C Server',
'EPJD': 'Euro. Phys. Jour. D Server',
'EPL': 'Europhys. Lett. Server',
'EPN': 'Europhys. News Server',
'EUCLID': 'Project Euclid Server',
'EUPNA': 'Europhysics News Server',
'EXASE': 'Experimental Astronomy Server',
'FBOOKS': 'Fermilab BOOKS Database',
'FBS': 'Few Body Sys. Server',
'FERMILAB': 'Fermilab Library Server (fulltext available)',
'FERMILABAPNOTE': 'Fermilab Library Server (fulltext available)',
'FERMILABBACHELORS': 'Fermilab Library Server (fulltext available)',
'FERMILABBEAMDOC': 'Fermilab Beam Docs Server',
'FERMILABCONF': 'Fermilab Library Server (fulltext available)',
'FERMILABDESIGN': 'Fermilab Library Server (fulltext available)',
'FERMILABEN': 'Fermilab Library Server (fulltext available)',
'FERMILABEXP': 'Fermilab Library Server (fulltext available)',
'FERMILABFN': 'Fermilab Library Server (fulltext available)',
'FERMILABLOI': 'Fermilab Library Server',
'FERMILABLU': 'Fermilab Library Server (fulltext available)',
'FERMILABMASTERS': 'Fermilab Library Server (fulltext unavailable)',
'FERMILABMASTERSF': 'Fermilab Library Server (fulltext available)',
'FERMILABMINERVA': 'MINERvA Document Server',
'FERMILABMISC': 'Fermilab Library Server (fulltext available)',
'FERMILABNAL': 'Fermilab Library Server (fulltext available)',
'FERMILABOTHER': 'Fermilab Library Server',
'FERMILABPROPOSAL': 'Fermilab Library Server (fulltext available)',
'FERMILABPUB': 'Fermilab Library Server (fulltext available)',
'FERMILABR': 'Fermilab Library Server (fulltext available)',
'FERMILABRESEARCH': 'Fermilab Library Server (fulltext available)',
'FERMILABSDC': 'Fermilab Library Server',
'FERMILABTEST': 'Fermilab Library Server (fulltext available)',
'FERMILABTHESIS': 'Fermilab Library Server (fulltext unavailable)',
'FERMILABTHESISF': 'Fermilab Library Server (fulltext available)',
'FERMILABTM': 'Fermilab Library Server (fulltext available)',
'FERMILABTODAY': 'Fermilab Today Result of the Week',
'FERMILABUPC': 'Fermilab Library Server (fulltext available)',
'FERMILABVLHCPUB': 'Fermilab Library Server (fulltext available)',
'FERMILABWORKBOOK': 'Fermilab Library Server',
'FIZIKAB': 'Fizika B Server',
'FNDPA': 'Found.Phys. Server',
'FOCUS': 'Physical Review Focus',
'FPLEE': 'Found. Phys. Lett. Server',
'FPYKA': 'Fortschr. Phys. Server',
'FZKAA-B': 'Fizika B Server',
'FZKAAB': 'Fizika B Server',
'GRGVA': 'Gen.Rel.Grav. Server',
'HEPLW': 'HEP Lib.Webzine Server',
'HEPPDF': 'PDF Server',
'HLTPA': 'Health Physics Server',
'HSERVER': 'HTML_Version from a server',
'HTTP://POS.SISSA.IT/ARCHIVE/CONFERENCES/045/026/LHC07_026.PDF': 'HTTP://WWW-BD.FNAL.GOV/ICFABD/NEWSLETTER45.PDF',
'ICTP': 'ICTP Trieste Preprint Server',
'ICTP-LNS': 'ICTP Lecture Notes Server',
'IEEE': 'IEEExplore Server',
'IHEP': 'IHEP Document Server',
'IJMPA': 'Int. J. Mod. Phys. Server',
'IJMPB': 'Int. J. Mod. Phys. Server',
'IJMPC': 'Int. J. Mod. Phys. Server',
'IJMPD': 'Int. J. Mod. Phys. Server',
'IJMPE': 'Int. J. Mod. Phys. Server',
'IJTPB': 'Int. J. Theor. Phys. Server',
'IMPAE': 'Int.J.Mod.Phys.A Server',
'IMPAE-A': 'Int.J.Mod.Phys.A Server',
'IMPAE-B': 'Int.J.Mod.Phys.B Server',
'IMPAE-C': 'Int.J.Mod.Phys.C Server',
'IMPAE-D': 'Int.J.Mod.Phys.D Server',
'IMPAE-E': 'Int.J.Mod.Phys.E Server',
'IN2P3': 'HAL in2p3 Server',
'INDICO': 'CERN Indico Server',
'INETA': 'Instruments and Experimental Techniques Server',
'INTERACTIONS': 'Interactions.org article',
'IOPLETT': 'IOP Phys.Express Lett. Server',
'IRNPE': 'Int.Rev.Nucl.Phys. Server',
'JACOW': 'Full-text at JACoW Server',
'JACOWS': 'Slides on JACoW Server',
'JAUMA': 'J.Austral.Math.Soc. Server',
'JCAPA': 'JCAP Electronic Journal Server',
'JCTPA': 'J. Comput. Phys. Server',
'JDGEA': 'J. Diff. Geom. Server',
'JETP': 'J. Exp. Theor. Phys. Server',
'JETPL': 'J. Exp. Theor. Phys. Lett. Server',
'JGP': 'J. Geom. Phys. Document Server',
'JGPHE': 'Journal of Geometry and Physics Server',
'JHEP': 'JHEP Electronic Journal Server',
'JHEPA': 'JHEP Electronic Journal Server',
'JHEPA-CONF': 'JHEP Conference PDF Server',
'JHEPOA': 'JHEP Electronic Journal Server',
'JINST': 'JINST Electronic Journal Server',
'JKPSD': 'J. Korean Phys. Soc. Server',
'JLAB': 'JLab Document Server',
'JMAPA': 'J.Math.Phys. Server',
'JMP': 'J. Math. Phys. server',
'JPA': 'J. Phys. A server',
'JPAGB': 'J. Phys. A Server',
'JPCBA': 'J. Phys. C Server',
'JPG': 'J. Phys. G server',
'JPHGB': 'J. Phys. G Server',
'JSTAT': 'Journal of Statistical Mechanics Server',
'JSTOR': 'JSTOR Server',
'JSYRE': 'J. Synchrotron Radiat. Server',
'JTPHE': 'J.Exp.Theor.Phys. Server',
'JTPLA': 'J.Exp.Theor.Phys.Lett. Server',
'JUPSA': 'Journal of the Physical Society of Japan Server',
'JVSTA': 'J. Vac. Sci. Technol. Server',
'JVSTA-A': 'J. Vac. Sci. Technol. A server',
'JVSTA-B': 'J. Vac. Sci. Technol. B server',
'JVSTB': 'J. Vac. Sci. Technol. Server',
'KEKSCAN': 'KEK scanned document',
'LANL': 'Los Alamos Server',
'LCLSP': 'LCLS Papers Server',
'LCLST': 'LCLS Tech Notes Server',
'LCNOTES': 'DESY LC Notes Server',
'LINAC2000': 'Linac2000 Econf Server',
'LMPHD': 'Lett. Math. Phys Server',
'LNPHA': 'Springer Lecture Notes of Physics Server',
'MPEJ': 'Mathematical Physics Electronic Journal Server',
'MPHYA': 'Med.Phys. Server',
'MPLAE-A': 'Mod.Phys.Lett.A Server',
'MPLAE-B': 'Mod.Phys.Lett.B Server',
'MSNET': 'Mathematical Reviews',
'MSTCE': 'Meas. Sci. Technol. Server',
'MTF': 'Fermilab MTF Notes Server',
'MUNI': 'Munich U. Server',
'NATUA': 'Nature Server',
'NCA': 'Il Nuovo Cimento Server',
'NCB': 'Il Nuovo Cimento Server',
'NCC': 'Il Nuovo Cimento Server',
'NCD': 'Il Nuovo Cimento Server',
'NDLTD': 'Networked Digital Library Server',
'NEWAS': 'New Astronomy Server',
'NEWAST': 'New Astronomy Server',
'NFKAF': 'FZK Nachr. Server',
'NIMA': 'Nucl. Instrum. Methods A Document Server',
'NIMB': 'Nucl. Instrum. Methods B Document Server',
'NJOPF': 'New Journal of Physics Server',
'NOAND': 'Nonlinear Analysis Server',
'NOBEL': 'Nobel Foundation Server',
'NOTES': 'Notes or further material',
'NOVO': 'Novosibirsk, IYF Server',
'NPA2': 'Nucl. Phys. A Document Server',
'NPB2': 'Nucl. Phys. B Document Server',
'NPBPS': 'Nuclear Physics Electronic',
'NPBPS2': 'Nuclear Physics B - Proceedings Supplements',
'NPE': 'Nuclear Physics Electronic Announcements',
'NSENA': 'Nucl.Sci.Eng. Server',
'NUCIA': 'Nuovo Cim. PDF Server',
'NUCLPHYS': 'Nuclear Physics Server',
'NUIMA-A': 'Nuclear Physics Electronic',
'NUIMA-B': 'Nuclear Physics Electronic',
'NUMDAM': 'NUMDAM Server',
'NUMI-PUBLIC': 'NuMI Server',
'NUMI-RESTRICTED': 'NuMI Restricted Server',
'NUPHA-A': 'Nuclear Physics Electronic',
'NUPHA-B': 'Nuclear Physics Electronic',
'NUPHZ': 'Nuclear Physics Electronic',
'NUPHZ-TOC': 'Nuclear Physics Electronic',
'NWSCA': 'New Scientist Server',
'OLAL': 'Orsay, LAL Server',
'OSTI': 'OSTI Information Bridge Server',
'OUP': 'Oxford University Press Server',
'PANUE': 'Phys.At.Nucl. Server',
'PANUEO': 'Phys.At.Nucl. Server',
'PARTICLEZ': 'particlez.org Server',
'PDG': 'Particle Data Group (PDG) Server',
'PDG-RPP': 'Review of Particle Properties full record',
'PDG2002PDF': 'PDF from PDG site',
'PDG2002PS': 'Postscript from PDG Site',
'PDG2004PDF': 'PDF from PDG site',
'PDG2004PS': 'PS from PDG site',
'PDG98': 'PDG-RPP Server',
'PDG98R': 'PDG Server',
'PDGJOURNAL': 'Review of Particle Properties full record',
'PDGLIV': 'pdgLive (measurements quoted by PDG)',
'PHLTA-A': 'Phys. Lett. A Server',
'PHLTA-B': 'Nuclear Physics Electronic',
'PHMBA': 'Phys. Med. Biol. Server',
'PHPEF': 'Physics in Perspective Server',
'PHRVA': 'Phys.Rev. Server',
'PHRVA-A': 'Phys. Rev. A Server',
'PHRVA-B': 'Phys. Rev. B Server',
'PHRVA-C': 'Phys. Rev. C Server',
'PHRVA-D': 'Phys. Rev. D Server',
'PHRVA-E': 'Phys. Rev. E Server',
'PHRVA-FOCUS': 'Physical Review Focus',
'PHSTB': 'Physica Scripta Server',
'PHTOA': 'Physics Today Server',
'PHUSE': 'Physics uspekhii Server',
'PHUZA': 'Physik in unserer Zeit Server',
'PHWOE': 'Physics World Server',
'PHYSA': 'Physica A Document Server',
'PHYSA-A': 'Physica A Server',
'PHYSA-D': 'Physica D Server',
'PHYSD': 'Physica A Document Server',
'PHYSICSWEB': 'Physicsweb.org article',
'PHYSICSWORLD': 'physicsworld.com article',
'PHYSORG': 'PhysOrg.com article',
'PHYSREV': 'Physical Review Server',
'PNASA': 'Proc.Nat.Acad.Sci. Server',
'POS': 'Proceedings of Science Server',
'PPN': 'Phys. Part. Nucl. Server',
'PPNP': 'Prog. Part. Nucl. Phys. Document Server',
'PPNPD': 'Prog. Part. Nucl. Phys. server',
'PPNUE': 'Phys.Part.Nucl. Server',
'PPNUE-S': 'Phys. Part. Nucl. Server',
'PPNUE1': 'Phys. Part. Nucl. (AIP) Server',
'PPNUES': 'Phys. Part. Nucl. Suppl. Server',
'PR': 'Physics Reports Document Server',
'PRAMC': 'Pramana Server',
'PRAMCARC': 'Pramana Archive Server',
'PRC': 'Phys. Rev. C Server',
'PRD': 'Phys. Rev. D Server',
'PRDOLA': 'Phys. Rev. Online Archive',
'PRE': 'Phys. Rev. E Server',
'PRL': 'Phys. Rev. Lett. Server',
'PRLTA': 'Phys. Rev. Lett. Server',
'PRPLC': 'Physics Reports Server',
'PRSLA': 'Proceedings of the Royal Society Server',
'PRSTA': 'Phys.Rev.ST Accel.Beams Server',
'PTPKA': 'Prog.Theor.Phys. Server',
'PTRSA': 'Phil.Trans.Roy.Soc.Lond. Server',
'PUBMED': 'PUBMED Server',
'RCCHB': 'La Recherche Server',
'RESOF': 'Resonance Journal of Science Education Server',
'RINGBERG': 'Ringberg Conference Server',
'RJMPE': 'Russ. J. Math. Phys. Server',
'RMEAE': 'Radiation Measurements Server',
'RMHPB': 'Reports on Mathematical Physics Server',
'RMP': 'Rev. Mod. Phys. Server',
'RMPHA': 'Rev. Mod. Phys. Server',
'RMPHE': 'Reviews in Mathematical Physics Server',
'RMXFA': 'Rev. Mex. Fis. Server',
'RORPE': 'Rom. Rep. Phys. Server',
'RPDOD': 'Radiation Protection Dosimetry Server',
'RPPHA': 'Rep. Prog. Phys. Server',
'RRPQA': 'Rom. J. Phys. Server',
'RSI': 'Rev.Sci.Instrum. Server',
'RSINA': 'Rev. Sci. Instrum. Server',
'SACLAY': 'Saclay Document Server',
'SALAM': 'ICTP Preprint Archive',
'SCAMA': 'Scientific American Server',
'SCIDIR': 'Science Direct',
'SCIEA': 'Science Server',
'SCIENCEBLOG': 'Science Blog article',
'SCIENCEDAILY': 'Science Daily Article',
'SERVER': 'Electronic Version from a server',
'SERVER2': 'Electronic Version from another server',
'SLAC': 'SLAC Document Server',
'SLACBOOK': 'SLAC Book Catalog Record',
'SLACPUB': 'SLAC Document Server',
'SLACREPT': 'SLAC Document Server',
'SLACTN': 'SLAC Document Server',
'SLACTODAY': 'SLAC Today article',
'SOPJA': 'Russ.Phys.J. Server',
'SPACE': 'SPACE.com Article',
'SPIE': 'SPIE Server',
'SPRINGER': 'Springer Books Server',
'SPTPA': 'Tech. Phys. Server',
'SSC': 'HEPNET Document Server',
'SSI91': 'SLAC Document Server',
'SSI92': 'SLAC Document Server',
'SSI93': 'SLAC Document Server',
'SSI94': 'SLAC Document Server',
'SSI96': 'SLAC Document Server',
'SSI97': 'SLAC Document Server',
'SSI98': 'SLAC Document Server',
'TECHPHYS': 'Tech. Phys. Server',
'TECHPHYSL': 'Tech. Phys. Lett. Server',
'TJPHE': 'Turkish Journal of Physics Server',
'TMFZA': 'Teor.Mat.Fiz. Server',
'TMPHA': 'Theor.Math.Phys. Server',
'TORONTO-A': 'Toronto U. Astron. Thesis Server',
'TP': 'Tech. Phys. Server',
'UCOL': 'University Coll. London Server',
'UMI': 'UMI Thesis Server',
'VAC': 'Vacuum Document Server',
'VACUA': 'Vacuum Server',
'VIDEO': 'Watch the video of the talk',
'WORLDSCI': 'World Scientific Books Server',
'ZBLATT': 'Zentralblatt MATH Server',
'ZFDOF': 'Journal of Physical Studies Server',
'ZNTFA': 'Z. Naturforsch. Server',
'ZPA': 'Z. Phys. A Server',
'ZPC': 'Z. Phys. C Server',
}
def control_number(endpoint):
    """Build a rule that populates the ``control_number`` key.

    Also populates the ``self`` key through side effects, pointing the
    record at its own API ``endpoint``.
    """
    def _control_number(self, key, value):
        recid = int(value)
        self['self'] = get_record_ref(recid, endpoint)
        return recid
    return _control_number
# Register the ``control_number`` rule (MARC field 001) on every collection,
# mapping each one to its corresponding API endpoint name.
conferences.over('control_number', '^001')(control_number('conferences'))
data.over('control_number', '^001')(control_number('data'))
experiments.over('control_number', '^001')(control_number('experiments'))
hep.over('control_number', '^001')(control_number('literature'))
hepnames.over('control_number', '^001')(control_number('authors'))
institutions.over('control_number', '^001')(control_number('institutions'))
journals.over('control_number', '^001')(control_number('journals'))
@hep2marc.over('001', '^control_number$')
@hepnames2marc.over('001', '^control_number$')
def control_number2marc(self, key, value):
    """Convert ``control_number`` back to MARC field 001 unchanged."""
    return value
@conferences.over('legacy_version', '^005')
@data.over('legacy_version', '^005')
@experiments.over('legacy_version', '^005')
@hep.over('legacy_version', '^005')
@hepnames.over('legacy_version', '^005')
@institutions.over('legacy_version', '^005')
@journals.over('legacy_version', '^005')
def legacy_version(self, key, value):
    """Populate the ``legacy_version`` key from MARC field 005 verbatim."""
    return value
@hep2marc.over('005', '^legacy_version$')
@hepnames2marc.over('005', '^legacy_version$')
def legacy_version2marc(self, key, value):
    """Convert ``legacy_version`` back to MARC field 005 unchanged."""
    return value
@hep.over('acquisition_source', '^541..')
@hepnames.over('acquisition_source', '^541..')
def acquisition_source(self, key, value):
    """Populate the ``acquisition_source`` key."""
    def _get_datetime(value):
        # Parse subfield ``d`` into an ISO datetime; when it is not a valid
        # partial date, return the raw string as-is.  Returns None when the
        # subfield is absent or empty.
        d_value = force_single_element(value.get('d', ''))
        if d_value:
            try:
                date = PartialDate.loads(d_value)
            except ValueError:
                return d_value
            else:
                datetime_ = datetime(year=date.year, month=date.month, day=date.day)
                return datetime_.isoformat()

    # Subfield ``a`` is repeatable and may carry an internal UID, an ORCID,
    # or a plain source string — classify each repetition.
    internal_uid, orcid, source = None, None, None
    a_values = force_list(value.get('a'))
    for a_value in a_values:
        if IS_INTERNAL_UID.match(a_value):
            if a_value.startswith('inspire:uid:'):
                internal_uid = int(a_value[12:])  # len('inspire:uid:') == 12
            else:
                internal_uid = int(a_value)
        elif IS_ORCID.match(a_value):
            if a_value.startswith('orcid:'):
                orcid = a_value[6:]  # len('orcid:') == 6
            else:
                orcid = a_value
        else:
            source = a_value

    # Translate legacy method names in subfield ``c`` to the current
    # vocabulary; any other value is kept lowercased.
    c_value = force_single_element(value.get('c', ''))
    normalized_c_value = c_value.lower()

    if normalized_c_value == 'batchupload':
        method = 'batchuploader'
    elif normalized_c_value == 'submission':
        method = 'submitter'
    else:
        method = normalized_c_value

    return {
        'datetime': _get_datetime(value),
        'email': value.get('b'),
        'internal_uid': internal_uid,
        'method': method,
        'orcid': orcid,
        'source': source,
        'submission_number': value.get('e'),
    }
@hep2marc.over('541', '^acquisition_source$')
@hepnames2marc.over('541', '^acquisition_source$')
def acquisition_source2marc(self, key, value):
    """Convert ``acquisition_source`` back to MARC field 541."""
    # Reverse of the mapping applied on the way in; unknown methods pass
    # through untouched.
    method_to_marc = {
        'batchuploader': 'batchupload',
        'submitter': 'submission',
    }
    orcid = value.get('orcid')
    subfield_a = 'orcid:' + orcid if orcid else value.get('source')
    method = value.get('method')

    return {
        'a': subfield_a,
        'b': value.get('email'),
        'c': method_to_marc.get(method, method),
        'd': value.get('datetime'),
        'e': value.get('submission_number'),
    }
@conferences.over('public_notes', '^500..')
@experiments.over('public_notes', '^500..')
@hepnames.over('public_notes', '^500..')
@institutions.over('public_notes', '^500..')
@journals.over('public_notes', '^500..')
@utils.flatten
@utils.for_each_value
def public_notes_500(self, key, value):
    """Populate the ``public_notes`` key."""
    # One note per repetition of subfield ``a``, all sharing subfield ``9``
    # as the source.
    source = value.get('9')
    notes = []
    for note in force_list(value.get('a')):
        notes.append({
            'source': source,
            'value': note,
        })
    return notes
@hep2marc.over('500', '^public_notes$')
@hepnames2marc.over('500', '^public_notes$')
@utils.for_each_value
def public_notes2marc(self, key, value):
    """Convert one ``public_notes`` entry back to MARC field 500."""
    return {
        '9': value.get('source'),
        'a': value.get('value'),
    }
@conferences.over('_private_notes', '^595..')
@experiments.over('_private_notes', '^595..')
@hepnames.over('_private_notes', '^595..')
@institutions.over('_private_notes', '^595..')
@journals.over('_private_notes', '^595..')
@utils.flatten
@utils.for_each_value
def _private_notes_595(self, key, value):
    """Populate the ``_private_notes`` key."""
    # One note per repetition of subfield ``a``; subfield ``9`` is the
    # shared source.
    return [
        {
            'source': value.get('9'),
            'value': _private_note,
        } for _private_note in force_list(value.get('a'))
    ]
@hep2marc.over('595', '^_private_notes$')
@hepnames2marc.over('595', '^_private_notes$')
@utils.for_each_value
def _private_notes2marc(self, key, value):
    """Convert one ``_private_notes`` entry back to MARC field 595."""
    return {
        '9': value.get('source'),
        'a': value.get('value'),
    }
@conferences.over('inspire_categories', '^65017')
@experiments.over('inspire_categories', '^65017')
@hep.over('inspire_categories', '^65017')
@institutions.over('inspire_categories', '^65017')
@journals.over('inspire_categories', '^65017')
def inspire_categories(self, key, value):
    """Populate the ``inspire_categories`` key, accumulating across fields."""
    schema = load_schema('elements/inspire_field')
    valid_sources = schema['properties']['source']['enum']

    inspire_categories = self.get('inspire_categories', [])

    scheme = force_single_element(value.get('2'))
    if scheme == 'arXiv':  # XXX: we skip arXiv categories here because
        return inspire_categories  # we're going to add them later in a filter.

    # Normalize subfield ``9`` onto the schema's allowed sources; known
    # legacy values are remapped, anything else is dropped to None.
    source = force_single_element(value.get('9', '')).lower()
    if source not in valid_sources:
        if source == 'automatically added based on dcc, ppf, dk':
            source = 'curator'
        elif source == 'submitter':
            source = 'user'
        else:
            source = None

    # Only terms that ``classify_field`` recognizes are kept.
    terms = force_list(value.get('a'))
    for _term in terms:
        term = classify_field(_term)
        if term:
            inspire_categories.append({
                'term': term,
                'source': source,
            })

    return inspire_categories
@hep2marc.over('65017', '^inspire_categories$')
@utils.for_each_value
def inspire_categories2marc(self, key, value):
    """Convert one ``inspire_categories`` entry back to MARC field 65017."""
    return {
        '2': 'INSPIRE',
        '9': value.get('source'),
        'a': value.get('term'),
    }
@conferences.over('_private_notes', '^667..')
@experiments.over('_private_notes', '^667..')
@hep.over('_private_notes', '^667..')
@institutions.over('_private_notes', '^667..')
@utils.for_each_value
def _private_notes_667(self, key, value):
    """Populate the ``_private_notes`` key from MARC field 667."""
    return {
        'source': value.get('9'),
        'value': value.get('a'),
    }
@conferences.over('public_notes', '^680..')
@experiments.over('public_notes', '^680..')
@institutions.over('public_notes', '^680..')
@journals.over('public_notes', '^680..')
@utils.for_each_value
def public_notes_680(self, key, value):
    """Populate the ``public_notes`` key from MARC field 680 (subfield ``i``)."""
    return {
        'source': value.get('9'),
        'value': value.get('i'),
    }
@conferences.over('urls', '^8564.')
@experiments.over('urls', '^8564.')
@hep.over('urls', '^8564.')
@institutions.over('urls', '^8564.')
@journals.over('urls', '^8564.')
def urls(self, key, value):
    """Populate the ``urls`` key, skipping URLs that point at the legacy site."""
    def _is_internal_url(url):
        # Compare the last two labels of LEGACY_BASE_URL's host against the
        # URL's host (falling back to the path when no netloc is parsed).
        # NOTE(review): this is a substring test, so a host that merely
        # contains the legacy domain would also match — confirm intended.
        base = urllib.parse.urlparse(current_app.config['LEGACY_BASE_URL'])
        base_netloc = base.netloc or base.path
        base_domain = '.'.join(base_netloc.split('.')[-2:])

        parsed_url = urllib.parse.urlparse(url)
        url_netloc = parsed_url.netloc or parsed_url.path
        return base_domain in url_netloc

    urls = self.get('urls', [])

    # Subfield ``y`` is a human-readable label, optionally normalized
    # through the WEBLINKS lookup table.
    description = force_single_element(value.get('y'))
    description = WEBLINKS.get(description, description)
    for url in force_list(value.get('u')):
        if not _is_internal_url(url):
            urls.append({
                'description': description,
                'value': url,
            })
    return urls
@hep2marc.over('8564', '^urls$')
@hepnames2marc.over('8564', '^urls$')
@utils.for_each_value
def urls2marc(self, key, value):
    """Convert one ``urls`` entry back to MARC field 8564."""
    return {
        'u': value.get('value'),
        'y': value.get('description'),
    }
@conferences.over('legacy_creation_date', '^961..')
@experiments.over('legacy_creation_date', '^961..')
@hep.over('legacy_creation_date', '^961..')
@hepnames.over('legacy_creation_date', '^961..')
@institutions.over('legacy_creation_date', '^961..')
@journals.over('legacy_creation_date', '^961..')
def legacy_creation_date(self, key, value):
    """Populate the ``legacy_creation_date`` key, keeping any value already set."""
    existing = self.get('legacy_creation_date')
    if existing:
        return existing

    # Field 961 may repeat; keep the earliest of the ``x`` subfield dates.
    creation_dates = force_list(value.get('x'))
    if creation_dates:
        return earliest_date(creation_dates)
@hep2marc.over('961', '^legacy_creation_date$')
@hepnames2marc.over('961', '^legacy_creation_date$')
def legacy_creation_date2marc(self, key, value):
    """Convert ``legacy_creation_date`` back to MARC field 961, subfield ``x``."""
    return {'x': value}
def external_system_identifiers(endpoint):
    """Populate the ``external_system_identifiers`` key.
    Also populates the ``new_record`` key through side effects.
    """
    @utils.flatten
    @utils.for_each_value
    def _external_system_identifiers(self, key, value):
        # Subfield ``d`` carries the recid of the record superseding this
        # one; when present, point ``new_record`` at it.
        new_recid = maybe_int(value.get('d'))
        if new_recid:
            self['new_record'] = get_record_ref(new_recid, endpoint)

        # Every repetition of subfield ``a`` is a SPIRES identifier.
        return [
            {
                'schema': 'SPIRES',
                'value': ext_sys_id,
            } for ext_sys_id in force_list(value.get('a'))
        ]
    return _external_system_identifiers
# Register the ``external_system_identifiers`` rule (MARC field 970) on each
# collection with its matching API endpoint.
conferences.over('external_system_identifiers', '^970..')(external_system_identifiers('conferences'))
experiments.over('external_system_identifiers', '^970..')(external_system_identifiers('experiments'))
hep.over('external_system_identifiers', '^970..')(external_system_identifiers('literature'))
institutions.over('external_system_identifiers', '^970..')(external_system_identifiers('institutions'))
journals.over('external_system_identifiers', '^970..')(external_system_identifiers('journals'))
@hep2marc.over('970', '^new_record$')
@hepnames2marc.over('970', '^new_record$')
def new_record2marc(self, key, value):
    """Convert ``new_record`` back to MARC field 970, subfield ``d``."""
    return {'d': get_recid_from_ref(value)}
@data.over('deleted', '^980..')
def deleted(self, key, value):
    """Populate the ``deleted`` key: True when subfield ``c`` says DELETED."""
    return value.get('c', '').upper() == 'DELETED'
def deleted_records(endpoint):
    """Populate the ``deleted_records`` key."""
    @utils.for_each_value
    def _deleted_records(self, key, value):
        # Subfield ``a`` holds the recid of a record deleted in favor of
        # this one; skip entries that are not valid integers.
        recid = maybe_int(value.get('a'))
        return get_record_ref(recid, endpoint) if recid else None
    return _deleted_records
# Register the ``deleted_records`` rule (MARC field 981) on every collection,
# mapping each one to its corresponding API endpoint name.
conferences.over('deleted_records', '^981..')(deleted_records('conferences'))
data.over('deleted_records', '^981..')(deleted_records('data'))
experiments.over('deleted_records', '^981..')(deleted_records('experiments'))
hep.over('deleted_records', '^981..')(deleted_records('literature'))
hepnames.over('deleted_records', '^981..')(deleted_records('authors'))
institutions.over('deleted_records', '^981..')(deleted_records('institutions'))
journals.over('deleted_records', '^981..')(deleted_records('journals'))
@hep2marc.over('981', 'deleted_records')
@hepnames2marc.over('981', 'deleted_records')
@utils.for_each_value
def deleted_records2marc(self, key, value):
    """Convert one ``deleted_records`` ref back to MARC field 981, subfield ``a``."""
    return {'a': get_recid_from_ref(value)}
| gpl-3.0 |
BassantMorsi/finderApp | lib/python2.7/site-packages/django/contrib/auth/checks.py | 60 | 6420 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from itertools import chain
from types import MethodType
from django.apps import apps
from django.conf import settings
from django.core import checks
from .management import _get_builtin_permissions
def check_user_model(app_configs=None, **kwargs):
    """System check for the model named by ``settings.AUTH_USER_MODEL``.

    Validates REQUIRED_FIELDS, USERNAME_FIELD uniqueness, and that
    ``is_anonymous`` / ``is_authenticated`` are properties, returning a list
    of ``checks.Error`` / ``checks.Warning`` / ``checks.Critical`` messages.
    """
    if app_configs is None:
        cls = apps.get_model(settings.AUTH_USER_MODEL)
    else:
        app_label, model_name = settings.AUTH_USER_MODEL.split('.')
        for app_config in app_configs:
            if app_config.label == app_label:
                cls = app_config.get_model(model_name)
                break
        else:
            # Checks might be run against a set of app configs that don't
            # include the specified user model. In this case we simply don't
            # perform the checks defined below.
            return []

    errors = []

    # Check that REQUIRED_FIELDS is a list
    if not isinstance(cls.REQUIRED_FIELDS, (list, tuple)):
        errors.append(
            checks.Error(
                "'REQUIRED_FIELDS' must be a list or tuple.",
                obj=cls,
                id='auth.E001',
            )
        )

    # Check that the USERNAME FIELD isn't included in REQUIRED_FIELDS.
    if cls.USERNAME_FIELD in cls.REQUIRED_FIELDS:
        errors.append(
            checks.Error(
                "The field named as the 'USERNAME_FIELD' "
                "for a custom user model must not be included in 'REQUIRED_FIELDS'.",
                obj=cls,
                id='auth.E002',
            )
        )

    # Check that the username field is unique.  With the default ModelBackend
    # a non-unique username is a hard error; otherwise only a warning, since
    # a custom backend may cope with duplicates.
    if not cls._meta.get_field(cls.USERNAME_FIELD).unique:
        if (settings.AUTHENTICATION_BACKENDS ==
                ['django.contrib.auth.backends.ModelBackend']):
            errors.append(
                checks.Error(
                    "'%s.%s' must be unique because it is named as the 'USERNAME_FIELD'." % (
                        cls._meta.object_name, cls.USERNAME_FIELD
                    ),
                    obj=cls,
                    id='auth.E003',
                )
            )
        else:
            errors.append(
                checks.Warning(
                    "'%s.%s' is named as the 'USERNAME_FIELD', but it is not unique." % (
                        cls._meta.object_name, cls.USERNAME_FIELD
                    ),
                    hint='Ensure that your authentication backend(s) can handle non-unique usernames.',
                    obj=cls,
                    id='auth.W004',
                )
            )

    # is_anonymous/is_authenticated must be attributes or properties: a bound
    # method is always truthy, so anonymous users would appear authenticated.
    if isinstance(cls().is_anonymous, MethodType):
        errors.append(
            checks.Critical(
                '%s.is_anonymous must be an attribute or property rather than '
                'a method. Ignoring this is a security issue as anonymous '
                'users will be treated as authenticated!' % cls,
                obj=cls,
                id='auth.C009',
            )
        )
    if isinstance(cls().is_authenticated, MethodType):
        errors.append(
            checks.Critical(
                '%s.is_authenticated must be an attribute or property rather '
                'than a method. Ignoring this is a security issue as anonymous '
                'users will be treated as authenticated!' % cls,
                obj=cls,
                id='auth.C010',
            )
        )
    return errors
def check_models_permissions(app_configs=None, **kwargs):
    """System check validating model permission names and codenames.

    Flags builtin/custom permission names exceeding the ``auth.Permission``
    name column length, and custom codenames that clash with builtin ones or
    are duplicated.  Returns a list of ``checks.Error`` messages.
    """
    if app_configs is None:
        models = apps.get_models()
    else:
        models = chain.from_iterable(app_config.get_models() for app_config in app_configs)

    # The limiting factor is the max_length of Permission.name.
    Permission = apps.get_model('auth', 'Permission')
    permission_name_max_length = Permission._meta.get_field('name').max_length
    errors = []

    for model in models:
        opts = model._meta
        builtin_permissions = dict(_get_builtin_permissions(opts))
        # Check builtin permission name length.
        max_builtin_permission_name_length = (
            max(len(name) for name in builtin_permissions.values())
            if builtin_permissions else 0
        )
        if max_builtin_permission_name_length > permission_name_max_length:
            # Builtin names embed the verbose_name, so report how short the
            # verbose_name would have to be for the names to fit.
            verbose_name_max_length = (
                permission_name_max_length - (max_builtin_permission_name_length - len(opts.verbose_name_raw))
            )
            errors.append(
                checks.Error(
                    "The verbose_name of model '%s.%s' must be at most %d characters "
                    "for its builtin permission names to be at most %d characters." % (
                        opts.app_label, opts.object_name, verbose_name_max_length, permission_name_max_length
                    ),
                    obj=model,
                    id='auth.E007',
                )
            )
        codenames = set()
        for codename, name in opts.permissions:
            # Check custom permission name length.
            if len(name) > permission_name_max_length:
                errors.append(
                    checks.Error(
                        "The permission named '%s' of model '%s.%s' is longer than %d characters." % (
                            name, opts.app_label, opts.object_name, permission_name_max_length
                        ),
                        obj=model,
                        id='auth.E008',
                    )
                )
            # Check custom permissions codename clashing.
            if codename in builtin_permissions:
                errors.append(
                    checks.Error(
                        "The permission codenamed '%s' clashes with a builtin permission "
                        "for model '%s.%s'." % (
                            codename, opts.app_label, opts.object_name
                        ),
                        obj=model,
                        id='auth.E005',
                    )
                )
            elif codename in codenames:
                errors.append(
                    checks.Error(
                        "The permission codenamed '%s' is duplicated for model '%s.%s'." % (
                            codename, opts.app_label, opts.object_name
                        ),
                        obj=model,
                        id='auth.E006',
                    )
                )
            codenames.add(codename)
    return errors
| mit |
javier3407/Plugin.Video.JAVIER.TV.AR.BR | resources/lib/chardet/mbcharsetprober.py | 2924 | 3268 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
# Proofpoint, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from . import constants
from .charsetprober import CharSetProber
class MultiByteCharSetProber(CharSetProber):
    """Base class for multi-byte charset probers.

    Feeds every input byte through a coding state machine (validity of the
    byte sequence) and a character distribution analyzer (frequency-based
    confidence).  Subclasses assign the two analyzers and the charset name.
    """

    def __init__(self):
        CharSetProber.__init__(self)
        # Both are set by concrete subclasses.
        self._mDistributionAnalyzer = None
        self._mCodingSM = None
        # Rolling window of the last two bytes seen, fed to the analyzer.
        self._mLastChar = [0, 0]

    def reset(self):
        """Reset the prober, its state machine and its analyzer."""
        CharSetProber.reset(self)
        if self._mCodingSM:
            self._mCodingSM.reset()
        if self._mDistributionAnalyzer:
            self._mDistributionAnalyzer.reset()
        self._mLastChar = [0, 0]

    def get_charset_name(self):
        # Abstract: concrete subclasses return the charset name they detect.
        pass

    def feed(self, aBuf):
        """Feed a chunk of bytes; return the resulting detection state."""
        aLen = len(aBuf)
        for i in range(0, aLen):
            codingState = self._mCodingSM.next_state(aBuf[i])
            if codingState == constants.eError:
                # Illegal byte sequence for this encoding: rule it out.
                if constants._debug:
                    sys.stderr.write(self.get_charset_name()
                                     + ' prober hit error at byte ' + str(i)
                                     + '\n')
                self._mState = constants.eNotMe
                break
            elif codingState == constants.eItsMe:
                self._mState = constants.eFoundIt
                break
            elif codingState == constants.eStart:
                # A complete character was consumed; hand it to the analyzer.
                charLen = self._mCodingSM.get_current_charlen()
                if i == 0:
                    self._mLastChar[1] = aBuf[0]
                    self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
                else:
                    self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1],
                                                     charLen)

        # Remember the final byte so a character split across chunks can
        # still be analyzed on the next feed().
        self._mLastChar[0] = aBuf[aLen - 1]

        if self.get_state() == constants.eDetecting:
            if (self._mDistributionAnalyzer.got_enough_data() and
                    (self.get_confidence() > constants.SHORTCUT_THRESHOLD)):
                self._mState = constants.eFoundIt

        return self.get_state()

    def get_confidence(self):
        """Return the analyzer's confidence that this is the right charset."""
        return self._mDistributionAnalyzer.get_confidence()
| gpl-3.0 |
JeremyJStarcher/bin-shared | bin/bash-git-prompt/gitstatus.py | 6 | 4715 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""This module defines a Print function to use with python 2.x or 3.x., so we can use the prompt with older versions of
Python too
It's interface is that of python 3.0's print. See
http://docs.python.org/3.0/library/functions.html?highlight=print#print
Shamelessly ripped from
http://www.daniweb.com/software-development/python/code/217214/a-print-function-for-different-versions-of-python
"""
# Prompt decoration symbols: ahead/behind arrows and the prefix used before a
# short hash when HEAD is detached.  Change these to whatever you prefer.
symbols = {'ahead of': '↑·', 'behind': '↓·', 'prehash': ':'}
import sys
import re
from subprocess import Popen, PIPE
__all__ = ["Print"]

# Compatibility shim: bind ``Print`` to a print *function* on every supported
# interpreter, and set ``python_version``/``to_str`` accordingly.
try:
    Print = eval("print")  # python 3.0 case: ``print`` is already a function
    python_version = 3
    to_str = str
except SyntaxError as e:
    # Python 2: ``print`` is a statement, so eval("print") raises SyntaxError.
    python_version = 2
    to_str = unicode
    D = dict()
    try:
        # 2.6+: importing print_function inside exec yields the builtin.
        exec ("from __future__ import print_function\np=print", D)
        Print = D["p"] # 2.6 case
    except SyntaxError:
        def Print(*args, **kwd): # 2.4, 2.5, define our own Print function
            fout = kwd.get("file", sys.stdout)
            w = fout.write
            if args:
                w(str(args[0]))
                sep = kwd.get("sep", " ")
                for a in args[1:]:
                    w(sep)
                    w(str(a))
            w(kwd.get("end", "\n"))
    finally:
        del D
def get_tag_or_hash():
    """Return the tag exactly matching HEAD, or ``prehash`` + short hash.

    Used when HEAD is detached, so the prompt shows something meaningful
    instead of a branch name.
    """
    cmd = Popen(['git', 'describe', '--exact-match'], stdout=PIPE, stderr=PIPE)
    so, se = cmd.communicate()
    tag = '%s' % so.decode('utf-8').strip()
    if tag:
        return tag
    else:
        # No tag points at HEAD: fall back to the abbreviated commit hash.
        cmd = Popen(['git', 'rev-parse', '--short', 'HEAD'], stdout=PIPE, stderr=PIPE)
        so, se = cmd.communicate()
        hash_name = '%s' % so.decode('utf-8').strip()
        return ''.join([symbols['prehash'], hash_name])
def get_stash():
    """Return the number of stash entries (0 when there is no stash log)."""
    cmd = Popen(['git', 'rev-parse', '--git-dir'], stdout=PIPE, stderr=PIPE)
    so, se = cmd.communicate()
    # Each line of .git/logs/refs/stash is one stash entry.
    stash_file = '%s%s' % (so.decode('utf-8').rstrip(), '/logs/refs/stash')

    try:
        with open(stash_file) as f:
            return sum(1 for _ in f)
    except IOError:
        # File absent means no stashes have ever been created.
        return 0
# `git status --porcelain --branch` can collect all information
# branch, remote_branch, untracked, staged, changed, conflicts, ahead, behind
po = Popen(['git', 'status', '--porcelain', '--branch'], env={'LC_ALL': 'C'}, stdout=PIPE, stderr=PIPE)
stdout, stderr = po.communicate()
if po.returncode != 0:
    sys.exit(0)  # Not a git repository

# collect git status information
untracked, staged, changed, conflicts = [], [], [], []
num_ahead, num_behind = 0, 0
ahead, behind = '', ''
branch = ''
remote = ''
# Porcelain lines are "XY <rest>": split into the two status columns and
# the remainder.
status = [(line[0], line[1], line[2:]) for line in stdout.decode('utf-8').splitlines()]
for st in status:
    if st[0] == '#' and st[1] == '#':
        # The "## ..." header line describes the branch and its upstream.
        if re.search('Initial commit on', st[2]):
            branch = st[2].split(' ')[-1]
        elif re.search('no branch', st[2]):  # detached status
            branch = get_tag_or_hash()
        elif len(st[2].strip().split('...')) == 1:
            # No upstream configured: the header is just the branch name.
            branch = st[2].strip()
        else:
            # current and remote branch info
            branch, rest = st[2].strip().split('...')
            if len(rest.split(' ')) == 1:
                # remote_branch = rest.split(' ')[0]
                pass
            else:
                # ahead or behind
                divergence = ' '.join(rest.split(' ')[1:])
                divergence = divergence.lstrip('[').rstrip(']')
                for div in divergence.split(', '):
                    if 'ahead' in div:
                        num_ahead = int(div[len('ahead '):].strip())
                        ahead = '%s%s' % (symbols['ahead of'], num_ahead)
                    elif 'behind' in div:
                        num_behind = int(div[len('behind '):].strip())
                        behind = '%s%s' % (symbols['behind'], num_behind)
                remote = ''.join([behind, ahead])
    elif st[0] == '?' and st[1] == '?':
        untracked.append(st)
    else:
        # Worktree-modified files have 'M' in the second column; index
        # changes (anything non-blank in the first column) count as staged,
        # with 'U' marking a merge conflict.
        if st[1] == 'M':
            changed.append(st)
        if st[0] == 'U':
            conflicts.append(st)
        elif st[0] != ' ':
            staged.append(st)

stashed = get_stash()
if not changed and not staged and not conflicts and not untracked and not stashed:
    clean = 1
else:
    clean = 0

if remote == "":
    remote = '.'

if python_version == 2:
    remote = remote.decode('utf-8')

# Emit one value per line; the shell prompt script reads them positionally.
out = '\n'.join([
    branch,
    remote,
    to_str(len(staged)),
    to_str(len(conflicts)),
    to_str(len(changed)),
    to_str(len(untracked)),
    to_str(stashed),
    to_str(clean),
    to_str(python_version),
])
if python_version == 2:
    Print(out.encode('utf-8'))
else:
    Print(out)
monsta/pluma | plugins/pythonconsole/pythonconsole/console.py | 1 | 13457 | # -*- coding: utf-8 -*-
# pythonconsole.py -- Console widget
#
# Copyright (C) 2006 - Steve Frécinaux
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
# Parts from "Interactive Python-GTK Console" (stolen from epiphany's console.py)
# Copyright (C), 1998 James Henstridge <james@daa.com.au>
# Copyright (C), 2005 Adam Hooper <adamh@densi.com>
# Bits from pluma Python Console Plugin
# Copyrignt (C), 2005 Raphaël Slinckx
import string
import sys
import re
import traceback
import gobject
import gtk
import pango
from config import PythonConsoleConfig
__all__ = ('PythonConsole', 'OutFile')
class PythonConsole(gtk.ScrolledWindow):
__gsignals__ = {
'grab-focus' : 'override',
}
def __init__(self, namespace = {}):
gtk.ScrolledWindow.__init__(self)
self.set_policy(gtk.POLICY_NEVER, gtk.POLICY_AUTOMATIC)
self.set_shadow_type(gtk.SHADOW_IN)
self.view = gtk.TextView()
self.view.modify_font(pango.FontDescription('Monospace'))
self.view.set_editable(True)
self.view.set_wrap_mode(gtk.WRAP_WORD_CHAR)
self.add(self.view)
self.view.show()
buffer = self.view.get_buffer()
self.normal = buffer.create_tag("normal")
self.error = buffer.create_tag("error")
self.command = buffer.create_tag("command")
PythonConsoleConfig.add_handler(self.apply_preferences)
self.apply_preferences()
self.__spaces_pattern = re.compile(r'^\s+')
self.namespace = namespace
self.block_command = False
# Init first line
buffer.create_mark("input-line", buffer.get_end_iter(), True)
buffer.insert(buffer.get_end_iter(), ">>> ")
buffer.create_mark("input", buffer.get_end_iter(), True)
# Init history
self.history = ['']
self.history_pos = 0
self.current_command = ''
self.namespace['__history__'] = self.history
# Set up hooks for standard output.
self.stdout = OutFile(self, sys.stdout.fileno(), self.normal)
self.stderr = OutFile(self, sys.stderr.fileno(), self.error)
# Signals
self.view.connect("key-press-event", self.__key_press_event_cb)
buffer.connect("mark-set", self.__mark_set_cb)
    def do_grab_focus(self):
        # Forward focus grabs on the scrolled window to the inner text view.
        self.view.grab_focus()
    def apply_preferences(self, *args):
        """Re-read the color preferences and apply them to the text tags."""
        config = PythonConsoleConfig()
        self.error.set_property("foreground", config.color_error)
        self.command.set_property("foreground", config.color_command)
    def stop(self):
        # Drop the interpreter namespace so objects it holds can be collected.
        self.namespace = None
    def __key_press_event_cb(self, view, event):
        """Handle key presses in the console view.

        Implements: Ctrl+D (close), Ctrl+Return (line continuation),
        Return (run/continue command), Up/Down (history), Left/BackSpace
        (keep the cursor out of the prompt), and smart Home/End.
        Returns True when the event was fully handled.
        """
        modifier_mask = gtk.accelerator_get_default_mod_mask()
        event_state = event.state & modifier_mask

        if event.keyval == gtk.keysyms.d and event_state == gtk.gdk.CONTROL_MASK:
            self.destroy()

        elif event.keyval == gtk.keysyms.Return and event_state == gtk.gdk.CONTROL_MASK:
            # Ctrl+Return: continue the current command on a new "... " line
            # without evaluating it yet.
            # Get the command
            buffer = view.get_buffer()
            inp_mark = buffer.get_mark("input")
            inp = buffer.get_iter_at_mark(inp_mark)
            cur = buffer.get_end_iter()
            line = buffer.get_text(inp, cur)
            self.current_command = self.current_command + line + "\n"
            self.history_add(line)

            # Prepare the new line
            cur = buffer.get_end_iter()
            buffer.insert(cur, "\n... ")
            cur = buffer.get_end_iter()
            buffer.move_mark(inp_mark, cur)

            # Keep indentation of precendent line
            spaces = re.match(self.__spaces_pattern, line)
            if spaces is not None:
                buffer.insert(cur, line[spaces.start() : spaces.end()])
                cur = buffer.get_end_iter()

            buffer.place_cursor(cur)
            gobject.idle_add(self.scroll_to_end)
            return True

        elif event.keyval == gtk.keysyms.Return:
            # Return: evaluate the command, unless it is an unfinished block
            # (ends with ':', inside a block, or ends with '\').
            # Get the marks
            buffer = view.get_buffer()
            lin_mark = buffer.get_mark("input-line")
            inp_mark = buffer.get_mark("input")

            # Get the command line
            inp = buffer.get_iter_at_mark(inp_mark)
            cur = buffer.get_end_iter()
            line = buffer.get_text(inp, cur)
            self.current_command = self.current_command + line + "\n"
            self.history_add(line)

            # Make the line blue
            lin = buffer.get_iter_at_mark(lin_mark)
            buffer.apply_tag(self.command, lin, cur)
            buffer.insert(cur, "\n")

            cur_strip = self.current_command.rstrip()

            if cur_strip.endswith(":") \
            or (self.current_command[-2:] != "\n\n" and self.block_command):
                # Unfinished block command
                self.block_command = True
                com_mark = "... "
            elif cur_strip.endswith("\\"):
                com_mark = "... "
            else:
                # Eval the command
                self.__run(self.current_command)
                self.current_command = ''
                self.block_command = False
                com_mark = ">>> "

            # Prepare the new line
            cur = buffer.get_end_iter()
            buffer.move_mark(lin_mark, cur)
            buffer.insert(cur, com_mark)
            cur = buffer.get_end_iter()
            buffer.move_mark(inp_mark, cur)
            buffer.place_cursor(cur)
            gobject.idle_add(self.scroll_to_end)
            return True

        elif event.keyval == gtk.keysyms.KP_Down or event.keyval == gtk.keysyms.Down:
            # Next entry from history
            view.emit_stop_by_name("key_press_event")
            self.history_down()
            gobject.idle_add(self.scroll_to_end)
            return True

        elif event.keyval == gtk.keysyms.KP_Up or event.keyval == gtk.keysyms.Up:
            # Previous entry from history
            view.emit_stop_by_name("key_press_event")
            self.history_up()
            gobject.idle_add(self.scroll_to_end)
            return True

        elif event.keyval == gtk.keysyms.KP_Left or event.keyval == gtk.keysyms.Left or \
             event.keyval == gtk.keysyms.BackSpace:
            # Stop the cursor from moving left into the ">>> " prompt.
            buffer = view.get_buffer()
            inp = buffer.get_iter_at_mark(buffer.get_mark("input"))
            cur = buffer.get_iter_at_mark(buffer.get_insert())
            if inp.compare(cur) == 0:
                if not event_state:
                    buffer.place_cursor(inp)
                return True
            return False

        # For the console we enable smart/home end behavior incoditionally
        # since it is useful when editing python
        elif (event.keyval == gtk.keysyms.KP_Home or event.keyval == gtk.keysyms.Home) and \
             event_state == event_state & (gtk.gdk.SHIFT_MASK|gtk.gdk.CONTROL_MASK):
            # Go to the begin of the command instead of the begin of the line
            buffer = view.get_buffer()
            iter = buffer.get_iter_at_mark(buffer.get_mark("input"))
            ins = buffer.get_iter_at_mark(buffer.get_insert())

            # Skip leading whitespace; a second press jumps to the very start.
            while iter.get_char().isspace():
                iter.forward_char()

            if iter.equal(ins):
                iter = buffer.get_iter_at_mark(buffer.get_mark("input"))

            if event_state & gtk.gdk.SHIFT_MASK:
                # Shift held: extend the selection instead of moving the cursor.
                buffer.move_mark_by_name("insert", iter)
            else:
                buffer.place_cursor(iter)
            return True

        elif (event.keyval == gtk.keysyms.KP_End or event.keyval == gtk.keysyms.End) and \
             event_state == event_state & (gtk.gdk.SHIFT_MASK|gtk.gdk.CONTROL_MASK):
            buffer = view.get_buffer()
            iter = buffer.get_end_iter()
            ins = buffer.get_iter_at_mark(buffer.get_insert())

            # Skip trailing whitespace; a second press jumps to the very end.
            iter.backward_char()
            while iter.get_char().isspace():
                iter.backward_char()
            iter.forward_char()

            if iter.equal(ins):
                iter = buffer.get_end_iter()

            if event_state & gtk.gdk.SHIFT_MASK:
                buffer.move_mark_by_name("insert", iter)
            else:
                buffer.place_cursor(iter)
            return True
def __mark_set_cb(self, buffer, iter, name):
    """ Keep the view read-only whenever the cursor sits before the
    "input" mark (i.e. inside already-evaluated console output). """
    input_start = buffer.get_iter_at_mark(buffer.get_mark("input"))
    cursor = buffer.get_iter_at_mark(buffer.get_insert())
    # editable unless the cursor compares strictly before the input mark
    self.view.set_editable(cursor.compare(input_start) != -1)
def get_command_line(self):
    """ Return the text typed after the "input" mark (the pending command). """
    buf = self.view.get_buffer()
    start = buf.get_iter_at_mark(buf.get_mark("input"))
    end = buf.get_end_iter()
    return buf.get_text(start, end)
def set_command_line(self, command):
    """ Replace the pending command-line text with *command* and focus
    the view so the user can keep typing. """
    buf = self.view.get_buffer()
    input_mark = buf.get_mark("input")
    start = buf.get_iter_at_mark(input_mark)
    # delete revalidates ``start`` to the deletion point, so the insert
    # below lands where the old command began
    buf.delete(start, buf.get_end_iter())
    buf.insert(start, command)
    self.view.grab_focus()
def history_add(self, line):
    """ Record a non-blank command in the history.

    The last history slot always holds the in-progress (empty) entry:
    the new line overwrites it and a fresh empty slot is appended. """
    if not line.strip():
        return
    self.history_pos = len(self.history)
    self.history[self.history_pos - 1] = line
    self.history.append('')
def history_up(self):
    """ Step one entry back in history, saving the current command line
    into the slot being left. No-op at the oldest entry. """
    if self.history_pos <= 0:
        return
    self.history[self.history_pos] = self.get_command_line()
    self.history_pos -= 1
    self.set_command_line(self.history[self.history_pos])
def history_down(self):
    """ Step one entry forward in history, saving the current command
    line into the slot being left. No-op at the newest entry. """
    if self.history_pos >= len(self.history) - 1:
        return
    self.history[self.history_pos] = self.get_command_line()
    self.history_pos += 1
    self.set_command_line(self.history[self.history_pos])
def scroll_to_end(self):
    """ Scroll the text view to the end of the buffer.

    Returns False so it runs only once when scheduled via idle_add. """
    end = self.view.get_buffer().get_end_iter()
    self.view.scroll_to_iter(end, 0.0)
    return False
def write(self, text, tag=None):
    """ Append *text* to the console buffer, optionally styled with
    *tag*, then schedule a scroll to the end on idle. """
    buf = self.view.get_buffer()
    if tag is None:
        buf.insert(buf.get_end_iter(), text)
    else:
        buf.insert_with_tags(buf.get_end_iter(), text, tag)
    gobject.idle_add(self.scroll_to_end)
def eval(self, command, display_command=False):
    """ Execute *command* (a string, or a list/tuple of strings) in the
    console.

    Clears the pending input line, optionally echoes each command with a
    ">>> " prompt, runs it, then lays down a fresh prompt and moves the
    "input-line"/"input" marks past it.

    Fixed: the single-command branch echoed the loop variable ``c`` —
    undefined (NameError) or stale from an earlier list call — instead of
    ``command`` when display_command was True. Normalising to a list
    removes the duplicated branch entirely.
    """
    buffer = self.view.get_buffer()
    lin = buffer.get_mark("input-line")
    # drop whatever was typed after the last prompt
    buffer.delete(buffer.get_iter_at_mark(lin),
                  buffer.get_end_iter())
    if isinstance(command, (list, tuple)):
        commands = command
    else:
        commands = [command]
    for cmd in commands:
        if display_command:
            self.write(">>> " + cmd + "\n", self.command)
        self.__run(cmd)
    # start a new prompt and advance the marks past it
    cur = buffer.get_end_iter()
    buffer.move_mark_by_name("input-line", cur)
    buffer.insert(cur, ">>> ")
    cur = buffer.get_end_iter()
    buffer.move_mark_by_name("input", cur)
    self.view.scroll_to_iter(buffer.get_end_iter(), 0.0)
def __run(self, command):
    """ Execute one command string inside the console namespace. """
    # Swap the real std streams for the console's fake file objects so
    # any print output from user code lands in the text buffer.
    sys.stdout, self.stdout = self.stdout, sys.stdout
    sys.stderr, self.stderr = self.stderr, sys.stderr
    # eval and exec are broken in how they deal with utf8-encoded
    # strings so we have to explicitly decode the command before
    # passing it along
    command = command.decode('utf8')
    try:
        try:
            # Try expression evaluation first so results are echoed
            # like the interactive interpreter.
            r = eval(command, self.namespace, self.namespace)
            if r is not None:
                # NOTE: Python 2 backtick-repr and (below) the exec
                # statement -- this module is Python-2-only.
                print `r`
        except SyntaxError:
            # Not an expression; run it as a statement instead.
            exec command in self.namespace
    except:
        # Bare except: any error raised by user code ends up here.
        if hasattr(sys, 'last_type') and sys.last_type == SystemExit:
            self.destroy()
        else:
            traceback.print_exc()
    # Restore the real std streams.
    sys.stdout, self.stdout = self.stdout, sys.stdout
    sys.stderr, self.stderr = self.stderr, sys.stderr
def destroy(self):
    """ Placeholder teardown hook; intentionally does nothing. """
    # gtk.ScrolledWindow.destroy(self) deliberately not called
    pass
class OutFile:
    """A fake output file object. It sends output to a text console widget,
    and if asked for a file number, returns one set on instance creation.

    Fixes:
    - ``raise IOError, (...)`` was Python-2-only syntax (a SyntaxError on
      Python 3); replaced with the call form, identical in both versions.
    - ``writelines`` forwarded the raw sequence to ``console.write``,
      which expects a text string; the parts are now joined first.
    """

    def __init__(self, console, fn, tag):
        # console: object exposing write(text, tag)
        # fn: file descriptor number to report from fileno()
        # tag: text tag applied to everything written through this file
        self.fn = fn
        self.console = console
        self.tag = tag

    def close(self):
        """No-op; there is nothing to release."""
        pass

    def flush(self):
        """No-op; writes go straight to the console."""
        pass

    def fileno(self):
        """Return the descriptor number supplied at creation."""
        return self.fn

    def isatty(self):
        """Never a tty."""
        return 0

    def read(self, a):
        """Output-only file: reads always yield nothing."""
        return ''

    def readline(self):
        return ''

    def readlines(self):
        return []

    def write(self, s):
        """Forward one string to the console with this file's tag."""
        self.console.write(s, self.tag)

    def writelines(self, l):
        """Forward a sequence of strings to the console as one write."""
        self.console.write(''.join(l), self.tag)

    def seek(self, a):
        # errno 29 == ESPIPE ("Illegal seek")
        raise IOError(29, 'Illegal seek')

    def tell(self):
        raise IOError(29, 'Illegal seek')

    truncate = tell
# ex:et:ts=4:
| gpl-2.0 |
sumedhasingla/VTK | Filters/Extraction/Testing/Python/ExtractTensors.py | 20 | 2970 | #!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()

# create tensor ellipsoids
# Create the RenderWindow, Renderer and interactive renderer
#
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# Point-load stress-tensor source sampled on a 30^3 grid over [-10,10]^3.
ptLoad = vtk.vtkPointLoad()
ptLoad.SetLoadValue(100.0)
ptLoad.SetSampleDimensions(30,30,30)
ptLoad.ComputeEffectiveStressOn()
ptLoad.SetModelBounds(-10,10,-10,10,-10,10)
extractTensor = vtk.vtkExtractTensorComponents()
extractTensor.SetInputConnection(ptLoad.GetOutputPort())
extractTensor.ScalarIsEffectiveStress()
# NOTE(review): this call switches the scalar mode again and overrides
# ScalarIsEffectiveStress() above -- confirm which mode is intended.
extractTensor.ScalarIsComponent()
extractTensor.ExtractScalarsOn()
extractTensor.ExtractVectorsOn()
extractTensor.ExtractNormalsOff()
extractTensor.ExtractTCoordsOn()
contour = vtk.vtkContourFilter()
contour.SetInputConnection(extractTensor.GetOutputPort())
contour.SetValue(0,0)
probe = vtk.vtkProbeFilter()
probe.SetInputConnection(contour.GetOutputPort())
probe.SetSourceConnection(ptLoad.GetOutputPort())
# Subdivision filter is constructed but not wired into the mapper (see
# the commented alternative below); kept to preserve the test baseline.
su = vtk.vtkLoopSubdivisionFilter()
su.SetInputConnection(probe.GetOutputPort())
su.SetNumberOfSubdivisions(1)
s1Mapper = vtk.vtkPolyDataMapper()
s1Mapper.SetInputConnection(probe.GetOutputPort())
# s1Mapper SetInputConnection [su GetOutputPort]
s1Actor = vtk.vtkActor()
s1Actor.SetMapper(s1Mapper)
#
# plane for context
#
g = vtk.vtkImageDataGeometryFilter()
g.SetInputConnection(ptLoad.GetOutputPort())
g.SetExtent(0,100,0,100,0,0)
g.Update()
#for scalar range
gm = vtk.vtkPolyDataMapper()
gm.SetInputConnection(g.GetOutputPort())
gm.SetScalarRange(g.GetOutput().GetScalarRange())
ga = vtk.vtkActor()
ga.SetMapper(gm)
# reuse the context plane's scalar range so both mappers share a colormap
s1Mapper.SetScalarRange(g.GetOutput().GetScalarRange())
#
# Create outline around data
#
outline = vtk.vtkOutlineFilter()
outline.SetInputConnection(ptLoad.GetOutputPort())
outlineMapper = vtk.vtkPolyDataMapper()
outlineMapper.SetInputConnection(outline.GetOutputPort())
outlineActor = vtk.vtkActor()
outlineActor.SetMapper(outlineMapper)
outlineActor.GetProperty().SetColor(0,0,0)
#
# Create cone indicating application of load
#
coneSrc = vtk.vtkConeSource()
coneSrc.SetRadius(.5)
coneSrc.SetHeight(2)
coneMap = vtk.vtkPolyDataMapper()
coneMap.SetInputConnection(coneSrc.GetOutputPort())
coneActor = vtk.vtkActor()
coneActor.SetMapper(coneMap)
coneActor.SetPosition(0,0,11)
coneActor.RotateY(90)
coneActor.GetProperty().SetColor(1,0,0)
# Fixed regression-baseline camera.
camera = vtk.vtkCamera()
camera.SetFocalPoint(0.113766,-1.13665,-1.01919)
camera.SetPosition(-29.4886,-63.1488,26.5807)
camera.SetViewAngle(24.4617)
camera.SetViewUp(0.17138,0.331163,0.927879)
camera.SetClippingRange(1,100)
ren1.AddActor(s1Actor)
ren1.AddActor(outlineActor)
ren1.AddActor(coneActor)
ren1.AddActor(ga)
ren1.SetBackground(1.0,1.0,1.0)
ren1.SetActiveCamera(camera)
renWin.SetSize(300,300)
renWin.Render()
# prevent the tk window from showing up then start the event loop
# --- end of script --
| bsd-3-clause |
rjschwei/azure-sdk-for-python | azure-mgmt-batch/azure/mgmt/batch/models/batch_location_quota.py | 3 | 1115 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class BatchLocationQuota(Model):
    """Quotas associated with a Batch region for a particular subscription.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar account_quota: The number of Batch accounts that may be created
     under the subscription in the specified region.
    :vartype account_quota: int
    """

    _validation = {
        'account_quota': {'readonly': True},
    }

    _attribute_map = {
        'account_quota': {'key': 'accountQuota', 'type': 'int'},
    }

    def __init__(self):
        # Fixed: initialise the msrest Model base class so any
        # serialization bookkeeping it performs is not skipped.
        super(BatchLocationQuota, self).__init__()
        # read-only; populated by the server during deserialization
        self.account_quota = None
| mit |
lachlan-00/mytag | mytag/mytag.py | 1 | 45905 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
""" mytag: Python music tagger and file organiser
----------------Authors----------------
Lachlan de Waard <lachlan.00@gmail.com>
----------------Licence----------------
GNU General Public License version 3
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import sys
import mytagworker
from gi.repository import Gtk
from gi.repository import Gdk
from gi.repository import GLib
from gi.repository import Notify
from xdg.BaseDirectory import xdg_config_dirs
# ConfigParser renamed for python3
try:
import ConfigParser
except ImportError:
import configparser as ConfigParser
# Python 2 Tag support
if sys.version[0] == '2':
    # python-eyeD3 required for editing and loading tags; newer releases
    # install as "eyed3", older ones as "eyeD3" -- try both.
    try:
        import eyed3 as eyeD3
        TAG_SUPPORT = True
    except ImportError:
        try:
            import eyeD3
            TAG_SUPPORT = True
        except ImportError:
            eyeD3 = None
            TAG_SUPPORT = False
# quit if using python3
if sys.version[0] == '3':
    # look at using mutagen to support python3 instead of eyed3
    # import mutagen
    raise Exception('not python3 compatible, please use python 2.x')
# Get OS type ('nt' on Windows, 'posix' on Linux/BSD/macOS)
OS = os.name
# Acceptable media files
MEDIA_TYPES = ['.m4a', '.flac', '.ogg', '.mp2', '.mp3', '.wav', '.spx']
# Possible date splitters to get the year
YR_SPLIT = ['-', '/', '\\']
# list of tags the program will replace with the correct tag value
MUSIC_TAGS = ['%artist%', '%albumartist%', '%album%', '%year%',
              '%title%', '%disc%', '%track%', '%genre%', '%comment%']
# Per-platform paths: UI definition, config file, icon theme and the
# user's home directory.
if OS == 'nt':
    SLASH = '\\'
    UI_FILE = "./main.ui"
    CONFIG = './mytag.conf'
    ICON_DIR = './gnome/'
    USERHOME = os.getenv('userprofile')
elif OS == 'posix':
    SLASH = '/'
    UI_FILE = "/usr/share/mytag/main.ui"
    CONFIG = xdg_config_dirs[0] + '/mytag.conf'
    ICON_DIR = '/usr/share/icons/gnome/'
    USERHOME = os.getenv('HOME')
class MYTAG(object):
""" browse folders and set tags using ui """
def __init__(self):
    """ start mytag: build the UI, load config, wire signals, run. """
    self.builder = Gtk.Builder()
    self.builder.add_from_file(UI_FILE)
    self.builder.connect_signals(self)
    if not TAG_SUPPORT:
        # eyeD3 missing: raise a desktop notification and abort startup.
        Notify.init('mytag')
        title = 'mytag'
        note = 'ERROR: install python-eyed3'
        notification = Notify.Notification.new(title, note, None)
        Notify.Notification.show(notification)
        # self.popwindow = self.builder.get_object("popup_window")
        # closeerror = self.builder.get_object("closepop")
        # closeerror.connect("clicked", self.closeerror)
        # self.popwindow.set_markup('MYTAG ERROR: Please install' +
        #                           ' python-eyed3')
        # self.popwindow.show()
        Gtk.main_quit(self)
        raise Exception('Please install python-eyed3')
        # Gtk.main()
    else:
        # background thread used by organisefolder()
        self.worker = None
        if not self.worker:
            self.worker = mytagworker.WorkerThread(self)
    # get config info
    self.checkconfig()
    self.conf = ConfigParser.RawConfigParser()
    self.conf.read(CONFIG)
    self.homefolder = self.conf.get('conf', 'home')
    self.library = self.conf.get('conf', 'defaultlibrary')
    self.libraryformat = self.conf.get('conf', 'outputstyle')
    # backwards compatability for new config options: older config files
    # lack these keys, so fall back to defaults.
    try:
        self.stoponerror = self.conf.get('conf', 'stoponerror')
    except ConfigParser.NoOptionError:
        self.stoponerror = 'True'
    try:
        self.movenonmedia = self.conf.get('conf', 'movenonmedia')
    except ConfigParser.NoOptionError:
        self.movenonmedia = 'True'
    try:
        self.windowssafe = self.conf.get('conf', 'windowssafe')
    except ConfigParser.NoOptionError:
        self.windowssafe = 'False'
    self.current_dir = self.homefolder
    self.current_files = None
    self.filelist = None
    # load main window items
    self.window = self.builder.get_object("main_window")
    self.settingsbutton = self.builder.get_object("settingsbutton")
    self.editallbutton = self.builder.get_object("editallbutton")
    self.editbutton = self.builder.get_object("editbutton")
    self.backbutton = self.builder.get_object("backbutton")
    self.homebutton = self.builder.get_object("homebutton")
    self.gobutton = self.builder.get_object("gobutton")
    self.organisebutton = self.builder.get_object('organisebutton')
    self.folderlist = self.builder.get_object('folderstore')
    self.folderview = self.builder.get_object("folderview")
    self.fileview = self.builder.get_object("fileview")
    self.contentlist = self.builder.get_object('filestore')
    self.contenttree = self.builder.get_object('fileview')
    # per-tag enable checkboxes
    self.titlebutton = self.builder.get_object('titlebutton')
    self.artistbutton = self.builder.get_object('artistbutton')
    self.albumbutton = self.builder.get_object('albumbutton')
    self.albumartistbutton = self.builder.get_object('albumart' +
                                                     'istbutton')
    self.genrebutton = self.builder.get_object('genrebutton')
    self.trackbutton = self.builder.get_object('trackbutton')
    self.discbutton = self.builder.get_object('discbutton')
    self.yearbutton = self.builder.get_object('yearbutton')
    self.commentbutton = self.builder.get_object('commentbutton')
    # per-tag text entries
    self.titleentry = self.builder.get_object('titleentry')
    self.artistentry = self.builder.get_object('artistentry')
    self.albumentry = self.builder.get_object('albumentry')
    self.albumartistentry = self.builder.get_object('albumart' +
                                                    'istentry')
    self.genreentry = self.builder.get_object('genreentry')
    self.trackentry = self.builder.get_object('trackentry')
    self.discentry = self.builder.get_object('discentry')
    self.yearentry = self.builder.get_object('yearentry')
    self.commententry = self.builder.get_object('commententry')
    self.tagimage = self.builder.get_object('tagimage')
    self.tagmsg = self.builder.get_object('errormsglabel')
    self.currentdirlabel = self.builder.get_object('currentdirlabel')
    # per-tag clear buttons
    self.deltitlebutton = self.builder.get_object('clearbutton1')
    self.delartistbutton = self.builder.get_object('clearbutton2')
    self.delalbumbutton = self.builder.get_object('clearbutton3')
    self.delalbumartistbutton = self.builder.get_object('clearbutton4')
    self.delgenrebutton = self.builder.get_object('clearbutton5')
    self.deltrackbutton = self.builder.get_object('clearbutton6')
    self.deldiscbutton = self.builder.get_object('clearbutton7')
    self.delyearbutton = self.builder.get_object('clearbutton8')
    self.delcommentbutton = self.builder.get_object('clearbutton9')
    # fill delete button images
    self.delimage1 = self.builder.get_object('delimage1')
    self.delimage2 = self.builder.get_object('delimage2')
    self.delimage3 = self.builder.get_object('delimage3')
    self.delimage4 = self.builder.get_object('delimage4')
    self.delimage5 = self.builder.get_object('delimage5')
    self.delimage6 = self.builder.get_object('delimage6')
    self.delimage7 = self.builder.get_object('delimage7')
    self.delimage8 = self.builder.get_object('delimage8')
    self.delimage9 = self.builder.get_object('delimage9')
    # load config window items
    self.confwindow = self.builder.get_object("config_window")
    self.libraryentry = self.builder.get_object('libraryentry')
    self.styleentry = self.builder.get_object('styleentry')
    self.homeentry = self.builder.get_object('homeentry')
    self.errorcheck = self.builder.get_object('errorcheck')
    self.mediacheck = self.builder.get_object('nonmediacheck')
    self.windowscheck = self.builder.get_object('windowssafecheck')
    self.applybutton = self.builder.get_object("applyconf")
    self.closebutton = self.builder.get_object("closeconf")
    # load popup window items
    self.popwindow = self.builder.get_object("popup_window")
    self.popbutton = self.builder.get_object("closepop")
    self.successwindow = self.builder.get_object("success_window")
    self.successbutton = self.builder.get_object("closesuccess")
    # set tag items (filled in by loadlists/loadtags)
    self.title = None
    self.artist = None
    self.album = None
    self.albumartist = None
    self.genre = None
    self.track = None
    self.disc = None
    self.year = None
    self.comment = None
    self.tracklist = None
    self.trackselection = None
    self.uibuttons = None
    # create lists and connect actions
    self.loadlists()
    self.connectui()
    self.run()
def connectui(self):
    """ connect all the window widgets to their signal handlers """
    # main window actions
    self.window.connect("destroy", self.quit)
    self.window.connect("key-release-event", self.shortcatch)
    self.folderview.connect("key-press-event", self.keypress)
    self.fileview.connect("key-press-event", self.keypress)
    # typing in any tag entry auto-ticks its checkbox (entrycatch)
    self.titleentry.connect("key-press-event", self.entrycatch)
    self.artistentry.connect("key-press-event", self.entrycatch)
    self.albumentry.connect("key-press-event", self.entrycatch)
    self.albumartistentry.connect("key-press-event", self.entrycatch)
    self.genreentry.connect("key-press-event", self.entrycatch)
    self.trackentry.connect("key-press-event", self.entrycatch)
    self.discentry.connect("key-press-event", self.entrycatch)
    self.yearentry.connect("key-press-event", self.entrycatch)
    self.commententry.connect("key-press-event", self.entrycatch)
    self.settingsbutton.connect("clicked", self.showconfig)
    self.editallbutton.connect("clicked", self.loadcurrentfolder)
    self.editbutton.connect("clicked", self.loadselection)
    self.backbutton.connect("clicked", self.goback)
    self.homebutton.connect("clicked", self.gohome)
    self.gobutton.connect("clicked", self.savetags)
    self.organisebutton.connect("clicked", self.organisefolder)
    # per-tag clear buttons all share one handler
    self.deltitlebutton.connect("clicked", self.clearentries)
    self.delartistbutton.connect("clicked", self.clearentries)
    self.delalbumbutton.connect("clicked", self.clearentries)
    self.delalbumartistbutton.connect("clicked", self.clearentries)
    self.delgenrebutton.connect("clicked", self.clearentries)
    self.deltrackbutton.connect("clicked", self.clearentries)
    self.deldiscbutton.connect("clicked", self.clearentries)
    self.delyearbutton.connect("clicked", self.clearentries)
    self.delcommentbutton.connect("clicked", self.clearentries)
    # fill delete button images
    self.deltitlebutton.set_image(self.delimage1)
    self.delartistbutton.set_image(self.delimage2)
    self.delalbumbutton.set_image(self.delimage3)
    self.delalbumartistbutton.set_image(self.delimage4)
    self.delgenrebutton.set_image(self.delimage5)
    self.deltrackbutton.set_image(self.delimage6)
    self.deldiscbutton.set_image(self.delimage7)
    self.delyearbutton.set_image(self.delimage8)
    self.delcommentbutton.set_image(self.delimage9)
    # config window actions
    self.applybutton.connect("clicked", self.saveconf)
    self.closebutton.connect("clicked", self.closeconf)
    # popup window actions
    self.popbutton.connect("clicked", self.closepop)
    self.successbutton.connect("clicked", self.closesuccess)
    # set up file and folder lists
    cell = Gtk.CellRendererText()
    foldercolumn = Gtk.TreeViewColumn("Select Folder:", cell, text=0)
    filecolumn = Gtk.TreeViewColumn("Select Files", cell, text=0)
    self.folderview.connect("row-activated", self.folderclick)
    self.folderview.append_column(foldercolumn)
    self.folderview.set_model(self.folderlist)
    self.fileview.connect("row-activated", self.loadselection)
    self.contenttree.append_column(filecolumn)
    self.contenttree.set_model(self.contentlist)
    self.tagimage.set_from_file(ICON_DIR + '16x16/emotes/face-plain.png')
    # list default dir on startup
    if not os.path.isdir(self.homefolder):
        try:
            os.makedirs(self.homefolder)
        except OSError:
            # unable to create homefolder; fall back to the user's home
            self.homefolder = USERHOME
    self.listfolder(self.homefolder)
    return
def run(self):
    """ display the main window and enter the GTK main loop """
    self.window.show()
    Gtk.main()
def loadlists(self):
    """ (Re)create the per-tag working lists used while editing.

    trackselection aliases the nine tag lists in a fixed order;
    uibuttons pairs each tag checkbox with its text entry. """
    (self.title, self.artist, self.album, self.albumartist, self.genre,
     self.track, self.disc, self.year,
     self.comment) = ([] for _ in range(9))
    self.tracklist = []
    self.trackselection = [self.title, self.artist, self.album,
                           self.albumartist, self.genre, self.track,
                           self.disc, self.year, self.comment]
    self.uibuttons = [[self.titlebutton, self.titleentry],
                      [self.artistbutton, self.artistentry],
                      [self.albumbutton, self.albumentry],
                      [self.albumartistbutton, self.albumartistentry],
                      [self.genrebutton, self.genreentry],
                      [self.trackbutton, self.trackentry],
                      [self.discbutton, self.discentry],
                      [self.yearbutton, self.yearentry],
                      [self.commentbutton, self.commententry]]
    return
def showconfig(self, *args):
    """ populate the config window from the live settings and show it """
    self.homeentry.set_text(self.homefolder)
    self.libraryentry.set_text(self.library)
    self.styleentry.set_text(self.libraryformat)
    # checkbox state mirrors the 'True'/'False' strings kept in config
    self.errorcheck.set_active(self.stoponerror == 'True')
    self.mediacheck.set_active(self.movenonmedia == 'True')
    self.windowscheck.set_active(self.windowssafe == 'True')
    self.confwindow.show()
    return
def saveconf(self, *args):
    """ persist settings from the config window and apply them live """
    self.conf.read(CONFIG)
    self.conf.set('conf', 'home', self.homeentry.get_text())
    self.conf.set('conf', 'defaultlibrary', self.libraryentry.get_text())
    self.conf.set('conf', 'outputstyle', self.styleentry.get_text())
    # each checkbox is stored as the string 'True'/'False' and mirrored
    # onto the instance attribute of the same name
    for checkbox, option in ((self.errorcheck, 'stoponerror'),
                             (self.mediacheck, 'movenonmedia'),
                             (self.windowscheck, 'windowssafe')):
        state = 'True' if checkbox.get_active() else 'False'
        self.conf.set('conf', option, state)
        setattr(self, option, state)
    self.homefolder = self.homeentry.get_text()
    self.library = self.libraryentry.get_text()
    self.libraryformat = self.styleentry.get_text()
    # write to conf file
    conffile = open(CONFIG, "w")
    self.conf.write(conffile)
    conffile.close()
    # desktop notification confirming the save
    Notify.init('mytag')
    title = 'mytag'
    note = 'CONFIG: Changes Saved'
    notification = Notify.Notification.new(title, note, ICON_DIR +
                                           '24x24/actions/gtk-save.png')
    Notify.Notification.show(notification)
    return
def checkconfig(self):
    """ Create a default config file if one is not available.

    Fixes:
    - use the platform-aware USERHOME constant instead of
      os.getenv('HOME'), which is unset on Windows and produced a
      config containing the literal string 'None';
    - write the file via ``with`` so the handle is closed even if the
      write fails.
    """
    if not os.path.isdir(os.path.dirname(CONFIG)):
        os.makedirs(os.path.dirname(CONFIG))
    if not os.path.isfile(CONFIG):
        with open(CONFIG, "w") as conffile:
            conffile.write("[conf]\nhome = " + USERHOME +
                           "\ndefaultlibrary = " + USERHOME +
                           "\noutputstyle = %albumartist%/(%year%) " +
                           "%album%/%disc%%track% - %title%\n" +
                           "stoponerror = True\nmovenonmedia = True\n" +
                           "windowssafe = False\n")
    return
def closeconf(self, *args):
    """ dismiss the config window without saving """
    self.confwindow.hide()
    return
def closeerror(self, *args):
    """ tear down the error dialog, stop GTK and abort startup
    (mytag cannot run without python-eyed3) """
    self.popwindow.destroy()
    Gtk.main_quit(*args)
    raise Exception('Please install python-eyed3')
def closepop(self, *args):
    """ dismiss the error popup window """
    self.popwindow.hide()
    return
def closesuccess(self, *args):
    """ dismiss the organise-completed window """
    self.successwindow.hide()
    return
def clearentries(self, actor):
    """ Clear one tag entry (and untick its checkbox) when its clear
    button *actor* is clicked. """
    groups = ((self.deltitlebutton, self.titlebutton, self.titleentry),
              (self.delartistbutton, self.artistbutton,
               self.artistentry),
              (self.delalbumbutton, self.albumbutton, self.albumentry),
              (self.delalbumartistbutton, self.albumartistbutton,
               self.albumartistentry),
              (self.delgenrebutton, self.genrebutton, self.genreentry),
              (self.deltrackbutton, self.trackbutton, self.trackentry),
              (self.deldiscbutton, self.discbutton, self.discentry),
              (self.delyearbutton, self.yearbutton, self.yearentry),
              (self.delcommentbutton, self.commentbutton,
               self.commententry))
    for delbutton, tagbutton, tagentry in groups:
        if actor == delbutton:
            if tagbutton.get_active():
                tagbutton.set_active(False)
            tagentry.set_text('')
def loadselection(self, *args):
    """ load the files selected in the file view into the tag editor """
    model, rows = self.contenttree.get_selection().get_selected_rows()
    self.current_files = [self.current_dir + '/' + model[row][0]
                          for row in rows]
    # reset the status face before loading
    self.tagimage.set_from_file(ICON_DIR + '16x16/emotes/face-plain.png')
    self.loadtags(self.current_files)
    return
def loadcurrentfolder(self, *args):
    """ load every media file in the current folder into the tag editor """
    self.current_files = []
    for name in os.listdir(self.current_dir):
        path = self.current_dir + '/' + name
        # extension check; rfind == -1 (no dot) safely yields a
        # one-character tail that never matches MEDIA_TYPES
        is_media = name[name.rfind('.'):].lower() in MEDIA_TYPES
        if os.path.isfile(path) and is_media:
            self.current_files.append(path)
    # reset the status face before loading
    self.tagimage.set_from_file(ICON_DIR + '16x16/emotes/face-plain.png')
    self.loadtags(self.current_files)
    return
def folderclick(self, *args):
    """ Descend into a folder on double click.

    Fixed: when no row is selected ``treeiter`` is None and the old code
    still called ``os.path.isdir(None)``, which raises TypeError; the
    isdir test now only runs for an actual selection.
    """
    model, treeiter = self.folderview.get_selection().get_selected()
    if treeiter:
        new_dir = self.current_dir + '/' + model[treeiter][0]
        if os.path.isdir(new_dir):
            self.listfolder(new_dir)
    return
def gohome(self, *args):
    """ clear any open files and jump to the configured home folder """
    self.clearopenfiles()
    self.listfolder(self.homefolder)
def goback(self, *args):
    """ clear any open files and move up to the parent directory """
    parent = os.path.dirname(self.current_dir)
    self.clearopenfiles()
    self.listfolder(parent)
    return
def keypress(self, actor, event):
    """ capture backspace (hardware keycode 22 -- TODO confirm across
    layouts) in the tree views for folder navigation """
    if event.get_keycode()[1] == 22:
        self.goback()
def shortcatch(self, actor, event):
    """ Ctrl-key shortcuts: save / edit selection / back / home.

    Keycodes 39/46/56/43 presumably map to s/l/b/h on X11 -- confirm. """
    ctrl_held = (event.state & Gdk.ModifierType.CONTROL_MASK ==
                 Gdk.ModifierType.CONTROL_MASK)
    if not (event.get_state() and ctrl_held):
        return
    keycode = event.get_keycode()[1]
    if keycode == 39:
        self.savetags()
    if keycode == 46:
        self.loadselection()
    if keycode == 56:
        self.goback()
    if keycode == 43:
        self.gohome()
def entrycatch(self, actor, event):
    """ When the user actually types in a tag entry (not just moving the
    cursor or holding Ctrl), tick that entry's checkbox so the value is
    included on save. """
    movement_keys = [22, 23, 36, 37, 50, 62, 64, 65, 66,
                     105, 108, 110, 111, 112, 113,
                     114, 115, 116, 117, 118, 119]
    ctrl_held = (event.state & Gdk.ModifierType.CONTROL_MASK ==
                 Gdk.ModifierType.CONTROL_MASK)
    # only set active when not using movement keys
    if event.get_keycode()[1] in movement_keys or ctrl_held:
        return
    pairs = ((self.titleentry, self.titlebutton),
             (self.artistentry, self.artistbutton),
             (self.albumentry, self.albumbutton),
             (self.albumartistentry, self.albumartistbutton),
             (self.genreentry, self.genrebutton),
             (self.trackentry, self.trackbutton),
             (self.discentry, self.discbutton),
             (self.yearentry, self.yearbutton),
             (self.commententry, self.commentbutton))
    for entry, button in pairs:
        if actor == entry and not button.get_active():
            button.set_active(True)
def quit(self, *args):
    """ stop the worker thread, destroy the windows and leave GTK """
    if self.worker:
        self.worker.stop()
    self.confwindow.destroy()
    self.window.destroy()
    Gtk.main_quit(*args)
    return False
def organisefolder(self, *args):
    """ send organise to the workerthread for processing

    The worker returns a string for folder/file errors, a 2-item list
    for missing-tag errors, or anything else on success; each case
    raises a desktop notification and refreshes the folder view.
    Returns False on error, True on success. """
    returnstring = self.worker.run(self.current_dir, self.filelist,
                                   self.library, self.libraryformat,
                                   self.stoponerror, self.movenonmedia,
                                   self.windowssafe)
    # notify for different errors
    if type(returnstring) == type(''):
        if returnstring == 'permissions':
            Notify.init('mytag')
            title = 'mytag'
            note = 'ERROR: Check Folder Permissions'
            notification = Notify.Notification.new(title, note, ICON_DIR +
                                                   '24x24/status/error.png')
            Notify.Notification.show(notification)
            # self.popwindow.set_markup('Error: Unable to modify folder.' +
            #                           ' Check Permissions')
            # self.popwindow.show()
            self.listfolder(self.current_dir)
            return False
        else:
            # any other string is the path of a file that failed to open
            Notify.init('mytag')
            title = 'mytag'
            note = 'ERROR: Opening ' + returnstring
            notification = Notify.Notification.new(title, note, ICON_DIR +
                                                   '24x24/status/error.png')
            Notify.Notification.show(notification)
            # self.popwindow.set_markup('Error: Opening ' + returnstring)
            # self.popwindow.show()
            self.listfolder(self.current_dir)
            return False
    if type(returnstring) == type([]):
        # [missing tag name, detail message]
        Notify.init('mytag')
        title = 'mytag'
        note = 'ERROR: ' + returnstring[0] + ' missing. ' + returnstring[1]
        notification = Notify.Notification.new(title, note, ICON_DIR +
                                               '24x24/status/error.png')
        Notify.Notification.show(notification)
        # self.popwindow.set_markup('Error: ' + returnstring[0] +
        #                           ' missing')
        # self.popwindow.format_secondary_text(returnstring[1])
        # self.popwindow.show()
        self.listfolder(self.current_dir)
        return False
    else:
        Notify.init('mytag')
        title = 'mytag'
        note = 'SUCCESS: Your files have been organised'
        notification = Notify.Notification.new(title, note, ICON_DIR +
                                               '24x24/actions/filesave.png')
        Notify.Notification.show(notification)
        # self.successwindow.show()
        # the organise may have moved the current folder away; climb to
        # the nearest directory that still exists
        if not os.path.isdir(self.current_dir):
            if os.path.isdir(os.path.dirname(self.current_dir)):
                self.current_dir = os.path.dirname(self.current_dir)
                self.listfolder(self.current_dir)
            else:
                self.gohome()
        else:
            self.listfolder(self.current_dir)
    return True
def savetags(self, *args):
    """ update the loaded files with the tag values ticked in the UI """
    count = 0
    tmp_changes = []
    # reset tag image for each save
    self.tagimage.set_from_file(ICON_DIR + '16x16/emotes/face-plain.png')
    while Gtk.events_pending():
        Gtk.main_iteration()
    # check for changes -- tmp_changes is still [] here, so this bails
    # out only when no files are loaded (current_files == [])
    if self.current_files == tmp_changes:
        return False
    # add changes that are ticked in the UI as [index, text] pairs,
    # index matching the uibuttons ordering
    while count < len(self.uibuttons):
        if self.uibuttons[count][0].get_active():
            tmp_changes.append([count, self.uibuttons[count][1].get_text()])
        count += 1
    save_fail = False
    # update tags for each file selected
    for files in self.current_files:
        tmp_title = None
        tmp_artist = None
        tmp_album = None
        tmp_albumartist = None
        tmp_genre = None
        tmp_track = None
        tmp_disc = None
        tmp_year = None
        tmp_comment = None
        try:
            # open the file with eyeD3, forcing ID3v2.4/UTF-8 output
            item = eyeD3.Tag()
            item.link(files)
            item.setVersion(eyeD3.ID3_V2_4)
            item.setTextEncoding(eyeD3.UTF_8_ENCODING)
        except:
            # bare except: unreadable/untaggable file -- skip it
            item = None
            save_fail = True
        if item:
            # get the current tags; eyeD3 reports missing values as the
            # string 'None', normalised to real None below
            current_title = item.getTitle()
            if current_title == 'None':
                current_title = None
            current_artist = item.getArtist('TPE1')
            if current_artist == 'None':
                current_artist = None
            current_album = item.getAlbum()
            if current_album == 'None':
                current_album = None
            current_albumartist = item.getArtist('TPE2')
            if current_albumartist == 'None':
                current_albumartist = None
            try:
                current_genre = str(item.getGenre())
            except eyeD3.tag.GenreException:
                current_genre = None
            if current_genre == 'None':
                current_genre = None
            if current_genre:
                # normalise "(id)name" style genres and path-unsafe '/'
                current_genre = current_genre.replace('/', '_')
                if ')' in current_genre:
                    current_genre = current_genre.split(')')[1]
            # track number: strip "/total", zero-pad to two digits
            current_track = str(item.getTrackNum()[0])
            if '/' in current_track:
                current_track = current_track.split('/')[0]
            if len(current_track) == 1:
                current_track = '0' + str(current_track)
            if len(current_track) > 2:
                current_track = current_track[:2]
            current_disc = str(item.getDiscNum()[0])
            if current_disc == 'None':
                current_disc = None
            if current_disc:
                if '/' in current_disc:
                    current_disc = current_disc.split('/')[0]
                # NOTE(review): compares str to int -- on Python 2 this
                # is always False so the branch never fires (and it
                # would raise TypeError on Python 3); confirm intent.
                if len(current_disc) == 2 and current_disc <= 9:
                    current_disc = current_disc[-1]
            current_year = item.getYear()
            current_comment = item.getComment()
            if current_comment == 'None':
                current_comment = None
            # get the changes from the UI (index -> tag field)
            for changes in tmp_changes:
                if changes[0] == 0:
                    tmp_title = changes[1]
                if changes[0] == 1:
                    tmp_artist = changes[1]
                if changes[0] == 2:
                    tmp_album = changes[1]
                if changes[0] == 3:
                    tmp_albumartist = changes[1]
                if changes[0] == 4:
                    tmp_genre = changes[1]
                if changes[0] == 5:
                    tmp_track = changes[1]
                if changes[0] == 6:
                    tmp_disc = changes[1]
                if changes[0] == 7:
                    tmp_year = changes[1]
                if changes[0] == 8:
                    tmp_comment = changes[1]
            # compare and set changes if required
            if tmp_title is not None and tmp_title != current_title:
                item.setTitle(tmp_title)
            if tmp_artist is not None and tmp_artist != current_artist:
                item.setArtist(tmp_artist)
            if tmp_album is not None and tmp_album != current_album:
                item.setAlbum(tmp_album)
            if tmp_albumartist is not None and (tmp_albumartist !=
                                                current_albumartist):
                item.setArtist(tmp_albumartist, 'TPE2')
            if tmp_genre is not None and tmp_genre != current_genre:
                item.setGenre(tmp_genre)
            if tmp_track is not None and tmp_track != current_track:
                item.setTrackNum([tmp_track, None])
            if tmp_disc is not None and tmp_disc != current_disc:
                item.setDiscNum([tmp_disc, None])
            if tmp_year is not None and tmp_year != current_year:
                # write the year into all three date frames
                item.setTextFrame('TDRC', tmp_year)
                item.setTextFrame('TDRL', tmp_year)
                item.setTextFrame('TYER', tmp_year)
            if tmp_comment is not None and tmp_comment != current_comment:
                item.removeComments()
                item.addComment(tmp_comment)
            try:
                # write changes to file
                item.update(eyeD3.ID3_V2_4)
            except IOError:
                self.tagmsg.set_text('File Permission Error')
                self.tagimage.set_from_file(ICON_DIR +
                                            '16x16/emotes/face-crying.png')
                save_fail = True
                print('Tag Save Error ' + files)
                return False
            except:
                # bare except: any other eyeD3 failure aborts the save
                self.tagimage.set_from_file(ICON_DIR +
                                            '16x16/emotes/face-crying.png')
                save_fail = True
                print('Tag Save Error ' + files)
                return False
    # reload new tags after saving files
    self.loadtags(self.current_files)
    if not save_fail:
        self.tagimage.set_from_file(ICON_DIR +
                                    '16x16/emotes/face-laugh.png')
    return
def loadtags(self, *args):
""" connect chosen files with tags """
self.loadlists()
self.clearopenfiles()
filelist = args[0]
# try to get disk/track by filenames
filenames = []
numericlist = ['1', '2', '3', '4', '5', '6', '7', '8', '9']
punctlist = [' ', '.', '-', '_']
discfinder = None
discchanged = False
trackfinder = None
trackchanged = False
# multipletracks = True
multipledisc = False
test_disc = None
# get the file basenames for checking
for musicfiles in filelist:
filenames.append(os.path.basename(musicfiles))
# for single files attempt to guess disk and track if missing
if len(filenames) == 1:
one = filenames[0][0]
two = filenames[0][1]
three = filenames[0][2]
four = filenames[0][3]
# possible no disc eg. "01.", "03-"
if one == '0' and two in numericlist and three in punctlist:
discfinder = '1'
trackfinder = one + two
# files with disc number "101-", etc
elif (one in numericlist and (two in numericlist or two == '0') and
three in numericlist and four in punctlist):
discfinder = one
trackfinder = two + three
else:
discfinder = 'None'
# for whole lists only find disk and search track for each file
elif len(filenames) > 1:
trackfinder = '[Multiple]'
for i in filenames:
one = i[0]
two = i[1]
three = i[2]
four = i[3]
# possible no disc eg. "01.", "03-"
if one == '0' and two in numericlist and three in punctlist:
test_disc = '1'
# files with disc number "101-", etc
elif (one in numericlist and
(two in numericlist or two == '0') and
three in numericlist and
four in punctlist):
if not multipledisc:
test_disc = one
multipledisc = True
elif one > test_disc:
test_disc = 'None'
discfinder = test_disc
# pull tags for each music file
for musicfiles in filelist:
# filename = os.path.basename(musicfiles)
tmp_title = None
tmp_artist = None
tmp_album = None
tmp_albumartist = None
tmp_genre = None
tmp_track = None
tmp_disc = None
tmp_year = None
tmp_comment = None
try:
item = eyeD3.Tag()
item.link(musicfiles)
item.setVersion(eyeD3.ID3_V2_4)
item.setTextEncoding(eyeD3.UTF_8_ENCODING)
except:
# Tag error
item = None
# for single files attempt to guess disk and track if missing
one = musicfiles[0]
two = musicfiles[1]
three = musicfiles[2]
four = musicfiles[3]
# possible no disc eg. "01.", "03-"
if one == '0' and two in numericlist and three in punctlist:
if len(filenames) == 1:
discfinder = '1'
trackfinder = one + two
# files with disc number "101-", etc
elif (one in numericlist and (two in numericlist or two == '0') and
three in numericlist and four in punctlist):
if len(filenames) == 1:
discfinder = one
trackfinder = two + three
# pull tag info per item
if item:
tmp_title = item.getTitle()
if tmp_title == 'None':
tmp_title = None
tmp_artist = item.getArtist('TPE1')
if tmp_artist == 'None':
tmp_artist = None
tmp_album = item.getAlbum()
if tmp_album == 'None':
tmp_album = None
tmp_albumartist = item.getArtist('TPE2')
if tmp_albumartist == 'None':
tmp_albumartist = None
try:
tmp_genre = str(item.getGenre())
except eyeD3.tag.GenreException:
tmp_genre = None
if tmp_genre == 'None':
tmp_genre = None
if tmp_genre:
tmp_genre = tmp_genre.replace('/', '_')
if ')' in tmp_genre:
tmp_genre = tmp_genre.split(')')[1]
tmp_track = str(item.getTrackNum()[0])
if tmp_track == 'None':
if trackfinder != 'None':
print('No Track Tag')
trackchanged = True
tmp_track = trackfinder
else:
tmp_track = None
if tmp_track:
if '/' in tmp_track:
tmp_track = tmp_track.split('/')[0]
if len(tmp_track) == 1:
tmp_track = '0' + str(tmp_track)
if len(tmp_track) > 2:
tmp_track = tmp_track[:2]
tmp_disc = str(item.getDiscNum()[0])
if discfinder and tmp_disc == 'None':
print('No Disc Tag')
discchanged = True
tmp_disc = discfinder
if tmp_disc == 'None':
tmp_disc = None
if tmp_disc:
if '/' in tmp_disc:
tmp_disc = tmp_disc.split('/')[0]
if len(tmp_disc) == 2 and tmp_disc <= 9:
tmp_disc = tmp_disc[-1]
tmp_year = item.getYear()
if tmp_year == 'None':
tmp_year = None
tmp_comment = item.getComment()
if tmp_comment == 'None':
tmp_comment = None
# add tags to list
self.title.append(tmp_title)
self.artist.append(tmp_artist)
self.album.append(tmp_album)
self.albumartist.append(tmp_albumartist)
self.genre.append(tmp_genre)
self.track.append(tmp_track)
self.disc.append(tmp_disc)
self.year.append(tmp_year)
self.comment.append(tmp_comment)
# compare tags
count = 0
for types in self.trackselection:
if not types:
return False
comparison = False
if len(args[0]) == 1:
comparison = True
for item in types[1:]:
if item is None:
comparison = False
break
if item != types[0]:
comparison = False
break
comparison = True
if comparison:
self.uibuttons[count][0].set_active(True)
if types[0]:
self.uibuttons[count][1].set_text(types[0])
if count == 5 and trackchanged:
self.uibuttons[count][0].set_active(False)
if count == 6 and discchanged:
self.uibuttons[count][0].set_active(False)
else:
self.uibuttons[count][0].set_active(False)
self.uibuttons[count][1].set_text('')
else:
self.uibuttons[count][0].set_active(False)
if not types[0]:
self.uibuttons[count][1].set_text('')
else:
self.uibuttons[count][1].set_text('[Multiple]')
count += 1
return
def clearopenfiles(self):
""" clear the tags ui when changing folder """
count = 0
while count < len(self.uibuttons):
self.uibuttons[count][0].set_active(False)
self.uibuttons[count][1].set_text('')
count += 1
self.tagimage.set_from_file(ICON_DIR + '16x16/emotes/face-plain.png')
self.tagmsg.set_text('')
return
def listfolder(self, *args):
""" function to list the folder column """
self.current_dir = args[0]
self.current_dir = self.current_dir.replace('//', '/')
self.currentdirlabel.set_text('Current Folder: ' +
str(os.path.normpath(self.current_dir)))
if not type(args[0]) == type(''):
self.current_dir = args[0].get_current_folder()
try:
self.filelist = os.listdir(self.current_dir)
self.filelist.sort(key=lambda y: y.lower())
except OSError:
self.gohome()
# clear list if we have scanned before
for items in self.folderlist:
self.folderlist.remove(items.iter)
# clear combobox before adding entries
for items in self.folderview:
self.folderview.remove(items.iter)
# search the supplied directory for items
for items in self.filelist:
test_dir = os.path.isdir(self.current_dir + '/' + items)
if not items[0] == '.' and test_dir:
self.folderlist.append([items])
if len(self.folderlist) == 0:
self.folderlist.append(['[No more Folders]'])
self.clearopenfiles()
self.listfiles()
return
def listfiles(self, *args):
""" function to fill the file list column """
files_dir = None
self.current_files = []
try:
files_dir = os.listdir(self.current_dir)
files_dir.sort(key=lambda y: y.lower())
except OSError:
self.gohome()
# clear list if we have scanned before
for items in self.contentlist:
self.contentlist.remove(items.iter)
# clear combobox before adding entries
for items in self.contenttree:
self.contenttree.remove(items.iter)
# search the supplied directory for items
for items in files_dir:
test_file = os.path.isfile(self.current_dir + '/' + items)
test_ext = items[(items.rfind('.')):].lower() in MEDIA_TYPES
if not items[0] == '.' and test_file and test_ext:
self.contentlist.append([items])
if len(self.contentlist) == 0:
self.contentlist.append(['[No media files found]'])
return
if __name__ == "__main__":
    # NOTE(review): GLib.threads_init() has been a deprecated no-op in modern
    # PyGObject/GLib (>= 2.32) — confirm the target GLib version before removing.
    GLib.threads_init()
    # Construct the application object; its initializer is expected to build
    # and run the UI (no reference is kept).
    MYTAG()
| gpl-3.0 |
Matty-Downing2169/opencamlib | scripts/drop-cutter/drop_cutter_tst_3.py | 8 | 1238 | import ocl
import pyocl
import camvtk
import time
if __name__ == "__main__":
    # Python 2 demo script: drop a cylindrical cutter onto a single triangle
    # and visualise the resulting cutter-location (CL) points with VTK.
    myscreen = camvtk.VTKScreen()
    # Triangle corners a, b, c, each mirrored by a white point marker.
    a=ocl.Point(1,0,0)
    myscreen.addActor(camvtk.Point(center=(1,0,0), color=(1,1,1)));
    b=ocl.Point(0,1,0)
    myscreen.addActor(camvtk.Point(center=(0,1,0), color=(1,1,1)));
    c=ocl.Point(0,0,0.2)
    myscreen.addActor( camvtk.Point(center=(0,0,0.2), color=(1,1,1)));
    # Triangle edges drawn for reference.
    myscreen.addActor( camvtk.Line(p1=(1,0,0),p2=(0,0,0.2)) )
    myscreen.addActor( camvtk.Line(p1=(0,0,0.2),p2=(0,1,0)) )
    myscreen.addActor( camvtk.Line(p1=(1,0,0),p2=(0,1,0)) )
    t = ocl.Triangle(a,b,c)
    # Cylindrical cutter: diameter 0.3, length 5.
    cutter = ocl.CylCutter(.3, 5)
    print cutter
    # Rectangular XY grid of CL points, all starting below the part at z=-0.2.
    minx=-0.2
    dx=0.02
    maxx=1.2
    miny=-0.2
    dy=0.2
    maxy=1.2
    z=-0.2
    clpoints = pyocl.CLPointGrid(minx,dx,maxx,miny,dy,maxy,z)
    # Drop the cutter onto the triangle at every grid point; dropCutter
    # presumably lifts each point's z to the contact height — confirm in ocl docs.
    for cl in clpoints:
        cutter.dropCutter(cl,t)
    print len(clpoints), " cl points evaluated"
    # draw the points
    camvtk.drawCLPointCloud(myscreen, clpoints)
    #position camera
    myscreen.camera.SetPosition(0.5, 3, 2)
    myscreen.camera.SetFocalPoint(0.5, 0.5, 0)
    myscreen.render()
    # Hand control to the VTK interactor (blocks until the window is closed).
    myscreen.iren.Start()
    #raw_input("Press Enter to terminate")
| gpl-3.0 |
shareactorIO/pipeline | oreilly.ml/high-performance-tensorflow/config/jupyterhub/jupyterhub_config.py | 1 | 16044 | import os
# Configuration file for jupyterhub.
#------------------------------------------------------------------------------
# Configurable configuration
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# LoggingConfigurable configuration
#------------------------------------------------------------------------------
# A parent class for Configurables that log.
#
# Subclasses have a log trait, and the default behavior is to get the logger
# from the currently running Application.
#------------------------------------------------------------------------------
# SingletonConfigurable configuration
#------------------------------------------------------------------------------
# A configurable that only allows one instance.
#
# This class is for classes that should only have one instance of itself or
# *any* subclass. To create and retrieve such a class use the
# :meth:`SingletonConfigurable.instance` method.
#------------------------------------------------------------------------------
# Application configuration
#------------------------------------------------------------------------------
# This is an application.
# The date format used by logging formatters for %(asctime)s
# c.Application.log_datefmt = '%Y-%m-%d %H:%M:%S'
# The Logging format template
# c.Application.log_format = '[%(name)s]%(highlevel)s %(message)s'
# Set the log level by value or name.
# c.Application.log_level = 30
#------------------------------------------------------------------------------
# JupyterHub configuration
#------------------------------------------------------------------------------
# An Application for starting a Multi-User Jupyter Notebook server.
# Grant admin users permission to access single-user servers.
#
# Users should be properly informed if this is enabled.
c.JupyterHub.admin_access = True
# DEPRECATED, use Authenticator.admin_users instead.
# c.JupyterHub.admin_users = set()
# Answer yes to any questions (e.g. confirm overwrite)
c.JupyterHub.answer_yes = True
# Dict of token:username to be loaded into the database.
#
# Allows ahead-of-time generation of API tokens for use by services.
# c.JupyterHub.api_tokens = {}
# Class for authenticating users.
#
# This should be a class with the following form:
#
# - constructor takes one kwarg: `config`, the IPython config object.
#
# - is a tornado.gen.coroutine
# - returns username on success, None on failure
# - takes two arguments: (handler, data),
# where `handler` is the calling web.RequestHandler,
# and `data` is the POST form data from the login page.
#c.JupyterHub.authenticator_class = 'jupyterhub.auth.PAMAuthenticator'
c.JupyterHub.authenticator_class = 'dummyauthenticator.DummyAuthenticator'
#c.JupyterHub.authenticator_class = 'oauthenticator.GitHubOAuthenticator'
#c.GitHubOAuthenticator.oauth_callback_url = os.environ['OAUTH_CALLBACK_URL']
#c.GitHubOAuthenticator.client_id = os.environ['GITHUB_CLIENT_ID']
#c.GitHubOAuthenticator.client_secret = os.environ['GITHUB_CLIENT_SECRET']
# The base URL of the entire application
c.JupyterHub.base_url = '/'
# Whether to shutdown the proxy when the Hub shuts down.
#
# Disable if you want to be able to teardown the Hub while leaving the proxy
# running.
#
# Only valid if the proxy was starting by the Hub process.
#
# If both this and cleanup_servers are False, sending SIGINT to the Hub will
# only shutdown the Hub, leaving everything else running.
#
# The Hub should be able to resume from database state.
c.JupyterHub.cleanup_proxy = True
# Whether to shutdown single-user servers when the Hub shuts down.
#
# Disable if you want to be able to teardown the Hub while leaving the single-
# user servers running.
#
# If both this and cleanup_proxy are False, sending SIGINT to the Hub will only
# shutdown the Hub, leaving everything else running.
#
# The Hub should be able to resume from database state.
c.JupyterHub.cleanup_servers = True
# The config file to load
# c.JupyterHub.config_file = '/root/config/jupyter/jupyterhub_config.py'
# Confirm that JupyterHub should be run without SSL. This is **NOT RECOMMENDED**
# unless SSL termination is being handled by another layer.
c.JupyterHub.confirm_no_ssl = True
# Number of days for a login cookie to be valid. Default is two weeks.
# c.JupyterHub.cookie_max_age_days = 14
# The cookie secret to use to encrypt cookies.
#
# Loaded from the JPY_COOKIE_SECRET env variable by default.
# c.JupyterHub.cookie_secret = b''
# File in which to store the cookie secret.
# c.JupyterHub.cookie_secret_file = '/root/pipeline/work/jupyterhub/jupyterhub_cookie_secret'
# The location of jupyterhub data files (e.g. /usr/local/share/jupyter/hub)
# c.JupyterHub.data_files_path = '/root/pipeline/work/jupyterhub'
# Include any kwargs to pass to the database connection. See
# sqlalchemy.create_engine for details.
# c.JupyterHub.db_kwargs = {}
# url for the database. e.g. `sqlite:///jupyterhub.sqlite`
#c.JupyterHub.db_url = 'sqlite:////root/jupyterhub.sqlite'
# log all database transactions. This has A LOT of output
# c.JupyterHub.debug_db = False
# show debug output in configurable-http-proxy
# c.JupyterHub.debug_proxy = False
# Send JupyterHub's logs to this file.
#
# This will *only* include the logs of the Hub itself, not the logs of the proxy
# or any single-user servers.
c.JupyterHub.extra_log_file = '/root/logs/jupyterhub.log'
# Extra log handlers to set on JupyterHub logger
# c.JupyterHub.extra_log_handlers = []
# Generate default config file
# #c.JupyterHub.generate_config = False
# The ip for this process
c.JupyterHub.hub_ip = '0.0.0.0'
# The port for this process
# c.JupyterHub.hub_port = 3081
# The prefix for the hub server. Must not be '/'
# c.JupyterHub.hub_prefix = '/hub/'
# The public facing ip of the whole application (the proxy)
c.JupyterHub.ip = '0.0.0.0'
# Supply extra arguments that will be passed to Jinja environment.
# c.JupyterHub.jinja_environment_options = {}
# Interval (in seconds) at which to update last-activity timestamps.
# c.JupyterHub.last_activity_interval = 300
# Specify path to a logo image to override the Jupyter logo in the banner.
# c.JupyterHub.logo_file = ''
# File to write the PID to. Useful for daemonizing jupyterhub.
# c.JupyterHub.pid_file = ''
# The public facing port of the proxy
c.JupyterHub.port = 8754
# The ip for the proxy API handlers
c.JupyterHub.proxy_api_ip = '0.0.0.0'
c.Session.debug = True
# The port for the proxy API handlers
# c.JupyterHub.proxy_api_port = 0
# The Proxy Auth token.
#
# Loaded from the CONFIGPROXY_AUTH_TOKEN env variable by default.
# c.JupyterHub.proxy_auth_token = ''
# Interval (in seconds) at which to check if the proxy is running.
# c.JupyterHub.proxy_check_interval = 30
# The command to start the http proxy.
#
# Only override if configurable-http-proxy is not on your PATH
# c.JupyterHub.proxy_cmd = ['configurable-http-proxy']
# Purge and reset the database.
# c.JupyterHub.reset_db = False
# The class to use for spawning single-user servers.
#
# Should be a subclass of Spawner.
#c.JupyterHub.spawner_class = 'jupyterhub.spawner.LocalProcessSpawner'
#c.JupyterHub.spawner_class = 'dockerspawner.DockerSpawner'
c.JupyterHub.spawner_class = 'simplespawner.SimpleLocalProcessSpawner'
c.SimpleLocalProcessSpawner.home_path_template = '/root/'
# Spawn user containers from this image
#c.DockerSpawner.container_image = 'jupyter/pyspark-notebook'
# Have the Spawner override the Docker run command
#c.DockerSpawner.extra_create_kwargs.update({
# 'command': '/usr/local/bin/start-singleuser.sh'
#})
# Path to SSL certificate file for the public facing interface of the proxy
#
# Use with ssl_key
# c.JupyterHub.ssl_cert = ''
# Path to SSL key file for the public facing interface of the proxy
#
# Use with ssl_cert
# c.JupyterHub.ssl_key = ''
# Host to send statsd metrics to
# c.JupyterHub.statsd_host = ''
# Port on which to send statsd metrics about the hub
# c.JupyterHub.statsd_port = 8125
# Prefix to use for all metrics sent by jupyterhub to statsd
# c.JupyterHub.statsd_prefix = 'jupyterhub'
# Run single-user servers on subdomains of this host.
#
# This should be the full https://hub.domain.tld[:port]
#
# Provides additional cross-site protections for javascript served by single-
# user servers.
#
# Requires <username>.hub.domain.tld to resolve to the same host as
# hub.domain.tld.
#
# In general, this is most easily achieved with wildcard DNS.
#
# When using SSL (i.e. always) this also requires a wildcard SSL certificate.
# c.JupyterHub.subdomain_host = ''
# Paths to search for jinja templates.
# c.JupyterHub.template_paths = []
# Extra settings overrides to pass to the tornado application.
# c.JupyterHub.tornado_settings = {}
#------------------------------------------------------------------------------
# Spawner configuration
#------------------------------------------------------------------------------
# Base class for spawning single-user notebook servers.
#
# Subclass this, and override the following methods:
#
# - load_state - get_state - start - stop - poll
# Extra arguments to be passed to the single-user server
# c.Spawner.args = []
# The command used for starting notebooks.
# c.Spawner.cmd = ['jupyterhub-singleuser']
# Enable debug-logging of the single-user server
c.Spawner.debug = True
# The default URL for the single-user server.
#
# Can be used in conjunction with --notebook-dir=/ to enable full filesystem
# traversal, while preserving user's homedir as landing page for notebook
#
# `%U` will be expanded to the user's username
c.Spawner.default_url = '/lab'
# Disable per-user configuration of single-user servers.
#
# This prevents any config in users' $HOME directories from having an effect on
# their server.
c.Spawner.disable_user_config = True
# Whitelist of environment variables for the subprocess to inherit
c.Spawner.env_keep = ['CUDA_PKG_VERSION', 'CUDA_VERSION', 'CUDNN_VERSION', 'HADOOP_HDFS_HOME', 'HADOOP_CONF', 'HADOOP_CONF_DIR', 'HADOOP_HOME', 'HADOOP_OPTS', 'HADOOP_VERSION', 'HOME', 'HOSTNAME', 'JAVA_HOME', 'LD_LIBRARY_PATH', 'LIBRARY_PATH', 'PATH', 'PYSPARK_VERSION', 'PYTHONPATH', 'CONDA_ROOT', 'CONDA_DEFAULT_ENV', 'VIRTUAL_ENV', 'LANG', 'LC_ALL', 'SPARK_HOME', 'SPARK_VERSION', 'TENSORFLOW_VERSION', 'PYSPARK_PYTHON', 'SPARK_MASTER', 'PYSPARK_SUBMIT_ARGS', 'SPARK_SUBMIT_ARGS', 'TF_CPP_MIN_LOG_LEVEL', 'TF_XLA_FLAGS', 'TENSORFLOW_HOME', 'TENSORFLOW_SERVING_HOME']
# Environment variables to load for the Spawner.
#
# Value could be a string or a callable. If it is a callable, it will be called
# with one parameter, which will be the instance of the spawner in use. It
# should quickly (without doing much blocking operations) return a string that
# will be used as the value for the environment variable.
# c.Spawner.environment = {}
# Timeout (in seconds) before giving up on a spawned HTTP server
#
# Once a server has successfully been spawned, this is the amount of time we
# wait before assuming that the server is unable to accept connections.
# c.Spawner.http_timeout = 30
# The IP address (or hostname) the single-user server should listen on
c.Spawner.ip = '0.0.0.0'
# The notebook directory for the single-user server
#
# `~` will be expanded to the user's home directory `%U` will be expanded to the
# user's username
c.Spawner.notebook_dir = 'notebooks'
# An HTML form for options a user can specify on launching their server. The
# surrounding `<form>` element and the submit button are already provided.
#
# For example:
# <br>
# Choose a letter:
# <select name="letter" multiple="true">
# <option value="A">The letter A</option>
# <option value="B">The letter B</option>
# </select>
# c.Spawner.options_form = ''
# Interval (in seconds) on which to poll the spawner.
# c.Spawner.poll_interval = 30
# Timeout (in seconds) before giving up on the spawner.
#
# This is the timeout for start to return, not the timeout for the server to
# respond. Callers of spawner.start will assume that startup has failed if it
# takes longer than this. start should return when the server process is started
# and its location is known.
# c.Spawner.start_timeout = 60
#------------------------------------------------------------------------------
# LocalProcessSpawner configuration
#------------------------------------------------------------------------------
# A Spawner that just uses Popen to start local processes as users.
#
# Requires users to exist on the local system.
#
# This is the default spawner for JupyterHub.
# Seconds to wait for process to halt after SIGINT before proceeding to SIGTERM
# c.LocalProcessSpawner.INTERRUPT_TIMEOUT = 10
# Seconds to wait for process to halt after SIGKILL before giving up
# c.LocalProcessSpawner.KILL_TIMEOUT = 5
# Seconds to wait for process to halt after SIGTERM before proceeding to SIGKILL
# c.LocalProcessSpawner.TERM_TIMEOUT = 5
#------------------------------------------------------------------------------
# Authenticator configuration
#------------------------------------------------------------------------------
# A class for authentication.
#
# The primary API is one method, `authenticate`, a tornado coroutine for
# authenticating users.
# set of usernames of admin users
#
# If unspecified, only the user that launches the server will be admin.
#c.Authenticator.admin_users = {"root"}
# Dictionary mapping authenticator usernames to JupyterHub users.
#
# Can be used to map OAuth service names to local users, for instance.
#
# Used in normalize_username.
# c.Authenticator.username_map = {}
# Regular expression pattern for validating usernames.
#
# If not defined: allow any username.
# c.Authenticator.username_pattern = ''
# Username whitelist.
#
# Use this to restrict which users can login. If empty, allow any user to
# attempt login.
#c.Authenticator.whitelist = set("")
#------------------------------------------------------------------------------
# LocalAuthenticator configuration
#------------------------------------------------------------------------------
# Base class for Authenticators that work with local Linux/UNIX users
#
# Checks for local users, and can attempt to create them if they exist.
# The command to use for creating users as a list of strings.
#
# For each element in the list, the string USERNAME will be replaced with the
# user's username. The username will also be appended as the final argument.
#
# For Linux, the default value is:
#
# ['adduser', '-q', '--gecos', '""', '--disabled-password']
#
# To specify a custom home directory, set this to:
#
# ['adduser', '-q', '--gecos', '""', '--home', '/customhome/USERNAME',
# '--disabled-password']
#
# This will run the command:
#
# adduser -q --gecos "" --home /customhome/river --disabled-password river
#
# when the user 'river' is created.
#c.LocalAuthenticator.add_user_cmd = []
# If a user is added that doesn't exist on the system, should I try to create
# the system user?
c.LocalAuthenticator.create_system_users = False
# Automatically whitelist anyone in this group.
#c.LocalAuthenticator.group_whitelist = set("root")
#------------------------------------------------------------------------------
# PAMAuthenticator configuration
#------------------------------------------------------------------------------
# Authenticate local Linux/UNIX users with PAM
# The encoding to use for PAM
# c.PAMAuthenticator.encoding = 'utf8'
# Whether to open PAM sessions when spawners are started.
#
# This may trigger things like mounting shared filsystems, loading credentials,
# etc. depending on system configuration, but it does not always work.
#
# It can be disabled with::
#
# c.PAMAuthenticator.open_sessions = False
# The PAM service to use for authentication.
# c.PAMAuthenticator.service = 'login'
| apache-2.0 |
Minestack/docker-bukkit | setup.py | 1 | 5241 | #!/bin/python
import os
import sys
from pymongo import MongoClient
from bson.objectid import ObjectId
def modifyConfig(expression, value):
    """Substitute `expression` with `value` in server.properties using sed -i.

    NOTE(review): both arguments are interpolated straight into a shell
    command; values containing '/', quotes or shell metacharacters will break
    the sed expression or the shell — confirm the inputs are trusted.
    """
    print('Modifying server.properties '+expression+' with value '+str(value))
    command = "sed -i 's/" + str(expression) + "/" + str(value) + "/' server.properties"
    os.system(command)
def modifyLog(expression, value):
    """Substitute `expression` with `value` in log4j2.xml using sed -i.

    NOTE(review): same shell-interpolation caveat as modifyConfig — values
    with '/', quotes or shell metacharacters will corrupt the command.
    """
    print('Modifying log4j2.xml '+expression+' with value '+str(value))
    command = "sed -i 's/" + str(expression) + "/" + str(value) + "/' log4j2.xml"
    os.system(command)
def main():
    """Provision and launch a Bukkit server from Minestack metadata in MongoDB.

    Reads connection details and the target server id from environment
    variables, copies server/world/plugin files from /mnt/minestack into the
    working directory, patches server.properties and log4j2.xml, and finally
    runs start.sh with the configured RAM amount.

    Required environment: mongo_addresses (comma separated), mongo_database,
    server_id; optional: mongo_username / mongo_password.

    NOTE(review): every os.system call below interpolates database values
    into a shell string — directory/name fields containing spaces or shell
    metacharacters will break or be executed; confirm inputs are trusted.
    """
    # --- MongoDB connection ------------------------------------------------
    mongoHosts = os.environ['mongo_addresses'].split(',')
    mongoDB = os.environ['mongo_database']
    mongoUsername = os.getenv('mongo_username', None)
    mongoPassword = os.getenv('mongo_password', None)
    client = MongoClient(mongoHosts)
    db = client[mongoDB]
    if mongoUsername is not None:
        db.authenticate(mongoUsername, mongoPassword)
    serverCollection = db['servers']
    servertypesCollection = db['servertypes']
    nodesCollection = db['nodes']
    worldsCollection = db['worlds']
    pluginsCollection = db['plugins']
    # --- resolve server, its type and its node -----------------------------
    query = {"_id": ObjectId(os.environ['server_id'])}
    server = serverCollection.find_one(query)
    query = {"_id": ObjectId(server['server_type_id'])}
    servertype = servertypesCollection.find_one(query)
    query = {"_id": ObjectId(server['node_id'])}
    node = nodesCollection.find_one(query)
    if servertype is None:
        print('No server type found')
        sys.exit(1)
    # --- collect world/plugin records plus the exact version/config to use --
    worlds = []
    plugins = []
    if 'worlds' in servertype:
        for worldInfo in servertype['worlds']:
            world = worldsCollection.find_one({"_id": ObjectId(worldInfo['world_id'])})
            worldVersion = None
            if 'versions' in world and 'worldversion_id' in worldInfo:
                for version in world['versions']:
                    if version['_id'] == ObjectId(worldInfo['worldversion_id']):
                        worldVersion = version
                        break
            default = worldInfo['defaultWorld']
            worldDict = {'world': world, 'version': worldVersion, 'default': default}
            worlds.append(worldDict)
    if 'plugins' in servertype:
        for pluginInfo in servertype['plugins']:
            plugin = pluginsCollection.find_one({"_id": ObjectId(pluginInfo['plugin_id'])})
            pluginConfig = None
            pluginVersion = None
            if 'configs' in plugin and 'pluginconfig_id' in pluginInfo:
                for config in plugin['configs']:
                    if config['_id'] == ObjectId(pluginInfo['pluginconfig_id']):
                        pluginConfig = config
                        break
            if 'versions' in plugin and 'pluginversion_id' in pluginInfo:
                for version in plugin['versions']:
                    if version['_id'] == ObjectId(pluginInfo['pluginversion_id']):
                        pluginVersion = version
                        break
            pluginDict = {'plugin': plugin, 'version': pluginVersion, 'config': pluginConfig}
            plugins.append(pluginDict)
    # --- copy files into the working directory -----------------------------
    print('Copying Main Server files')
    os.system('cp -R /mnt/minestack/server/bukkit/* .')
    defaultWorld = None
    os.system('mkdir worlds')
    for worldInfo in worlds:
        world = worldInfo['world']
        version = worldInfo['version']
        default = worldInfo['default']
        print('Copying world '+world['name'])
        if version is None:
            print('World '+world['name']+' has no version. Skipping')
            continue
        # 'default is True' requires the DB field to be a real boolean True
        if default is True:
            defaultWorld = world
        os.system('mkdir worlds/'+world['directory'])
        os.system('cp -R /mnt/minestack/worlds/'+world['directory']+'/versions/'+version['version']+'/* worlds/'+world['directory'])
    os.system('ls -l worlds')
    if defaultWorld is None:
        print('No default world set')
        sys.exit(1)
    # modify server config for default world
    modifyConfig('levelname', defaultWorld['name'])
    os.system('mkdir plugins')
    for pluginInfo in plugins:
        plugin = pluginInfo['plugin']
        version = pluginInfo['version']
        config = pluginInfo['config']
        print('Copying plugin '+plugin['name'])
        if version is None:
            print('Plugin '+plugin['name']+' has no version. Skipping')
            continue
        # plugin config directory is optional; the jar/version copy always runs
        if config is not None:
            os.system('mkdir plugins/'+plugin['directory'])
            os.system('cp -R /mnt/minestack/plugins/'+plugin['directory']+'/configs/'+config['directory']+'/* plugins/'+plugin['directory'])
        os.system('cp -R /mnt/minestack/plugins/'+plugin['directory']+'/versions/'+version['version']+'/* plugins')
    os.system('ls -l plugins')
    # modify server config for num of players
    modifyConfig('maxplayers', servertype['players'])
    # modify server config for server name
    modifyConfig('servername', servertype['name']+'.'+str(server['number']))
    # patch logging with node address and server identity placeholders
    modifyLog('SYS_HOST', node['privateAddress'])
    modifyLog('SERVERTYPE', servertype['name'])
    modifyLog('NUMBER', server['number'])
    os.system('touch .update-lock')
    os.system('ls -l')
    os.system("chmod +x start.sh")
    # hand off to the server start script with the configured RAM amount
    os.system("./start.sh "+str(servertype['ram']))
main()
| mit |
timgrossmann/instagram-profilecrawl | extract_image.py | 1 | 1358 | import sys
import os
import wget
from util.download_image_post import DownloadImagePost
import json
from util.settings import BASE_DIR
def main(arguments):
    """Download preview images for every profile JSON file found on disk.

    arguments: argv-style list; arguments[1] (optional) is the directory of
    profile JSON files relative to BASE_DIR, defaulting to "profiles".
    """
    profiles_path = arguments[1] if len(arguments) > 1 else "profiles"
    profile_list = os.listdir(profiles_path)
    for profile in profile_list:
        file_path = os.path.join(BASE_DIR, profiles_path, profile)
        file_name, file_extension = os.path.splitext(file_path)
        if file_extension == ".json":  # check file is json
            # BUG FIX: close the file deterministically (the original leaked
            # the open handle).
            with open(file_path, "r") as f:
                data = json.loads(f.read())
            # BUG FIX: the original tested `data is not None or data is not []`,
            # which is always true because `x is not []` is an identity check
            # against a brand-new list.  A plain truthiness test is the intent.
            if data:
                username = data.get("username", "")
                if data.get("posts") is not None:
                    images = [val for post in data.get("posts", []) for (key, val) in post.items() if
                              key == "preview_img"]
                    image_downloader = DownloadImagePost('images/{}'.format(username))
                    for img in images:
                        # TODO: Implement download Image URL here
                        # NOTE(review): assumes each preview_img entry has at
                        # least five elements and img[4] is a dict with a "src"
                        # key — confirm against the crawler's output format.
                        image_downloader.extract(img[4].get("src"))
                else:
                    print("This user doesn't have any post(s) yet.")
        else:
            print("Unsupported file type")
if __name__ == "__main__":
    # Forward the raw argv list; main() reads the profiles dir from argv[1].
    main(sys.argv)
| mit |
opennode/nodeconductor-assembly-waldur | src/waldur_mastermind/support/backend/atlassian.py | 1 | 17067 | import collections
import json
import logging
from datetime import datetime
from html.parser import HTMLParser
import dateutil.parser
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.db import transaction
from django.template import Context, Template
from django.utils import timezone
from jira import Comment, JIRAError
from jira.utils import json_loads
from waldur_core.structure import ServiceBackendError
from waldur_jira.backend import JiraBackend, reraise_exceptions
from waldur_mastermind.support import models
from waldur_mastermind.support.exceptions import SupportUserInactive
from . import SupportBackend
logger = logging.getLogger(__name__)
# Immutable bundle of Service Desk connection credentials.
Settings = collections.namedtuple(
    'Settings', 'backend_url username password email token'
)
class ServiceDeskBackend(JiraBackend, SupportBackend):
    """Support backend backed by Atlassian Jira Service Desk.

    Combines the generic Jira client (JiraBackend) with the support app
    backend contract (SupportBackend). All configuration is read from the
    WALDUR_SUPPORT Django setting in __init__.
    """

    servicedeskapi_path = 'servicedeskapi'
    # Django models this backend maps Jira objects onto.
    model_comment = models.Comment
    model_issue = models.Issue
    model_attachment = models.Attachment

    def __init__(self):
        # Credentials and connection options all come from settings.WALDUR_SUPPORT.
        self.settings = Settings(
            backend_url=settings.WALDUR_SUPPORT.get('CREDENTIALS', {}).get('server'),
            username=settings.WALDUR_SUPPORT.get('CREDENTIALS', {}).get('username'),
            password=settings.WALDUR_SUPPORT.get('CREDENTIALS', {}).get('password'),
            email=settings.WALDUR_SUPPORT.get('CREDENTIALS', {}).get('email'),
            token=settings.WALDUR_SUPPORT.get('CREDENTIALS', {}).get('token'),
        )
        self.verify = settings.WALDUR_SUPPORT.get('CREDENTIALS', {}).get('verify_ssl')
        self.project_settings = settings.WALDUR_SUPPORT.get('PROJECT', {})
        # allow to define reference by ID as older SD cannot properly resolve
        # TODO drop once transition to request API is complete
        self.service_desk_reference = self.project_settings.get(
            'key_id', self.project_settings['key']
        )
        self.issue_settings = settings.WALDUR_SUPPORT.get('ISSUE', {})
        self.use_old_api = settings.WALDUR_SUPPORT.get('USE_OLD_API', False)
        self.use_teenage_api = settings.WALDUR_SUPPORT.get('USE_TEENAGE_API', False)
        # In ideal world where Atlassian SD respects its spec the setting below would not be needed
        self.use_automatic_request_mapping = settings.WALDUR_SUPPORT.get(
            'USE_AUTOMATIC_REQUEST_MAPPING', True
        )
        # In some cases list of priorities available to customers differ from the total list returned by SDK
        self.pull_priorities_automatically = settings.WALDUR_SUPPORT.get(
            'PULL_PRIORITIES', True
        )
        # NOTE(review): the purpose of STRANGE_SETTING is not clear from this
        # file; it is only forwarded to waldur_request_types() below — confirm.
        self.strange_setting = settings.WALDUR_SUPPORT.get('STRANGE_SETTING', 1)

    def pull_service_properties(self):
        """Sync request types (and optionally priorities) on top of base properties."""
        super(ServiceDeskBackend, self).pull_service_properties()
        self.pull_request_types()
        if self.pull_priorities_automatically:
            self.pull_priorities()

    @reraise_exceptions
    def create_comment(self, comment):
        """Create the comment in Service Desk and store its backend id locally."""
        backend_comment = self._add_comment(
            comment.issue.backend_id,
            comment.prepare_message(),
            is_internal=not comment.is_public,
        )
        comment.backend_id = backend_comment.id
        comment.save(update_fields=['backend_id'])

    def _add_comment(self, issue, body, is_internal):
        """POST a comment to the given backend issue and return the Jira Comment.

        The 'sd.public.comment' property controls Service Desk visibility;
        note that the stored value is the *is_internal* flag.
        """
        data = {
            'body': body,
            'properties': [
                {'key': 'sd.public.comment', 'value': {'internal': is_internal}},
            ],
        }
        url = self.manager._get_url('issue/{0}/comment'.format(issue))
        response = self.manager._session.post(url, data=json.dumps(data))
        comment = Comment(
            self.manager._options, self.manager._session, raw=json_loads(response)
        )
        return comment

    @reraise_exceptions
    def create_issue(self, issue):
        """Create a customer request in Service Desk for the given issue.

        Ensures the caller exists as a Service Desk customer and that a
        matching request type is known locally, creates the request, then
        mirrors the backend fields back onto the local issue and saves it.
        """
        if not issue.caller.email:
            raise ServiceBackendError(
                'Issue is not created because caller user does not have email.'
            )
        self.create_user(issue.caller)
        args = self._issue_to_dict(issue)
        args['serviceDeskId'] = self.manager.waldur_service_desk(
            self.service_desk_reference
        )
        # Lazily refresh request types when the issue's type is unknown locally.
        if not models.RequestType.objects.filter(issue_type_name=issue.type).count():
            self.pull_request_types()
        if not models.RequestType.objects.filter(issue_type_name=issue.type).count():
            raise ServiceBackendError(
                'Issue is not created because corresponding request type is not found.'
            )
        args['requestTypeId'] = (
            models.RequestType.objects.filter(issue_type_name=issue.type)
            .first()
            .backend_id
        )
        backend_issue = self.manager.waldur_create_customer_request(
            args, use_old_api=self.use_old_api
        )
        args = self._get_custom_fields(issue)
        try:
            # Update an issue, because create_customer_request doesn't allow setting custom fields.
            backend_issue.update(**args)
        except JIRAError as e:
            logger.error('Error when setting custom field via JIRA API: %s' % e)
        self._backend_issue_to_issue(backend_issue, issue)
        issue.save()

    def create_confirmation_comment(self, issue):
        """Post a templated confirmation comment on the issue, if a template exists.

        Falls back to the 'default' template when there is no template for the
        issue type; silently skips when neither exists.
        """
        try:
            tmpl = models.TemplateConfirmationComment.objects.get(issue_type=issue.type)
        except models.TemplateConfirmationComment.DoesNotExist:
            try:
                tmpl = models.TemplateConfirmationComment.objects.get(
                    issue_type='default'
                )
            except models.TemplateConfirmationComment.DoesNotExist:
                logger.debug(
                    'A confirmation comment hasn\'t been created, because a template does not exist.'
                )
                return
        body = (
            Template(tmpl.template)
            .render(Context({'issue': issue}, autoescape=False))
            .strip()
        )
        return self._add_comment(issue.backend_id, body, is_internal=False)

    def create_user(self, user):
        """Ensure a Service Desk customer exists for the given Waldur user.

        Also creates the local SupportCustomer link when it is missing.
        Raises SupportUserInactive when the backend account exists but every
        matching account is disabled.
        """
        # Temporary workaround as JIRA returns 500 error if user already exists
        if self.use_old_api or self.use_teenage_api:
            # old API has a bug that causes user active status to be set to False if includeInactive is passed as True
            existing_support_user = self.manager.search_users(user.email)
        else:
            # user GDPR-compliant version of user search
            existing_support_user = self.manager.waldur_search_users(
                user.email, includeInactive=True
            )
        if existing_support_user:
            active_user = [u for u in existing_support_user if u.active]
            if not active_user:
                raise SupportUserInactive(
                    'Issue is not created because caller user is disabled.'
                )
            logger.debug(
                'Skipping user %s creation because it already exists', user.email
            )
            backend_customer = active_user[0]
        else:
            if self.use_old_api:
                backend_customer = self.manager.waldur_create_customer(
                    user.email, user.full_name
                )
            else:
                backend_customer = self.manager.create_customer(
                    user.email, user.full_name
                )
        # Create the local user<->customer mapping unless it already exists.
        try:
            user.supportcustomer
        except ObjectDoesNotExist:
            support_customer = models.SupportCustomer(
                user=user, backend_id=self.get_user_id(backend_customer)
            )
            support_customer.save()

    @reraise_exceptions
    def get_users(self):
        """Return unsaved SupportUser objects for users assignable in the project."""
        users = self.manager.search_assignable_users_for_projects(
            '', self.project_settings['key'], maxResults=False
        )
        return [
            models.SupportUser(name=user.displayName, backend_id=self.get_user_id(user))
            for user in users
        ]

    def _get_custom_fields(self, issue):
        """Build the dict of Jira custom-field values for backend_issue.update()."""
        args = {}
        if issue.reporter:
            args[
                self.get_field_id_by_name(self.issue_settings['reporter_field'])
            ] = issue.reporter.name
        if issue.impact:
            args[
                self.get_field_id_by_name(self.issue_settings['impact_field'])
            ] = issue.impact
        if issue.priority:
            args['priority'] = {'name': issue.priority}

        def set_custom_field(field_name, value):
            # Only set fields that are both configured and have a value.
            if value and self.issue_settings.get(field_name):
                args[self.get_field_id_by_name(self.issue_settings[field_name])] = value

        if issue.customer:
            set_custom_field('organisation_field', issue.customer.name)
        if issue.project:
            set_custom_field('project_field', issue.project.name)
        if issue.resource:
            set_custom_field('affected_resource_field', issue.resource)
        if issue.template:
            set_custom_field('template_field', issue.template.name)
        return args

    def _issue_to_dict(self, issue):
        """Convert a local issue into the payload of the customer-request API."""
        parser = HTMLParser()
        args = {
            'requestFieldValues': {
                # Unescape HTML entities so they are not double-escaped by Jira.
                'summary': parser.unescape(issue.summary),
                'description': parser.unescape(issue.description),
            }
        }
        if issue.priority:
            args['requestFieldValues']['priority'] = {'name': issue.priority}
        support_customer = issue.caller.supportcustomer
        args['requestParticipants'] = [support_customer.backend_id]
        return args

    def _get_first_sla_field(self, backend_issue):
        """Return the first-response SLA breach time as an aware datetime, or None."""
        field_name = self.get_field_id_by_name(self.issue_settings['sla_field'])
        value = getattr(backend_issue.fields, field_name, None)
        if value and hasattr(value, 'ongoingCycle'):
            epoch_milliseconds = value.ongoingCycle.breachTime.epochMillis
            if epoch_milliseconds:
                return datetime.fromtimestamp(
                    epoch_milliseconds / 1000.0, timezone.get_default_timezone()
                )

    def _backend_issue_to_issue(self, backend_issue, issue):
        """Copy fields from a Jira issue onto the local issue (does not save)."""
        issue.key = backend_issue.key
        issue.backend_id = backend_issue.key
        issue.resolution = (
            backend_issue.fields.resolution and backend_issue.fields.resolution.name
        ) or ''
        issue.status = backend_issue.fields.status.name or ''
        issue.link = backend_issue.permalink()
        issue.priority = backend_issue.fields.priority.name
        issue.first_response_sla = self._get_first_sla_field(backend_issue)
        issue.summary = backend_issue.fields.summary
        issue.description = backend_issue.fields.description or ''
        issue.type = backend_issue.fields.issuetype.name
        issue.resolution_date = backend_issue.fields.resolutiondate or None

        def get_support_user_by_field(fields, field_name):
            # Resolve a Jira user field into a local SupportUser (or None).
            backend_user = getattr(fields, field_name, None)
            if backend_user:
                return self.get_or_create_support_user(backend_user)

        impact_field_id = self.get_field_id_by_name(self.issue_settings['impact_field'])
        impact = getattr(backend_issue.fields, impact_field_id, None)
        if impact:
            issue.impact = impact
        assignee = get_support_user_by_field(backend_issue.fields, 'assignee')
        if assignee:
            issue.assignee = assignee
        reporter = get_support_user_by_field(backend_issue.fields, 'reporter')
        if reporter:
            issue.reporter = reporter

    def get_or_create_support_user(self, user):
        """Return the local SupportUser for a Jira user; None if no id can be derived."""
        user_id = self.get_user_id(user)
        if user_id:
            author, _ = models.SupportUser.objects.get_or_create(backend_id=user_id)
            return author

    def get_user_id(self, user):
        """Return the identifier of a Jira user object across API generations.

        The old API exposes .name, newer ones .key, GDPR-era objects only
        .accountId; returns None when the object cannot be inspected at all.
        """
        try:
            if self.use_old_api:
                return user.name  # alias for username
            else:
                return user.key
        except AttributeError:
            return user.accountId
        except TypeError:
            return

    def _backend_comment_to_comment(self, backend_comment, comment):
        """Copy body, author and visibility from a Jira comment onto the local one."""
        comment.update_message(backend_comment.body)
        comment.author = self.get_or_create_support_user(backend_comment.author)
        try:
            internal = self._get_property(
                'comment', backend_comment.id, 'sd.public.comment'
            )
            comment.is_public = not internal.get('value', {}).get('internal', False)
        except JIRAError:
            # workaround for backbone-issue-sync-for-jira plugin
            external = self._get_property(
                'comment', backend_comment.id, 'sd.allow.public.comment'
            )
            comment.is_public = external.get('value', {}).get('allow', False)

    def _backend_attachment_to_attachment(self, backend_attachment, attachment):
        """Copy metadata from a Jira attachment onto the local attachment."""
        attachment.mime_type = getattr(backend_attachment, 'mimeType', '')
        attachment.file_size = backend_attachment.size
        attachment.created = dateutil.parser.parse(backend_attachment.created)
        attachment.author = self.get_or_create_support_user(backend_attachment.author)

    @reraise_exceptions
    def pull_request_types(self):
        """Sync Service Desk request types into local RequestType objects.

        When automatic request mapping is enabled, stale local entries are
        deleted and each request type is linked to its Jira issue type name.
        """
        service_desk_id = self.manager.waldur_service_desk(self.service_desk_reference)
        # backend_request_types = self.manager.request_types(service_desk_id)
        backend_request_types = self.manager.waldur_request_types(
            service_desk_id, self.project_settings['key'], self.strange_setting
        )
        with transaction.atomic():
            backend_request_type_map = {
                int(request_type.id): request_type
                for request_type in backend_request_types
            }
            waldur_request_type = {
                request_type.backend_id: request_type
                for request_type in models.RequestType.objects.all()
            }
            # cleanup request types if automatic request mapping is done
            if self.use_automatic_request_mapping:
                stale_request_types = set(waldur_request_type.keys()) - set(
                    backend_request_type_map.keys()
                )
                models.RequestType.objects.filter(
                    backend_id__in=stale_request_types
                ).delete()
            for backend_request_type in backend_request_types:
                defaults = {
                    'name': backend_request_type.name,
                }
                if self.use_automatic_request_mapping:
                    issue_type = self.manager.issue_type(
                        backend_request_type.issueTypeId
                    )
                    defaults['issue_type_name'] = issue_type.name
                models.RequestType.objects.update_or_create(
                    backend_id=backend_request_type.id, defaults=defaults,
                )

    @reraise_exceptions
    def pull_priorities(self):
        """Sync Jira priorities into local Priority objects, dropping stale ones."""
        backend_priorities = self.manager.priorities()
        with transaction.atomic():
            backend_priorities_map = {
                priority.id: priority for priority in backend_priorities
            }
            waldur_priorities = {
                priority.backend_id: priority
                for priority in models.Priority.objects.all()
            }
            stale_priorities = set(waldur_priorities.keys()) - set(
                backend_priorities_map.keys()
            )
            models.Priority.objects.filter(backend_id__in=stale_priorities).delete()
            for priority in backend_priorities:
                models.Priority.objects.update_or_create(
                    backend_id=priority.id,
                    defaults={
                        'name': priority.name,
                        'description': priority.description,
                        'icon_url': priority.iconUrl,
                    },
                )

    def create_issue_links(self, issue, linked_issues):
        """Link each issue in *linked_issues* to *issue* with the configured link type."""
        for linked_issue in linked_issues:
            link_type = self.issue_settings['type_of_linked_issue']
            self.manager.create_issue_link(link_type, issue.key, linked_issue.key)

    def create_feedback(self, feedback):
        """Push user feedback to Service Desk.

        The optional free-text comment becomes an internal comment on the
        issue; the optional evaluation is written into the configured
        satisfaction custom field on the backend issue.
        """
        if feedback.comment:
            support_user, _ = models.SupportUser.objects.get_or_create_from_user(
                feedback.issue.caller
            )
            comment = models.Comment.objects.create(
                issue=feedback.issue,
                description=feedback.comment,
                is_public=False,
                author=support_user,
            )
            self.create_comment(comment)
        if feedback.evaluation:
            field_name = self.get_field_id_by_name(
                self.issue_settings['satisfaction_field']
            )
            backend_issue = self.get_backend_issue(feedback.issue.backend_id)
            kwargs = {field_name: feedback.get_evaluation_display()}
            backend_issue.update(**kwargs)
| mit |
urashima9616/Leetcode_Python | Leet114_FlattenBinaryTree.py | 1 | 1614 | """
Given a binary tree, flatten it to a linked list in-place.
For example,
Given
1
/ \
2 5
/ \ \
3 4 6
The flattened tree should look like:
1
\
2
\
3
\
4
\
5
\
6
"""
# Definition for a binary tree node.
class TreeNode(object):
    """A binary tree node holding a value and left/right child links."""
    def __init__(self, x):
        # x: value stored at this node; both children start empty.
        self.val = x
        self.left = None
        self.right = None
class Solution(object):
    def flatten(self, root):
        """
        :type root: TreeNode
        :rtype: void Do not return anything, modify root in-place instead.
        """
        if root is None:
            return
        self.BuildFlatten(root)

    def BuildFlatten(self, root):
        """Flatten the subtree rooted at *root* into a right-only chain (pre-order).

        Returns the last node of the flattened chain so callers can splice
        the saved right subtree after it.
        """
        if root is None:
            return
        left, right = root.left, root.right
        if left is None and right is None:
            # Leaf: already flat, and the leaf itself is the chain's tail.
            return root
        if left is None:
            return self.BuildFlatten(right)
        # Move the left subtree into the right slot, then flatten it.
        root.left = None
        root.right = left
        left_tail = self.BuildFlatten(left)
        if right is None:
            return left_tail
        # Re-attach the saved right subtree after the flattened left chain.
        left_tail.right = right
        return self.BuildFlatten(right)
# Smoke test: build the example tree from the problem statement and
# flatten it in place (result is inspected via the mutated `root`).
Solve = Solution()
root = TreeNode(1)
root.left = TreeNode(2)
root.right = TreeNode(5)
root.left.left = TreeNode(3)
root.left.right = TreeNode(4)
root.right.right = TreeNode(6)
Solve.flatten(root)
vorlock/ansible-modules-core | database/postgresql/postgresql_user.py | 25 | 20683 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: postgresql_user
short_description: Adds or removes a users (roles) from a PostgreSQL database.
description:
- Add or remove PostgreSQL users (roles) from a remote host and, optionally,
grant the users access to an existing database or tables.
- The fundamental function of the module is to create, or delete, roles from
a PostgreSQL cluster. Privilege assignment, or removal, is an optional
step, which works on one database at a time. This allows for the module to
be called several times in the same module to modify the permissions on
different databases, or to grant permissions to already existing users.
- A user cannot be removed until all the privileges have been stripped from
the user. In such situation, if the module tries to remove the user it
will fail. To avoid this from happening the fail_on_user option signals
the module to try to remove the user, but if not possible keep going; the
module will report if changes happened and separately if the user was
removed or not.
version_added: "0.6"
options:
name:
description:
- name of the user (role) to add or remove
required: true
default: null
password:
description:
- set the user's password, before 1.4 this was required.
- "When passing an encrypted password, the encrypted parameter must also be true, and it must be generated with the format C('str[\\"md5\\"] + md5[ password + username ]'), resulting in a total of 35 characters. An easy way to do this is: C(echo \\"md5`echo -n \\"verysecretpasswordJOE\\" | md5`\\")."
required: false
default: null
db:
description:
- name of database where permissions will be granted
required: false
default: null
fail_on_user:
description:
- if C(yes), fail when user can't be removed. Otherwise just log and continue
required: false
default: 'yes'
choices: [ "yes", "no" ]
port:
description:
- Database port to connect to.
required: false
default: 5432
login_user:
description:
- User (role) used to authenticate with PostgreSQL
required: false
default: postgres
login_password:
description:
- Password used to authenticate with PostgreSQL
required: false
default: null
login_host:
description:
- Host running PostgreSQL.
required: false
default: localhost
login_unix_socket:
description:
- Path to a Unix domain socket for local connections
required: false
default: null
priv:
description:
- "PostgreSQL privileges string in the format: C(table:priv1,priv2)"
required: false
default: null
role_attr_flags:
description:
- "PostgreSQL role attributes string in the format: CREATEDB,CREATEROLE,SUPERUSER"
required: false
default: null
choices: [ "[NO]SUPERUSER","[NO]CREATEROLE", "[NO]CREATEUSER", "[NO]CREATEDB",
"[NO]INHERIT", "[NO]LOGIN", "[NO]REPLICATION" ]
state:
description:
- The user (role) state
required: false
default: present
choices: [ "present", "absent" ]
encrypted:
description:
- denotes if the password is already encrypted. boolean.
required: false
default: false
version_added: '1.4'
expires:
description:
- sets the user's password expiration.
required: false
default: null
version_added: '1.4'
notes:
- The default authentication assumes that you are either logging in as or
sudo'ing to the postgres account on the host.
- This module uses psycopg2, a Python PostgreSQL database adapter. You must
ensure that psycopg2 is installed on the host before using this module. If
the remote host is the PostgreSQL server (which is the default case), then
PostgreSQL must also be installed on the remote host. For Ubuntu-based
systems, install the postgresql, libpq-dev, and python-psycopg2 packages
on the remote host before using this module.
- If you specify PUBLIC as the user, then the privilege changes will apply
to all users. You may not specify password or role_attr_flags when the
PUBLIC user is specified.
requirements: [ psycopg2 ]
author: Lorin Hochstein
'''
EXAMPLES = '''
# Create django user and grant access to database and products table
- postgresql_user: db=acme name=django password=ceec4eif7ya priv=CONNECT/products:ALL
# Create rails user, grant privilege to create other databases and demote rails from super user status
- postgresql_user: name=rails password=secret role_attr_flags=CREATEDB,NOSUPERUSER
# Remove test user privileges from acme
- postgresql_user: db=acme name=test priv=ALL/products:ALL state=absent fail_on_user=no
# Remove test user from test database and the cluster
- postgresql_user: db=test name=test priv=ALL state=absent
# Example privileges string format
INSERT,UPDATE/table:SELECT/anothertable:ALL
# Remove an existing user's password
- postgresql_user: db=test user=test password=NULL
'''
import re
import itertools
# psycopg2 is an optional dependency; record availability so main() can
# emit a friendly failure message instead of an ImportError traceback.
try:
    import psycopg2
except ImportError:
    postgresqldb_found = False
else:
    postgresqldb_found = True
# Role attribute flags accepted by PostgreSQL, plus their NO-prefixed negations.
_flags = ('SUPERUSER', 'CREATEROLE', 'CREATEUSER', 'CREATEDB', 'INHERIT', 'LOGIN', 'REPLICATION')
VALID_FLAGS = frozenset(itertools.chain(_flags, ('NO%s' % f for f in _flags)))
# Privileges that may be granted per object type (table vs. database).
VALID_PRIVS = dict(table=frozenset(('SELECT', 'INSERT', 'UPDATE', 'DELETE', 'TRUNCATE', 'REFERENCES', 'TRIGGER', 'ALL', 'USAGE')),
                   database=frozenset(('CREATE', 'CONNECT', 'TEMPORARY', 'TEMP', 'ALL', 'USAGE')),
                   )
class InvalidFlagsError(Exception):
    """Raised when role_attr_flags contains a flag not in VALID_FLAGS."""
    pass
class InvalidPrivsError(Exception):
    """Raised when a privilege string contains privileges not in VALID_PRIVS."""
    pass
# ===========================================
# PostgreSQL module specific support methods.
#
def user_exists(cursor, user):
    """Return True when the given role exists (PUBLIC always counts as existing)."""
    # The PUBLIC user is a special case that is always there
    if user == 'PUBLIC':
        return True
    query = "SELECT rolname FROM pg_roles WHERE rolname=%(user)s"
    cursor.execute(query, {'user': user})
    return cursor.rowcount > 0
def user_add(cursor, user, password, role_attr_flags, encrypted, expires):
    """Create a new database user (role)."""
    # Note: role_attr_flags escaped by parse_role_attrs and encrypted is a literal
    # password and expires are passed as bound parameters (placeholders in the
    # query), never interpolated into the SQL string.
    query_password_data = dict(password=password, expires=expires)
    query = ['CREATE USER %(user)s' % { "user": pg_quote_identifier(user, 'role')}]
    if password is not None:
        query.append("WITH %(crypt)s" % { "crypt": encrypted })
        query.append("PASSWORD %(password)s")
    if expires is not None:
        query.append("VALID UNTIL %(expires)s")
    query.append(role_attr_flags)
    query = ' '.join(query)
    cursor.execute(query, query_password_data)
    return True
def user_alter(cursor, module, user, password, role_attr_flags, encrypted, expires):
    """Change user password and/or attributes. Return True if changed, False otherwise."""
    changed = False
    # Note: role_attr_flags escaped by parse_role_attrs and encrypted is a literal
    if user == 'PUBLIC':
        if password is not None:
            module.fail_json(msg="cannot change the password for PUBLIC user")
        elif role_attr_flags != '':
            module.fail_json(msg="cannot change the role_attr_flags for PUBLIC user")
        else:
            return False
    # Handle passwords.
    if password is not None or role_attr_flags is not None:
        # Select password and all flag-like columns in order to verify changes.
        query_password_data = dict(password=password, expires=expires)
        select = "SELECT * FROM pg_authid where rolname=%(user)s"
        cursor.execute(select, {"user": user})
        # Grab current role attributes.
        current_role_attrs = cursor.fetchone()
        alter = ['ALTER USER %(user)s' % {"user": pg_quote_identifier(user, 'role')}]
        if password is not None:
            alter.append("WITH %(crypt)s" % {"crypt": encrypted})
            alter.append("PASSWORD %(password)s")
            alter.append(role_attr_flags)
        elif role_attr_flags:
            alter.append('WITH %s' % role_attr_flags)
        if expires is not None:
            alter.append("VALID UNTIL %(expires)s")
        try:
            cursor.execute(' '.join(alter), query_password_data)
        except psycopg2.InternalError, e:
            if e.pgcode == '25006':
                # Handle errors due to read-only transactions indicated by pgcode 25006
                # ERROR: cannot execute ALTER ROLE in a read-only transaction
                changed = False
                # fail_json() exits the module; the return is defensive only.
                module.fail_json(msg=e.pgerror)
                return changed
            else:
                raise psycopg2.InternalError, e
        # Grab new role attributes.
        cursor.execute(select, {"user": user})
        new_role_attrs = cursor.fetchone()
        # Detect any differences between current_ and new_role_attrs.
        # Comparing the whole pg_authid row also catches password changes.
        for i in range(len(current_role_attrs)):
            if current_role_attrs[i] != new_role_attrs[i]:
                changed = True
    return changed
def user_delete(cursor, user):
    """Try to remove a user. Returns True if successful otherwise False"""
    # A savepoint keeps a failed DROP USER (e.g. remaining privileges) from
    # poisoning the enclosing transaction.
    cursor.execute("SAVEPOINT ansible_pgsql_user_delete")
    try:
        cursor.execute("DROP USER %s" % pg_quote_identifier(user, 'role'))
    except:
        cursor.execute("ROLLBACK TO SAVEPOINT ansible_pgsql_user_delete")
        cursor.execute("RELEASE SAVEPOINT ansible_pgsql_user_delete")
        return False
    cursor.execute("RELEASE SAVEPOINT ansible_pgsql_user_delete")
    return True
def has_table_privilege(cursor, user, table, priv):
    """Ask PostgreSQL whether *user* holds *priv* on *table*."""
    cursor.execute(
        'SELECT has_table_privilege(%s, %s, %s)',
        (user, table, priv),
    )
    row = cursor.fetchone()
    return row[0]
def get_table_privileges(cursor, user, table):
    """Return the set of privilege names *user* holds on *table*.

    *table* may be schema-qualified as "schema.table"; unqualified names
    are looked up in the public schema.
    """
    schema = 'public'
    if '.' in table:
        schema, table = table.split('.', 1)
    query = '''SELECT privilege_type FROM information_schema.role_table_grants
    WHERE grantee=%s AND table_name=%s AND table_schema=%s'''
    cursor.execute(query, (user, table, schema))
    return {row[0] for row in cursor.fetchall()}
def grant_table_privilege(cursor, user, table, priv):
    """Grant *priv* on *table* to *user*; True if the privilege set grew."""
    # Note: priv escaped by parse_privs
    before = get_table_privileges(cursor, user, table)
    query = 'GRANT %s ON TABLE %s TO %s' % (
        priv, pg_quote_identifier(table, 'table'), pg_quote_identifier(user, 'role') )
    cursor.execute(query)
    after = get_table_privileges(cursor, user, table)
    return len(after) > len(before)
def revoke_table_privilege(cursor, user, table, priv):
    """Revoke *priv* on *table* from *user*; True if the privilege set shrank."""
    # Note: priv escaped by parse_privs
    before = get_table_privileges(cursor, user, table)
    query = 'REVOKE %s ON TABLE %s FROM %s' % (
        priv, pg_quote_identifier(table, 'table'), pg_quote_identifier(user, 'role') )
    cursor.execute(query)
    after = get_table_privileges(cursor, user, table)
    return len(after) < len(before)
def get_database_privileges(cursor, user, db):
    """Return the list of database-level privileges *user* holds on *db*.

    Parses the raw ACL string (pg_database.datacl); only CREATE, TEMPORARY
    and CONNECT are reported, matching the single-letter codes PostgreSQL
    uses in ACL entries. Returns [] when there is no ACL or no entry for
    the user.
    """
    priv_map = {
        'C': 'CREATE',
        'T': 'TEMPORARY',
        'c': 'CONNECT',
    }
    query = 'SELECT datacl FROM pg_database WHERE datname = %s'
    cursor.execute(query, (db,))
    datacl = cursor.fetchone()[0]
    if datacl is None:
        return []
    # Escape the role name: user names may contain regex metacharacters
    # (e.g. dots), which would otherwise corrupt the search pattern.
    r = re.search(r'%s=(C?T?c?)/[a-z]+\,?' % re.escape(user), datacl)
    if r is None:
        return []
    return [priv_map[v] for v in r.group(1)]
def has_database_privilege(cursor, user, db, priv):
    """Ask PostgreSQL whether *user* holds *priv* on database *db*."""
    cursor.execute(
        'SELECT has_database_privilege(%s, %s, %s)',
        (user, db, priv),
    )
    row = cursor.fetchone()
    return row[0]
def grant_database_privilege(cursor, user, db, priv):
    """Grant *priv* on database *db* to *user*; True if the ACL grew."""
    # Note: priv escaped by parse_privs
    prev_priv = get_database_privileges(cursor, user, db)
    # PUBLIC is a keyword, not a role, so it must not be quoted.
    if user == "PUBLIC":
        query = 'GRANT %s ON DATABASE %s TO PUBLIC' % (
            priv, pg_quote_identifier(db, 'database'))
    else:
        query = 'GRANT %s ON DATABASE %s TO %s' % (
            priv, pg_quote_identifier(db, 'database'),
            pg_quote_identifier(user, 'role'))
    cursor.execute(query)
    curr_priv = get_database_privileges(cursor, user, db)
    return len(curr_priv) > len(prev_priv)
def revoke_database_privilege(cursor, user, db, priv):
    """Revoke *priv* on database *db* from *user*; True if the ACL shrank."""
    # Note: priv escaped by parse_privs
    prev_priv = get_database_privileges(cursor, user, db)
    # PUBLIC is a keyword, not a role, so it must not be quoted.
    if user == "PUBLIC":
        query = 'REVOKE %s ON DATABASE %s FROM PUBLIC' % (
            priv, pg_quote_identifier(db, 'database'))
    else:
        query = 'REVOKE %s ON DATABASE %s FROM %s' % (
            priv, pg_quote_identifier(db, 'database'),
            pg_quote_identifier(user, 'role'))
    cursor.execute(query)
    curr_priv = get_database_privileges(cursor, user, db)
    return len(curr_priv) < len(prev_priv)
def revoke_privileges(cursor, user, privs):
    """Revoke all privileges described by *privs* (a parse_privs dict) from *user*.

    Returns True when any revocation actually changed an ACL; *privs* may
    be None for a no-op.
    """
    if privs is None:
        return False
    changed = False
    for type_ in privs:
        # Dispatch to the object-type specific revoke helper.
        revoke_func = {
            'table':revoke_table_privilege,
            'database':revoke_database_privilege
        }[type_]
        for name, privileges in privs[type_].iteritems():
            for privilege in privileges:
                changed = revoke_func(cursor, user, name, privilege)\
                          or changed
    return changed
def grant_privileges(cursor, user, privs):
    """Grant all privileges described by *privs* (a parse_privs dict) to *user*.

    Returns True when any grant actually changed an ACL; *privs* may be
    None for a no-op.
    """
    if privs is None:
        return False
    changed = False
    for type_ in privs:
        # Dispatch to the object-type specific grant helper.
        grant_func = {
            'table':grant_table_privilege,
            'database':grant_database_privilege
        }[type_]
        for name, privileges in privs[type_].iteritems():
            for privilege in privileges:
                changed = grant_func(cursor, user, name, privilege)\
                          or changed
    return changed
def parse_role_attrs(role_attr_flags):
    """
    Parse role attributes string for user creation.
    Format:
        attributes[,attributes,...]
    Where:
        attributes := CREATEDB,CREATEROLE,NOSUPERUSER,...
        [ "[NO]SUPERUSER","[NO]CREATEROLE", "[NO]CREATEUSER", "[NO]CREATEDB",
          "[NO]INHERIT", "[NO]LOGIN", "[NO]REPLICATION" ]
    """
    if role_attr_flags:
        # A comma-free string splits into a single element, so one branch covers both.
        flag_set = frozenset(flag.upper() for flag in role_attr_flags.split(","))
    else:
        flag_set = frozenset()
    unknown = flag_set.difference(VALID_FLAGS)
    if unknown:
        raise InvalidFlagsError('Invalid role_attr_flags specified: %s' %
                                ' '.join(unknown))
    return ' '.join(flag_set)
def parse_privs(privs, db):
    """
    Parse privilege string to determine permissions for database db.
    Format:
        privileges[/privileges/...]
    Where:
        privileges := DATABASE_PRIVILEGES[,DATABASE_PRIVILEGES,...] |
            TABLE_NAME:TABLE_PRIVILEGES[,TABLE_PRIVILEGES,...]
    """
    if privs is None:
        return None
    parsed = {
        'database': {},
        'table': {},
    }
    for chunk in privs.split('/'):
        if ':' in chunk:
            # "table:priv1,priv2" — privileges apply to a named table.
            kind = 'table'
            name, _, raw_privs = chunk.partition(':')
        else:
            # Bare privilege list — applies to the target database itself.
            kind = 'database'
            name = db
            raw_privs = chunk
        priv_set = frozenset(p.strip().upper() for p in raw_privs.split(',') if p.strip())
        unknown = priv_set.difference(VALID_PRIVS[kind])
        if unknown:
            raise InvalidPrivsError('Invalid privs specified for %s: %s' %
                                    (kind, ' '.join(unknown)))
        parsed[kind][name] = priv_set
    return parsed
# ===========================================
# Module execution.
#
def main():
    """Module entry point: parse params, connect, then create/alter/delete the user."""
    module = AnsibleModule(
        argument_spec=dict(
            login_user=dict(default="postgres"),
            login_password=dict(default=""),
            login_host=dict(default=""),
            login_unix_socket=dict(default=""),
            user=dict(required=True, aliases=['name']),
            password=dict(default=None),
            state=dict(default="present", choices=["absent", "present"]),
            priv=dict(default=None),
            db=dict(default=''),
            port=dict(default='5432'),
            fail_on_user=dict(type='bool', default='yes'),
            role_attr_flags=dict(default=''),
            encrypted=dict(type='bool', default='no'),
            expires=dict(default=None)
        ),
        supports_check_mode = True
    )
    user = module.params["user"]
    password = module.params["password"]
    state = module.params["state"]
    fail_on_user = module.params["fail_on_user"]
    db = module.params["db"]
    # Privileges are always scoped to one database, so db is mandatory with priv.
    if db == '' and module.params["priv"] is not None:
        module.fail_json(msg="privileges require a database to be specified")
    privs = parse_privs(module.params["priv"], db)
    port = module.params["port"]
    try:
        role_attr_flags = parse_role_attrs(module.params["role_attr_flags"])
    except InvalidFlagsError, e:
        module.fail_json(msg=str(e))
    # CREATE/ALTER USER takes a literal ENCRYPTED/UNENCRYPTED keyword.
    if module.params["encrypted"]:
        encrypted = "ENCRYPTED"
    else:
        encrypted = "UNENCRYPTED"
    expires = module.params["expires"]
    if not postgresqldb_found:
        module.fail_json(msg="the python psycopg2 module is required")
    # To use defaults values, keyword arguments must be absent, so
    # check which values are empty and don't include in the **kw
    # dictionary
    params_map = {
        "login_host":"host",
        "login_user":"user",
        "login_password":"password",
        "port":"port",
        "db":"database"
    }
    kw = dict( (params_map[k], v) for (k, v) in module.params.iteritems()
              if k in params_map and v != "" )
    # If a login_unix_socket is specified, incorporate it here.
    is_localhost = "host" not in kw or kw["host"] == "" or kw["host"] == "localhost"
    if is_localhost and module.params["login_unix_socket"] != "":
        kw["host"] = module.params["login_unix_socket"]
    try:
        db_connection = psycopg2.connect(**kw)
        cursor = db_connection.cursor()
    except Exception, e:
        module.fail_json(msg="unable to connect to database: %s" % e)
    # kw is reused from here on as the exit_json() result payload.
    kw = dict(user=user)
    changed = False
    user_removed = False
    if state == "present":
        if user_exists(cursor, user):
            try:
                changed = user_alter(cursor, module, user, password, role_attr_flags, encrypted, expires)
            except SQLParseError, e:
                module.fail_json(msg=str(e))
        else:
            try:
                changed = user_add(cursor, user, password, role_attr_flags, encrypted, expires)
            except SQLParseError, e:
                module.fail_json(msg=str(e))
        try:
            changed = grant_privileges(cursor, user, privs) or changed
        except SQLParseError, e:
            module.fail_json(msg=str(e))
    else:
        # state == "absent": strip privileges, then try to drop the role.
        if user_exists(cursor, user):
            if module.check_mode:
                changed = True
                kw['user_removed'] = True
            else:
                try:
                    changed = revoke_privileges(cursor, user, privs)
                    user_removed = user_delete(cursor, user)
                except SQLParseError, e:
                    module.fail_json(msg=str(e))
                changed = changed or user_removed
                if fail_on_user and not user_removed:
                    msg = "unable to remove user"
                    module.fail_json(msg=msg)
                kw['user_removed'] = user_removed
    if changed:
        # In check mode, undo everything; otherwise persist the changes.
        if module.check_mode:
            db_connection.rollback()
        else:
            db_connection.commit()
    kw['changed'] = changed
    module.exit_json(**kw)
# import module snippets
# Ansible inlines these snippet modules at build time; the star imports
# provide AnsibleModule, pg_quote_identifier and SQLParseError used above.
from ansible.module_utils.basic import *
from ansible.module_utils.database import *
main()
| gpl-3.0 |
mitocw/edx-platform | openedx/features/discounts/tests/test_applicability.py | 1 | 7268 | """Tests of openedx.features.discounts.applicability"""
# -*- coding: utf-8 -*-
from datetime import datetime, timedelta
import ddt
import pytz
from django.contrib.sites.models import Site
from django.utils.timezone import now
from enterprise.models import EnterpriseCustomer, EnterpriseCustomerUser
from mock import Mock, patch
from course_modes.models import CourseMode
from course_modes.tests.factories import CourseModeFactory
from entitlements.tests.factories import CourseEntitlementFactory
from experiments.models import ExperimentData
from openedx.core.djangoapps.content.course_overviews.models import CourseOverview
from openedx.core.djangoapps.waffle_utils.testutils import override_waffle_flag
from openedx.features.discounts.models import DiscountRestrictionConfig
from openedx.features.discounts.utils import REV1008_EXPERIMENT_ID
from student.tests.factories import CourseEnrollmentFactory, UserFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
from ..applicability import DISCOUNT_APPLICABILITY_FLAG, _is_in_holdback, can_receive_discount
@ddt.ddt
class TestApplicability(ModuleStoreTestCase):
    """
    Applicability determines if this combination of user and course can receive a discount. Make
    sure that all of the business conditions work.
    """
    def setUp(self):
        super(TestApplicability, self).setUp()
        # Shared fixture: one site, one user, one course with a purchasable
        # 'verified' mode (a precondition for discount applicability).
        self.site, _ = Site.objects.get_or_create(domain='example.com')
        self.user = UserFactory.create()
        self.course = CourseFactory.create(run='test', display_name='test')
        CourseModeFactory.create(course_id=self.course.id, mode_slug='verified')
        # can_receive_discount requires a REV1008 ExperimentData record for
        # this (user, course) pair; create it up front.
        now_time = datetime.now(tz=pytz.UTC).strftime(u"%Y-%m-%d %H:%M:%S%z")
        ExperimentData.objects.create(
            user=self.user, experiment_id=REV1008_EXPERIMENT_ID, key=str(self.course), value=now_time
        )
        # Keep users out of the holdback by default; individual tests flip
        # self.mock_holdback.return_value to exercise the holdback path.
        holdback_patcher = patch('openedx.features.discounts.applicability._is_in_holdback', return_value=False)
        self.mock_holdback = holdback_patcher.start()
        self.addCleanup(holdback_patcher.stop)

    def test_can_receive_discount(self):
        # Right now, no one should be able to receive the discount
        # (the DISCOUNT_APPLICABILITY_FLAG waffle flag is off by default).
        applicability = can_receive_discount(user=self.user, course=self.course)
        self.assertEqual(applicability, False)

    @override_waffle_flag(DISCOUNT_APPLICABILITY_FLAG, active=True)
    def test_can_receive_discount_course_requirements(self):
        """
        Ensure first purchase offer banner only displays for courses with a non-expired verified mode
        """
        CourseEnrollmentFactory(
            is_active=True,
            course_id=self.course.id,
            user=self.user
        )
        # Baseline: enrolled user + verified mode course is applicable.
        applicability = can_receive_discount(user=self.user, course=self.course)
        self.assertEqual(applicability, True)
        # A course with no verified mode is not applicable.
        no_verified_mode_course = CourseFactory(end=now() + timedelta(days=30))
        applicability = can_receive_discount(user=self.user, course=no_verified_mode_course)
        self.assertEqual(applicability, False)
        # A course that has already ended is not applicable.
        course_that_has_ended = CourseFactory(end=now() - timedelta(days=30))
        applicability = can_receive_discount(user=self.user, course=course_that_has_ended)
        self.assertEqual(applicability, False)
        # A course disabled via DiscountRestrictionConfig is not applicable.
        disabled_course = CourseFactory()
        CourseModeFactory.create(course_id=disabled_course.id, mode_slug='verified')
        disabled_course_overview = CourseOverview.get_from_id(disabled_course.id)
        DiscountRestrictionConfig.objects.create(disabled=True, course=disabled_course_overview)
        applicability = can_receive_discount(user=self.user, course=disabled_course)
        self.assertEqual(applicability, False)

    # Parameterize over every single mode and every ordered pair of distinct
    # modes, plus the empty list (no prior enrollments).
    @ddt.data(*(
        [[]] +
        [[mode] for mode in CourseMode.ALL_MODES] +
        [
            [mode1, mode2]
            for mode1 in CourseMode.ALL_MODES
            for mode2 in CourseMode.ALL_MODES
            if mode1 != mode2
        ]
    ))
    @override_waffle_flag(DISCOUNT_APPLICABILITY_FLAG, active=True)
    def test_can_receive_discount_previous_verified_enrollment(self, existing_enrollments):
        """
        Ensure that only users who have not already purchased courses receive the discount.
        """
        CourseEnrollmentFactory(
            is_active=True,
            course_id=self.course.id,
            user=self.user
        )
        for mode in existing_enrollments:
            CourseEnrollmentFactory.create(mode=mode, user=self.user)
        applicability = can_receive_discount(user=self.user, course=self.course)
        # Applicable only when every prior enrollment is still an upsell
        # candidate (i.e. nothing has been purchased yet).
        assert applicability == all(mode in CourseMode.UPSELL_TO_VERIFIED_MODES for mode in existing_enrollments)

    @ddt.data(
        None,
        CourseMode.VERIFIED,
        CourseMode.PROFESSIONAL,
    )
    @override_waffle_flag(DISCOUNT_APPLICABILITY_FLAG, active=True)
    def test_can_receive_discount_entitlement(self, entitlement_mode):
        """
        Ensure that only users who have not already purchased courses receive the discount.
        """
        CourseEnrollmentFactory(
            is_active=True,
            course_id=self.course.id,
            user=self.user
        )
        if entitlement_mode is not None:
            CourseEntitlementFactory.create(mode=entitlement_mode, user=self.user)
        applicability = can_receive_discount(user=self.user, course=self.course)
        # Holding any paid entitlement disqualifies the user.
        assert applicability == (entitlement_mode is None)

    @override_waffle_flag(DISCOUNT_APPLICABILITY_FLAG, active=True)
    def test_can_receive_discount_false_enterprise(self):
        """
        Ensure that enterprise users do not receive the discount.
        """
        enterprise_customer = EnterpriseCustomer.objects.create(
            name='Test EnterpriseCustomer',
            site=self.site
        )
        EnterpriseCustomerUser.objects.create(
            user_id=self.user.id,
            enterprise_customer=enterprise_customer
        )
        applicability = can_receive_discount(user=self.user, course=self.course)
        self.assertEqual(applicability, False)

    @override_waffle_flag(DISCOUNT_APPLICABILITY_FLAG, active=True)
    def test_holdback_denies_discount(self):
        """
        Ensure that users in the holdback do not receive the discount.
        """
        self.mock_holdback.return_value = True
        applicability = can_receive_discount(user=self.user, course=self.course)
        assert not applicability

    @ddt.data(
        (0, True),
        (1, False),
    )
    @ddt.unpack
    def test_holdback_group_ids(self, group_number, in_holdback):
        # Bucket 0 is the holdback group, bucket 1 is not.
        with patch('openedx.features.discounts.applicability.stable_bucketing_hash_group', return_value=group_number):
            assert _is_in_holdback(self.user) == in_holdback

    def test_holdback_expiry(self):
        # Even a user bucketed into the holdback (group 0) is released once
        # the (mocked) current date passes the holdback expiry.
        with patch('openedx.features.discounts.applicability.stable_bucketing_hash_group', return_value=0):
            with patch(
                'openedx.features.discounts.applicability.datetime',
                Mock(now=Mock(return_value=datetime(2020, 8, 1, 0, 1, tzinfo=pytz.UTC)), wraps=datetime),
            ):
                assert not _is_in_holdback(self.user)
| agpl-3.0 |
bielawb/WPSDSCLinux | dsc/Providers/Scripts/3.x/Scripts/nxService.py | 1 | 48920 | #!/usr/bin/env python
# ===================================
# Copyright (c) Microsoft Corporation. All rights reserved.
# See license.txt for license information.
# ===================================
from contextlib import contextmanager
import subprocess
import os
import sys
import glob
import codecs
import imp
import time
protocol = imp.load_source('protocol', '../protocol.py')
nxDSCLog = imp.load_source('nxDSCLog', '../nxDSCLog.py')
LG = nxDSCLog.DSCLog
# [key] string Name;
# [write,required,ValueMap{"init", "upstart", "systemd"},
# Values{"init","upstart","systemd"}] string Controller;
# [write] boolean Enabled;
# [write,ValueMap{"Running", "Stopped"},Values{"Running",
# "Stopped"}] string State;
# [read] string Path;
# Module-level flag: when True, ShowMof() appends a MOF-formatted trace of
# each provider call to ./test_mofs.log.  Toggled via SetShowMof().
# NOTE(review): 'global' at module scope is a no-op; kept as-is.
global show_mof
show_mof = False
def init_vars(Name, Controller, Enabled, State):
    """
    Normalize the raw marshalled arguments.

    None values become safe defaults ('' / False), Enabled is coerced to a
    strict boolean (True only when it compares equal to True), and
    Controller/State are lower-cased for case-insensitive comparison.
    Returns the normalized (Name, Controller, Enabled, State) tuple.
    """
    Name = '' if Name is None else Name
    Controller = '' if Controller is None else Controller
    State = '' if State is None else State
    Enabled = False if Enabled is None else Enabled
    Enabled = True if Enabled == True else False
    return Name, Controller.lower(), Enabled, State.lower()
def Set_Marshall(Name, Controller, Enabled, State):
    """DSC 'Set' entry point: normalize the arguments, then delegate to Set()."""
    Name, Controller, Enabled, State = init_vars(
        Name, Controller, Enabled, State)
    return Set(Name, Controller, Enabled, State)
def Test_Marshall(Name, Controller, Enabled, State):
    """DSC 'Test' entry point: normalize the arguments, then delegate to Test()."""
    Name, Controller, Enabled, State = init_vars(
        Name, Controller, Enabled, State)
    return Test(Name, Controller, Enabled, State)
def Get_Marshall(Name, Controller, Enabled, State):
    """
    DSC 'Get' entry point.

    Returns (exit_code, dict) where the dict maps each original parameter
    name to its current value wrapped in a protocol.MI_* type.
    """
    # Capture the parameter names *before* any locals are added; they are
    # used below to build the returned property bag.
    arg_names = list(locals().keys())
    (Name, Controller, Enabled, State) = init_vars(
        Name, Controller, Enabled, State)
    retval = 0
    (retval, Name, Controller, Enabled, State, Path) = Get(
        Name, Controller, Enabled, State)
    # Wrap the plain Python values in MI types for the OMI protocol layer.
    Name = protocol.MI_String(Name)
    Controller = protocol.MI_String(Controller)
    Enabled = protocol.MI_Boolean(Enabled)
    State = protocol.MI_String(State)
    Path = protocol.MI_String(Path)
    retd = {}
    ld = locals()
    # NOTE(review): 'Path' is not in arg_names (it was not a parameter), so
    # it is omitted from the returned bag even though the MOF declares a
    # read-only Path property — confirm whether arg_names.append('Path')
    # is missing here.
    for k in arg_names:
        retd[k] = ld[k]
    return retval, retd
# ##########################
# Begin user defined DSC functions
# ##########################
def SetShowMof(a):
    """Turn MOF trace logging (see ShowMof) on or off."""
    global show_mof
    show_mof = a
def ShowMof(op, Name, Controller, Enabled, State):
    """
    When trace logging is enabled (see SetShowMof), append a MOF-formatted
    record of this provider call to ./test_mofs.log and the DSC log.
    """
    if not show_mof:
        return
    mof = (op + ' nxService MyService' + '{\n' +
           ' Name = "' + Name + '"\n' +
           ' Controller = "' + Controller + '"\n' +
           ' Enabled = ' + str(Enabled) + '\n' +
           ' State = "' + State + '"\n' +
           '}\n')
    log_file = open('./test_mofs.log', 'a')
    Print(mof, file=log_file)
    LG().Log('INFO', mof)
    log_file.close()
def Print(s, file=sys.stdout):
    """Write *s* followed by a newline to *file* (defaults to stdout)."""
    text = s + '\n'
    file.write(text)
@contextmanager
def opened_w_error(filename, mode="r"):
    """
    Open *filename* (UTF-8 via codecs) and yield (file, error).

    Exactly one of the pair is None: on success the open file is yielded
    with error None and is guaranteed to be closed on exit; on IOError the
    exception is yielded with file None.
    """
    try:
        handle = codecs.open(filename, encoding='utf-8', mode=mode)
    except IOError as err:
        yield None, err
        return
    try:
        yield handle, None
    finally:
        handle.close()
def RunGetOutput(cmd, no_output, chk_err=True):
    """
    Wrapper for subprocess.check_output.
    Execute 'cmd'. Returns return code and STDOUT,
    trapping expected exceptions.
    Reports exceptions to Error if chk_err parameter is True
    """
    def check_output(no_output, *popenargs, **kwargs):
        r"""Backport from subprocess module from python 2.7"""
        if 'stdout' in kwargs:
            raise ValueError(
                'stdout argument not allowed, it will be overridden.')
        # no_output=True discards stdout entirely instead of capturing it.
        if no_output:
            out_file = None
        else:
            out_file = subprocess.PIPE
        # Force an English UTF-8 locale so tool output can be parsed
        # reliably regardless of the system locale.
        enEnv = os.environ.copy()
        enEnv["LANG"] = "en_US.UTF8"
        process = subprocess.Popen(stdout=out_file, env=enEnv, *popenargs, **kwargs)
        output, unused_err = process.communicate()
        retcode = process.poll()
        if retcode:
            cmd = kwargs.get("args")
            if cmd is None:
                cmd = popenargs[0]
            raise subprocess.CalledProcessError(retcode, cmd, output=output)
        return output

    # Exception classes used by this module.
    class CalledProcessError(Exception):
        def __init__(self, returncode, cmd, output=None):
            self.returncode = returncode
            self.cmd = cmd
            self.output = output

        def __str__(self):
            return "Command '%s' returned non-zero exit status %d" \
                % (self.cmd, self.returncode)

    # NOTE(review): the real subprocess module attributes are overwritten on
    # every call — presumably a Python 2.6 compatibility backport; this
    # shadows the stdlib implementations for the whole process. Confirm it
    # is still needed.
    subprocess.check_output = check_output
    subprocess.CalledProcessError = CalledProcessError
    output = b''
    try:
        # shell=True: 'cmd' is a full shell command line; stderr is merged
        # into stdout.
        output = subprocess.check_output(
            no_output, cmd, stderr=subprocess.STDOUT, shell=True)
        if output is None:
            output = b''
    except subprocess.CalledProcessError as e:
        if chk_err:
            Print('CalledProcessError. Error Code is ' +
                  str(e.returncode), file=sys.stderr)
            LG().Log(
                'ERROR', 'CalledProcessError. Error Code is '
                + str(e.returncode))
            Print(
                'CalledProcessError. Command string was '
                + e.cmd, file=sys.stderr)
            LG().Log(
                'ERROR', 'CalledProcessError. Command string was ' + e.cmd)
            Print('CalledProcessError. Command result was ' +
                  (e.output[:-1]).decode('ascii', 'ignore'), file=sys.stderr)
            LG().Log(
                'ERROR', 'CalledProcessError. Command result was '
                + (e.output[:-1]).decode('ascii', 'ignore'))
        # On failure, return the child's exit code and (optionally) output.
        if no_output:
            return e.returncode, None
        else:
            return e.returncode, e.output.decode('ascii', 'ignore')
    if no_output:
        return 0, None
    else:
        return 0, output.decode('ascii', 'ignore')
# Well-known tool paths for the three supported service controllers.
# Several are probed at runtime (see SystemdExists/UpstartExists/InitExists).
systemctl_path = "/usr/bin/systemctl"  # may be re-resolved by SystemdExists()
upstart_start_path = "/sbin/start"
upstart_stop_path = "/sbin/stop"
upstart_status_path = "/sbin/status"
initd_service = "/sbin/service"  # redhat-style service wrapper
initd_service_partial = "/etc/init.d/"  # direct init-script invocation prefix
initd_chkconfig = "/sbin/chkconfig"  # redhat-style enable/disable
initd_invokerc = "/usr/sbin/invoke-rc.d"  # debian-style start/stop
initd_updaterc = "/usr/sbin/update-rc.d"  # debian-style enable/disable
lsb_install_initd = "/usr/lib/lsb/install_initd"
lsb_remove_initd = "/usr/lib/lsb/remove_initd"
runlevel_path = "/sbin/runlevel"
def ReadFile(path):
    """
    Safely attempt to read a file,
    ensuring file is always closed at exit.
    Return the data and the exception object.
    The data is None if an error occurred.
    The error is None if the data was read.
    Log results to stderr.
    """
    d = None
    error = None
    with opened_w_error(path, 'rb') as (F, error):
        if error:
            # Fix: IOError/OSError has no '.message' attribute on Python 3,
            # so the original 'error.message' raised AttributeError inside
            # the error path.  str(error) already includes errno/strerror.
            Print("Exception opening file " + path + " Error Code: " +
                  str(error.errno) +
                  " Error: " + str(error), file=sys.stderr)
            LG().Log('ERROR', "Exception opening file " + path +
                     " Error Code: " +
                     str(error.errno) + " Error: " + str(error))
        else:
            d = F.read()
    return d, error
def WriteFile(path, contents):
    """
    Safely attempt to write data to a file,
    replacing the existing file or creating it and
    ensuring file is always closed at exit.
    Return the exception object.
    The error is None if the data was written.
    Log results to stderr.
    """
    error = None
    with opened_w_error(path, 'wb+') as (F, error):
        if error:
            # Fix: IOError/OSError has no '.message' attribute on Python 3,
            # so the original 'error.message' raised AttributeError inside
            # the error path.  str(error) already includes errno/strerror.
            Print("Exception opening file " + path + " Error Code: " +
                  str(error.errno) +
                  " Error: " + str(error), file=sys.stderr)
            LG().Log('ERROR', "Exception opening file " + path +
                     " Error Code: " +
                     str(error.errno) + " Error: " + str(error))
        else:
            F.write(contents)
    return error
def Process(params, no_output=False):
    """
    Join *params* into a single shell command line and run it via
    RunGetOutput (stderr merged into stdout, errors not auto-logged).

    Returns (stdout, stdout, exit_code); the second element mirrors the
    first because stderr is captured together with stdout.
    """
    # Fix/idiom: replaces a manual space-joining loop that used the
    # literal identity test "len(spc) is 0" (implementation-dependent and
    # a SyntaxWarning on Python 3.8+) with str.join.
    line = ' '.join(params)
    code, out = RunGetOutput(line, no_output, False)
    return (out, out, code)
def StartService(sc):
    """
    Start service sc.Name using the controller named in sc.Controller
    ('systemd', 'upstart' or 'init').  Returns [0] on success, [-1] on
    failure.  The start is verified afterwards with IsServiceRunning().
    """
    if sc.Controller == "systemd":
        (process_stdout, process_stderr, retval) = Process(
            [systemctl_path, "start", sc.Name])
        if retval is not 0:
            Print("Error: " + systemctl_path + " start " + sc.Name +
                  " failed: " + process_stderr, file=sys.stderr)
            LG().Log('ERROR', "Error: " + systemctl_path +
                     " start " + sc.Name + " failed: " + process_stderr)
            return [-1]
    elif sc.Controller == "upstart":
        (process_stdout, process_stderr, retval) = Process(
            [upstart_start_path, sc.Name])
        if retval is not 0:
            Print("Error: " + upstart_start_path +
                  " failed: " + process_stderr, file=sys.stderr)
            LG().Log(
                'ERROR', "Error: " + upstart_start_path
                + " failed: " + process_stderr)
            return [-1]
    elif sc.Controller == "init":
        # Prefer debian's invoke-rc.d when both debian tools exist;
        # otherwise use the redhat-style /sbin/service wrapper.
        check_state_program = initd_service
        if os.path.isfile(initd_invokerc) and os.path.isfile(initd_updaterc):
            check_state_program = initd_invokerc
        (process_stdout, process_stderr, retval) = Process(
            [check_state_program, sc.Name, "start"])
        if retval is not 0:
            Print("Error: " + check_state_program +
                  " failed: " + process_stderr, file=sys.stderr)
            LG().Log(
                'ERROR', "Error: " + check_state_program
                + " failed: " + process_stderr)
            return [-1]
    # Verify the start actually took effect, whatever the controller.
    # NOTE(review): if sc.Controller matched no branch above,
    # process_stderr is unbound here and this error path would raise
    # NameError — confirm callers always pass a validated controller.
    if not IsServiceRunning(sc):
        Print("Error: " + sc.Name + " start failed: " +
              process_stderr, file=sys.stderr)
        LG().Log('ERROR', "Error: " + sc.Name +
                 " start failed: " + process_stderr)
        return [-1]
    return [0]
def StopService(sc):
    """
    Stop service sc.Name using the controller named in sc.Controller
    ('systemd', 'upstart' or 'init').  Returns [0] on success, [-1] on
    failure.  The stop is verified afterwards with IsServiceRunning().
    """
    if sc.Controller == "systemd":
        (process_stdout, process_stderr, retval) = Process(
            [systemctl_path, "stop", sc.Name])
        if retval is not 0:
            Print("Error: " + systemctl_path + " failed: " +
                  process_stderr, file=sys.stderr)
            LG().Log(
                'ERROR', "Error: " + systemctl_path
                + " failed: " + process_stderr)
            return [-1]
    elif sc.Controller == "upstart":
        (process_stdout, process_stderr, retval) = Process(
            [upstart_stop_path, sc.Name])
        if retval is not 0:
            Print("Error: " + upstart_stop_path +
                  " failed: " + process_stderr, file=sys.stderr)
            LG().Log(
                'ERROR', "Error: " + upstart_stop_path
                + " failed: " + process_stderr)
            return [-1]
    elif sc.Controller == "init":
        # Prefer debian's invoke-rc.d when both debian tools exist;
        # otherwise use the redhat-style /sbin/service wrapper.
        check_state_program = initd_service
        if os.path.isfile(initd_invokerc) and os.path.isfile(initd_updaterc):
            check_state_program = initd_invokerc
        (process_stdout, process_stderr, retval) = Process(
            [check_state_program, sc.Name, "stop"])
        if retval is not 0:
            Print("Error: " + check_state_program +
                  " failed: " + process_stderr, file=sys.stderr)
            LG().Log(
                'ERROR', "Error: " + check_state_program
                + " failed: " + process_stderr)
            return [-1]
    # Verify the service really stopped, whatever the controller.
    # NOTE(review): process_stderr is unbound here if no branch matched —
    # see the matching note in StartService.
    if IsServiceRunning(sc):
        Print("Error: " + sc.Name + " stop failed: " +
              process_stderr, file=sys.stderr)
        LG().Log('ERROR', "Error: " + sc.Name +
                 " stop failed: " + process_stderr)
        return [-1]
    return [0]
def GetRunLevel():
    """
    Return the current SysV runlevel as an int, or -1 on failure
    (runlevel command error or unexpected output format).

    `runlevel` prints "<previous> <current>", e.g. "N 5".
    """
    (process_stdout, process_stderr, retval) = Process([runlevel_path])
    # Fix/idiom: the original used "retval is not 0" / "len(...) is not 2",
    # literal identity comparisons that depend on CPython int caching and
    # emit SyntaxWarning on Python 3.8+; use != instead.
    if retval != 0:
        Print("Error: " + runlevel_path + " failed: " +
              process_stderr, file=sys.stderr)
        LG().Log(
            'ERROR', "Error: " + runlevel_path + " failed: " + process_stderr)
        return -1
    tokens = process_stdout.split(" ")
    if len(tokens) != 2:
        Print("Error: unexpected number of tokens from " +
              runlevel_path + ". stdout: " + process_stdout, file=sys.stderr)
        LG().Log('ERROR', "Error: unexpected number of tokens from " +
                 runlevel_path + ". stdout: " + process_stdout)
        return -1
    return int(tokens[1])
def DetermineInitState(stdout):
    """
    Interpret `service <name> status` output.

    Returns True when the output indicates the service is running
    (any of the known "running" markers, or output that is exactly
    'running' after stripping whitespace), False otherwise.
    """
    markers = ("is running", "start/running", "..running", "(running)")
    if any(marker in stdout for marker in markers):
        return True
    return stdout.strip() == "running"
def DetermineInitEnabled(stdout, runlevel):
    """
    Parse `chkconfig --list <name>` output and report whether the service
    is 'on' at the given runlevel.

    The output looks like 'name 0:off 1:off 2:on ...'; the first token is
    the service name and token N+1 describes runlevel N.
    """
    levels = stdout.split()[1:]
    if runlevel >= len(levels):
        Print("runlevel " + str(runlevel) +
              " not found in chkconfig", file=sys.stderr)
        LG().Log(
            'ERROR', "runlevel " + str(runlevel) + " not found in chkconfig")
        return False
    parts = levels[runlevel].split(":")
    if len(parts) != 2:
        Print(
            "Unable to determine format for chkconfig run level",
            file=sys.stderr)
        LG().Log(
            'ERROR', "Unable to determine format for chkconfig run level")
        return False
    return parts[1] == "on"
def GetSystemdState(sc):
    """Return 'running' or 'stopped' based on `systemctl status <name>`."""
    stdout, stderr, code = Process([systemctl_path, "status", sc.Name])
    running = (code == 0 and '(running)' in stdout)
    return "running" if running else "stopped"
def TestSystemdState(sc):
    """True when no desired State is set, or it matches the actual state."""
    desired = sc.State
    return (not desired) or desired == GetSystemdState(sc)
def GetSystemdEnabled(sc):
    """True when `systemctl is-enabled <name>` exits 0."""
    stdout, stderr, code = Process([systemctl_path, "is-enabled", sc.Name])
    return code == 0
def TestSystemdEnabled(sc):
    """True when the desired Enabled flag matches the actual enabled state."""
    return sc.Enabled is GetSystemdEnabled(sc)
def TestSystemd(sc):
    """Return [0] when systemd exists and both state and enabled match; [-1] otherwise."""
    ok = SystemdExists() and TestSystemdState(sc) and TestSystemdEnabled(sc)
    return [0] if ok else [-1]
def GetUpstartState(sc):
    """
    Return 'running'/'stopped' from `status <name>` output, or '' when the
    status command itself fails.
    """
    stdout, stderr, code = Process([upstart_status_path, sc.Name])
    if code != 0:
        Print("Error: " + upstart_status_path +
              " failed: " + stderr, file=sys.stderr)
        LG().Log('ERROR', "Error: " + upstart_status_path +
                 " failed: " + stderr)
        return ""
    return "running" if (sc.Name + " start") in stdout else "stopped"
def TestUpstartState(sc):
    """True when no desired State is set, or it matches the actual state."""
    desired = sc.State
    return (not desired) or desired == GetUpstartState(sc)
def GetUpstartEnabled(sc):
    """
    Determine whether the upstart job sc.Name is enabled.

    Returns True/False when it can be determined, 'Complex' when the conf
    file uses constructs this provider refuses to interpret/modify, or
    'Error' when the conf file cannot be read.
    """
    if os.path.isfile("/etc/init/" + sc.Name + ".conf"):
        start_on_exists = False
        start_on_is_enabled = False
        stop_on_exists = False
        stop_on_is_enabled = False
        file_lines, error = ReadFile("/etc/init/" + sc.Name + ".conf")
        if error is not None:
            Print(
                "Error reading:/etc/init/" + sc.Name + ".conf",
                file=sys.stderr)
            LG().Log('ERROR', "Error reading:/etc/init/" +
                     sc.Name + ".conf")
            return "Error"
        for full_line in file_lines.splitlines():
            # everything after a '#' character is a comment, so strip it off
            line = full_line.split("#")[0]
            if "start on" in line:
                start_on_exists = True
                # Compound conditions are too complex to evaluate here.
                if ("(" in line) or ("and" in line) or ("or" in line):
                    return "Complex"
                elif "start on runlevel [" in line:
                    runlevel = GetRunLevel()
                    # Text between '[' and ']' is the runlevel set,
                    # optionally negated with '!'.
                    specified_runlevel_digits = line.split("[")[1][:-1]
                    if str(runlevel) in specified_runlevel_digits:
                        start_on_is_enabled = True
                    else:
                        start_on_is_enabled = False
                    if "!" in specified_runlevel_digits:
                        start_on_is_enabled = not start_on_is_enabled
                else:
                    return "Complex"
            if "stop on" in line:
                stop_on_exists = True
                if ("(" in line) or ("and" in line) or ("or" in line):
                    return "Complex"
                elif "stop on runlevel [" in line:
                    runlevel = GetRunLevel()
                    specified_runlevel_digits = line.split("[")[1][:-1]
                    if str(runlevel) in specified_runlevel_digits:
                        stop_on_is_enabled = True
                    else:
                        stop_on_is_enabled = False
                    if "!" in specified_runlevel_digits:
                        stop_on_is_enabled = not stop_on_is_enabled
                else:
                    return "Complex"
        if not start_on_exists and not stop_on_exists:  # not upstart
            if os.path.islink('/etc/init.d/' + sc.Name) and \
                    os.readlink('/etc/init.d/' + sc.Name) \
                    == '/lib/init/upstart-job':
                # this is a 'converted' init script, check the default rc2.d
                # for smylink to conf file. if so its enabled.
                file_list = os.listdir('/etc/rc2.d')
                for f in file_list:
                    f = '/etc/rc2.d/' + f
                    if os.path.islink(f) and os.readlink(f) == \
                            "../init.d/" + sc.Name:
                        return True
                return False
            (process_stdout, process_stderr, retval) = Process(
                ['chkconfig', sc.Name, ''])  # try init style
            if retval is 0:
                if 'off' not in process_stdout:
                    return True
            return False
        if start_on_exists and start_on_is_enabled:
            if stop_on_exists and stop_on_is_enabled:
                # Both start and stop trigger at the current runlevel —
                # contradictory, so refuse to decide.
                Print("Error: Having trouble determining whether service " +
                      sc.Name + " is enabled or disabled.", file=sys.stderr)
                LG().Log('ERROR',
                         "Error: Having trouble determining whether service " +
                         sc.Name + " is enabled or disabled.")
                return "Complex"
            else:
                return True
        else:
            return False
        # NOTE(review): the code below appears unreachable — every path
        # above returns.  Kept as-is.
        Print("Error: Unable to find line containing 'start on' in " +
              sc.Name + ".conf", file=sys.stderr)
        LG().Log('ERROR',
                 "Error: Unable to find line containing 'start on' in " +
                 sc.Name + ".conf")
        return False
    else:
        Print("Error: conf file does not exist for service named " +
              sc.Name, file=sys.stderr)
        LG().Log('ERROR',
                 "Error: conf file does not exist for service named " +
                 sc.Name)
        return False
def TestUpstartEnabled(sc):
    """
    Return GetUpstartEnabled(sc) unless it reports 'Complex', which is
    logged as unmodifiable and mapped to False.
    """
    currently_enabled = GetUpstartEnabled(sc)
    if currently_enabled != "Complex":
        return currently_enabled
    Print("Error: Cannot modify 'Enabled' state for service " + sc.Name +
          ", conf file too complex. Please use the File provider to " +
          "write your own conf file for this service.", file=sys.stderr)
    LG().Log('ERROR', "Error: Cannot modify 'Enabled' state for service "
             + sc.Name +
             ", conf file too complex. Please use the File provider to " +
             " writeyour own conf file for this service.")
    return False
def TestUpstart(sc):
    """
    Return [0] when upstart exists and both state and enabled match the
    desired settings; [-1] otherwise.
    """
    ok = (UpstartExists()
          and TestUpstartState(sc)
          and sc.Enabled is TestUpstartEnabled(sc))
    return [0] if ok else [-1]
def GetInitState(sc):
    """
    Return 'running' or 'stopped' for an init-controlled service, using
    the service wrapper when available and falling back to process-table
    inspection (IsServiceRunning) when status cannot be trusted.
    """
    check_state_program = initd_service_partial + sc.Name
    # debian style init. These are missing in redhat.
    if os.path.isfile(initd_invokerc) and os.path.isfile(initd_updaterc):
        check_state_program = '/usr/sbin/service'
        if os.path.isfile('/usr/sbin/service'):
            check_state_program = '/usr/sbin/service'
        else:  # invoke the service directly
            check_state_program = '/etc/init.d/'
    if check_state_program == '/etc/init.d/':
        # Direct init-script invocation; output is discarded (no_output)
        # and the process table is the source of truth.
        (process_stdout, process_stderr, retval) = Process(
            [check_state_program + sc.Name, "status"], True)
        if retval is not 0:
            Print("Error: " + check_state_program +
                  sc.Name + " status failed: ", file=sys.stderr)
            LG().Log('ERROR', "Error: " + check_state_program +
                     sc.Name + " status failed: ")
        if IsServiceRunning(sc):
            return "running"
        else:
            return "stopped"
    else:
        (process_stdout, process_stderr, retval) = Process(
            [check_state_program, sc.Name, "status"])
        if retval is not 0:
            # status command failed; fall back to the process table.
            if IsServiceRunning(sc):
                return "running"
            else:
                return "stopped"
        if DetermineInitState(process_stdout):
            return "running"
        else:
            return "stopped"
def TestInitState(sc):
    """True when no desired State is set, or it matches the actual state."""
    desired = sc.State
    return (not desired) or desired == GetInitState(sc)
def GetInitEnabled(sc):
    """
    Return True when the init service sc.Name is enabled at the current
    runlevel: debian systems are checked via /etc/rcN.d/S?? symlinks,
    redhat systems via `chkconfig --list`.
    """
    runlevel = GetRunLevel()
    if os.path.isfile(initd_invokerc) and os.path.isfile(initd_updaterc):
        # A service is enabled if a symbolic link
        # exists in /etc/rc${RUNLEVEL}.d/ with the name:
        # S??${sc.Name}
        matched_files = glob.glob(
            "/etc/rc" + str(runlevel) + ".d/S??" + sc.Name)
        for f in matched_files:
            if os.path.islink(f):
                return True
        return False
    else:
        check_enabled_program = initd_chkconfig
        (process_stdout, process_stderr, retval) = Process(
            [check_enabled_program, "--list", sc.Name])
        if retval is not 0:
            Print("Error: " + check_enabled_program +
                  " failed: " + process_stderr, file=sys.stderr)
            LG().Log('ERROR', "Error: " + check_enabled_program +
                     " failed: " + process_stderr)
            return False
        if DetermineInitEnabled(process_stdout, runlevel):
            return True
        else:
            return False
def TestInitEnabled(sc):
    """True when the desired Enabled flag matches the actual enabled state."""
    return sc.Enabled is GetInitEnabled(sc)
def TestInit(sc):
    """Return [0] when init tooling exists and both state and enabled match; [-1] otherwise."""
    ok = InitExists() and TestInitState(sc) and TestInitEnabled(sc)
    return [0] if ok else [-1]
def SystemdExists():
    """
    Locate systemctl via `which`; on success cache its path in the module
    global systemctl_path and return True, otherwise return False.
    """
    global systemctl_path
    code, out = RunGetOutput('which systemctl', False, False)
    if code != 0:
        return False
    systemctl_path = out.strip()
    return True
def UpstartExists():
    """
    True when an upstart bridge binary and all three upstart control tools
    (start/stop/status) are present on disk.
    """
    bridge_present = (os.path.isfile('/sbin/upstart-local-bridge')
                      or os.path.isfile('/sbin/upstart-udev-bridge'))
    tools_present = all(os.path.isfile(p) for p in
                        (upstart_start_path,
                         upstart_stop_path,
                         upstart_status_path))
    return bridge_present and tools_present
def InitExists():
    """
    True when either redhat-style (service + chkconfig) or debian-style
    (invoke-rc.d + update-rc.d) init tooling is present.
    """
    redhat_style = (os.path.isfile(initd_service)
                    and os.path.isfile(initd_chkconfig))
    debian_style = (os.path.isfile(initd_invokerc)
                    and os.path.isfile(initd_updaterc))
    return redhat_style or debian_style
def ServiceExistsInSystemd(sc):
    """
    True when systemd knows the unit: status exits 0, or its output still
    reports the unit as 'Loaded: loaded' (e.g. loaded but stopped/failed).
    """
    stdout, stderr, code = Process([systemctl_path, "status", sc.Name])
    if code == 0:
        return True
    return "Loaded: loaded" in stdout
def ServiceExistsInUpstart(sc):
    """True when `status <name>` succeeds, i.e. upstart knows the job."""
    stdout, stderr, code = Process([upstart_status_path, sc.Name])
    return code == 0
def ServiceExistsInInit(sc):
    """
    True unless the status tool reports the service as unknown
    ('unrecognized service' / 'no such service').
    """
    debian_tools = (os.path.isfile(initd_invokerc)
                    and os.path.isfile(initd_updaterc))
    check_state_program = initd_invokerc if debian_tools else initd_service
    stdout, stderr, code = Process([check_state_program, sc.Name, "status"])
    if "unrecognized service" in stderr or "no such service" in stderr:
        Print(stderr, file=sys.stderr)
        LG().Log('INFO', stderr)
        return False
    return True
def CreateSystemdService(sc):
    # Intentionally unsupported: creating systemd units is delegated to the
    # File provider.  Always logs an error and returns [-1].
    Print("Error: systemd services cannot be created from the service " +
          "provider. Please use the file provider to create a systemd " +
          "conf file, then modify the service using this service provider.",
          file=sys.stderr)
    LG().Log('ERROR',
             "Error: systemd services cannot be created from the service provider. \
Please use the file provider to create a systemd conf file, \
then modify the service using this service provider.")
    return [-1]
def ModifySystemdService(sc):
    """
    Apply the desired Enabled and State settings to an existing systemd
    unit.  Returns [0] on success, [-1] on failure.
    """
    # First reconcile enable/disable.
    if sc.Enabled is True:
        (process_stdout, process_stderr, retval) = Process(
            [systemctl_path, "enable", sc.Name + '.service'])
        if retval is not 0:
            Print("Error: " + systemctl_path + " enable " + sc.Name +
                  " failed: " + process_stderr, file=sys.stderr)
            LG().Log('ERROR', "Error: " + systemctl_path +
                     " enable " + sc.Name + " failed: " + process_stderr)
            return [-1]
    elif sc.Enabled is False:
        (process_stdout, process_stderr, retval) = Process(
            [systemctl_path, "disable", sc.Name + '.service'])
        if retval is not 0:
            Print("Error: " + systemctl_path + " disable " + sc.Name +
                  " failed: " + process_stderr, file=sys.stderr)
            LG().Log('ERROR', "Error: " + systemctl_path +
                     " disable " + sc.Name + " failed: " + process_stderr)
            return [-1]
    # Then reconcile running/stopped based on current status output.
    (process_stdout, process_stderr, retval) = Process(
        [systemctl_path, "status", sc.Name + '.service'])
    # retval may be non zero even if service exists for 'status'.
    if 'No such file or directory' in process_stdout:
        Print("Error: " + systemctl_path + " status " + sc.Name +
              " failed: " + process_stderr, file=sys.stderr)
        LG().Log('ERROR', "Error: " + systemctl_path +
                 " status " + sc.Name + " failed: " + process_stderr)
        return [-1]
    if 'Active: active' in process_stdout:
        Print("Running", file=sys.stderr)
        LG().Log('INFO', "Running")
        if sc.State and sc.State != "running":
            return StopService(sc)
    else:
        Print("Stopped", file=sys.stderr)
        LG().Log('INFO', "Stopped")
        if sc.State and sc.State != "stopped":
            return StartService(sc)
    return [0]
def CreateUpstartService(sc):
    """
    Intentionally unsupported: creating upstart jobs is delegated to the
    File provider.  Always logs an error and returns [-1].
    """
    message = ("Error: Upstart services cannot be created from the service " +
               "provider. Please use the file provider to create an upstart " +
               "conf file, then modify the service using this service provider.")
    Print(message, file=sys.stderr)
    LG().Log('ERROR', message)
    return [-1]
def ModifyUpstartConfFile(sc):
    """
    Rewrite the enable/disable configuration for an upstart job: either
    rewrite the 'start on'/'stop on' stanzas of /etc/init/<name>.conf,
    maintain rcN.d symlinks for a 'converted' init script, or fall back to
    update-rc.d.  Returns True on success, False on failure.
    """
    if os.path.isfile("/etc/init/" + sc.Name + ".conf"):
        file_lines, error = ReadFile("/etc/init/" + sc.Name + ".conf")
        # NOTE(review): if ReadFile failed, file_lines is None and
        # len(None) raises TypeError before the error check — confirm the
        # intended order of these two tests.
        if len(file_lines) is 0 or error is not None:
            Print("Error: Conf file unable to be read for service " +
                  sc.Name, file=sys.stderr)
            LG().Log(
                'ERROR', "Error: Conf file unable to be read for service " +
                sc.Name)
            return False
        outfile = ""
        start_on_exists = False
        stop_on_exists = False
        for full_line in file_lines.splitlines():
            # everything after '#' is a comment
            line = full_line.split("#")[0]
            # NOTE(review): operator precedence makes this
            # A or (B and not C); every 'start on' line matches regardless
            # of start_on_exists, so a file with multiple 'start on' lines
            # would emit the replacement stanza more than once — confirm
            # whether (A or B) and not C was intended.
            if "start on" in line or "stop on" in line and not start_on_exists:
                # If we got to this point, we can assume that we're allowed to
                # modify the conf file. No need to check for a "Complex" conf
                # file.
                start_on_exists = True
                if sc.Enabled is True:
                    outfile += "start on runlevel [2345]\n"
                    outfile += "stop on runlevel [!2345]\n"
                elif sc.Enabled is False:
                    outfile += "stop on runlevel [0123456]\n"
            elif "start on" in line or "stop on" in line and start_on_exists:
                continue  # its xtra now
            else:
                outfile += full_line + "\n"
        if start_on_exists or stop_on_exists:
            if WriteFile("/etc/init/" + sc.Name + ".conf", outfile) \
                    is not None:
                Print(
                    "Error: Unable to write conf file for service " + sc.Name,
                    file=sys.stderr)
                LG().Log(
                    'ERROR', "Error: Unable to write conf file for service " +
                    sc.Name)
                return False
            return True
        else:  # not an upstart service
            if os.path.islink('/etc/init.d/' + sc.Name) \
                    and os.readlink('/etc/init.d/' + sc.Name) \
                    == '/lib/init/upstart-job':
                # this is a 'converted' init script, check the default rc[2345].d
                # for smylink to conf file. if so its enabled.
                for rc in range(2, 6):
                    file_list = os.listdir('/etc/rc' + str(rc) + '.d')
                    found = False
                    for f in file_list:
                        f = '/etc/rc' + str(rc) + '.d/' + f
                        if os.path.islink(f) and os.readlink(f) \
                                == "../init.d/" + sc.Name:
                            found = True
                            break
                    if sc.Enabled is True:
                        if not found:
                            # create the symlink
                            os.symlink(
                                "../init.d/" + sc.Name, "/etc/rc2.d/S22" + sc.Name)
                        return True
                    else:
                        if found:
                            # 'f' still points at the matched symlink.
                            os.unlink(f)
                        return True
    # Fallback: manage enablement with update-rc.d.
    if sc.Enabled is True:
        (process_stdout, process_stderr, retval) = Process(
            ['update-rc.d', sc.Name, ' defaults'])
        if retval is not 0:
            Print("Error: " + process_stdout + " enable " +
                  sc.Name + " failed: " + process_stderr, file=sys.stderr)
            LG().Log('ERROR', "Error: " + process_stdout +
                     " enable " + sc.Name + " failed: " + process_stderr)
            return False
    else:
        (process_stdout, process_stderr, retval) = Process(
            ['update-rc.d -f ', sc.Name, ' remove'])
        if retval is not 0:
            Print("Error: " + process_stdout + " disable " +
                  sc.Name + " failed: " + process_stderr, file=sys.stderr)
            LG().Log('ERROR', "Error: " + process_stdout +
                     " disable " + sc.Name + " failed: " + process_stderr)
            return False
    return True
def ModifyUpstartService(sc):
    """
    Apply the desired Enabled and State settings to an existing upstart
    job.  Returns [0] on success, [-1] on failure.
    """
    # Reconcile enablement first; only touch the conf file when the
    # current enabled state differs from the desired one.
    if sc.Enabled is not TestUpstartEnabled(sc):
        if not ModifyUpstartConfFile(sc):
            Print("Error: Failed to modify upstart conf file", file=sys.stderr)
            LG().Log('ERROR', "Error: Failed to modify upstart conf file")
            return [-1]
    # Then reconcile running/stopped; already-satisfied states reported by
    # upstart itself are tolerated, and the result is double-checked
    # against the process table.
    if sc.State == "running":
        (process_stdout, process_stderr, retval) = Process(
            [upstart_start_path, sc.Name])
        if retval is not 0:
            if "Job is already running" not in process_stderr:
                Print("Error: " + upstart_start_path + " " + sc.Name +
                      " failed: " + process_stderr, file=sys.stderr)
                LG().Log('ERROR', "Error: " + upstart_start_path +
                         " " + sc.Name + " failed: " + process_stderr)
                return [-1]
        if not IsServiceRunning(sc):
            Print("Error: " + upstart_start_path + " " + sc.Name +
                  " failed: " + process_stderr, file=sys.stderr)
            LG().Log('ERROR', "Error: " + upstart_start_path +
                     " " + sc.Name + " failed: " + process_stderr)
            return [-1]
    elif sc.State == "stopped":
        (process_stdout, process_stderr, retval) = Process(
            [upstart_stop_path, sc.Name])
        if retval is not 0:
            if "Unknown instance" not in process_stderr:
                Print("Error: " + upstart_stop_path + " " + sc.Name +
                      " failed: " + process_stderr, file=sys.stderr)
                LG().Log('ERROR', "Error: " + upstart_stop_path +
                         " " + sc.Name + " failed: " + process_stderr)
                return [-1]
        if IsServiceRunning(sc):
            Print("Error: " + upstart_stop_path + " " + sc.Name +
                  " failed: " + process_stderr, file=sys.stderr)
            LG().Log('ERROR', "Error: " + upstart_stop_path +
                     " " + sc.Name + " failed: " + process_stderr)
            return [-1]
    return [0]
def CreateInitService(sc):
    """
    Register sc.Name's LSB init script via install_initd, then apply the
    desired enabled/state settings through ModifyInitService.
    Returns [-1] when registration fails.
    """
    stdout, stderr, code = Process([lsb_install_initd, sc.Name])
    if code != 0:
        message = ("Error: " + lsb_install_initd + " " + sc.Name +
                   " failed: " + stderr)
        Print(message, file=sys.stderr)
        LG().Log('ERROR', message)
        return [-1]
    return ModifyInitService(sc)
def ModifyInitService(sc):
    """Bring an existing SysV init service to the desired state.

    Enable/disable is done via ``update-rc.d`` on Debian-style systems
    (falling back from ``enable``/``disable`` to ``defaults``/``remove``
    for older versions) or via ``chkconfig`` elsewhere; the service is then
    started or stopped to match ``sc.State``.  Returns ``[0]`` on success,
    ``[-1]`` on failure.

    All ``retval is not 0`` identity comparisons were replaced with
    ``retval != 0``: 'is' with an int literal is implementation-dependent
    and raises SyntaxWarning on Python 3.8+.
    """
    check_state_program = initd_service
    check_enabled_program = initd_chkconfig
    # debian style init. These are missing in redhat.
    if os.path.isfile(initd_invokerc) and os.path.isfile(initd_updaterc):
        if os.path.isfile('/usr/sbin/service'):
            check_state_program = '/usr/sbin/service'
        else:  # invoke the service directly
            check_state_program = '/etc/init.d/'
        check_enabled_program = initd_updaterc
        if sc.Enabled is True:
            (process_stdout, process_stderr, retval) = Process(
                [check_enabled_program, "-f", sc.Name, "enable"])
            if retval != 0:
                Print("Error: " + check_enabled_program + " -f " +
                      sc.Name + " enable failed: " + process_stderr,
                      file=sys.stderr)
                LG().Log('ERROR', "Error: " + check_enabled_program +
                         " -f " + sc.Name + " enable failed: " + process_stderr)
                # try 'defaults' (older update-rc.d has no 'enable' verb)
                (process_stdout, process_stderr, retval) = Process(
                    [check_enabled_program, "-f", sc.Name, "defaults"])
                if retval != 0:
                    Print("Error: " + check_enabled_program + " -f " +
                          sc.Name + " defaults failed: " + process_stderr,
                          file=sys.stderr)
                    LG().Log('ERROR', "Error: " + check_enabled_program +
                             " -f " + sc.Name + " defaults failed: " + process_stderr)
                    return [-1]
                if 'already exist' in process_stdout:  # we need to remove them first
                    (process_stdout, process_stderr, retval) = Process(
                        [check_enabled_program, "-f", sc.Name, "remove"])
                    if retval != 0:
                        Print("Error: " + check_enabled_program + " -f " +
                              sc.Name + " remove failed: " + process_stderr,
                              file=sys.stderr)
                        LG().Log('ERROR', "Error: " + check_enabled_program +
                                 " -f " + sc.Name + " remove failed: " + process_stderr)
                        return [-1]
                    # it should work now
                    (process_stdout, process_stderr, retval) = Process(
                        [check_enabled_program, "-f", sc.Name, "defaults"])
                    if retval != 0:
                        Print("Error: " + check_enabled_program + " -f " +
                              sc.Name + " defaults failed: " + process_stderr,
                              file=sys.stderr)
                        LG().Log('ERROR', "Error: " + check_enabled_program +
                                 " -f " + sc.Name + " defaults failed: " + process_stderr)
                        return [-1]
        elif sc.Enabled is False:
            (process_stdout, process_stderr, retval) = Process(
                [check_enabled_program, "-f", sc.Name, "disable"])
            if retval != 0:
                Print("Error: " + check_enabled_program + " -f " +
                      sc.Name + " disable failed: " + process_stderr,
                      file=sys.stderr)
                LG().Log('ERROR', "Error: " + check_enabled_program +
                         " -f " + sc.Name + " disable failed: " + process_stderr)
                # try remove (older update-rc.d has no 'disable' verb)
                (process_stdout, process_stderr, retval) = Process(
                    [check_enabled_program, "-f", sc.Name, "remove"])
                if retval != 0:
                    Print("Error: " + check_enabled_program + " -f " +
                          sc.Name + " remove failed: " + process_stderr,
                          file=sys.stderr)
                    LG().Log('ERROR', "Error: " + check_enabled_program +
                             " -f " + sc.Name + " remove failed: " + process_stderr)
                    return [-1]
    else:
        # chkconfig-style systems (redhat). NOTE(review): the fallback below
        # invokes chkconfig with update-rc.d-style arguments ("-f ... defaults"
        # / "remove"), which looks Debian-specific -- confirm intent.
        if sc.Enabled is True:
            (process_stdout, process_stderr, retval) = Process(
                [check_enabled_program, sc.Name, "on"])
            if retval != 0:
                Print("Error: " + check_enabled_program + " " + sc.Name +
                      " on failed: " + process_stderr, file=sys.stderr)
                LG().Log('ERROR', "Error: " + check_enabled_program +
                         " " + sc.Name + " on failed: " + process_stderr)
                # try 'defaults'
                (process_stdout, process_stderr, retval) = Process(
                    [check_enabled_program, "-f", sc.Name, "defaults"])
                if retval != 0:
                    Print("Error: " + check_enabled_program + " -f " +
                          sc.Name + " defaults failed: " + process_stderr,
                          file=sys.stderr)
                    LG().Log('ERROR', "Error: " + check_enabled_program +
                             " -f " + sc.Name + " defaults failed: " + process_stderr)
                    return [-1]
                if 'already exist' in process_stdout:  # we need to remove them first
                    (process_stdout, process_stderr, retval) = Process(
                        [check_enabled_program, "-f", sc.Name, "remove"])
                    if retval != 0:
                        Print("Error: " + check_enabled_program + " -f " +
                              sc.Name + " remove failed: " + process_stderr,
                              file=sys.stderr)
                        LG().Log('ERROR', "Error: " + check_enabled_program +
                                 " -f " + sc.Name + " remove failed: " + process_stderr)
                        return [-1]
                    # it should work now
                    (process_stdout, process_stderr, retval) = Process(
                        [check_enabled_program, "-f", sc.Name, "defaults"])
                    if retval != 0:
                        Print("Error: " + check_enabled_program + " -f " +
                              sc.Name + " defaults failed: " + process_stderr,
                              file=sys.stderr)
                        LG().Log('ERROR', "Error: " + check_enabled_program +
                                 " -f " + sc.Name + " defaults failed: " + process_stderr)
                        return [-1]
        elif sc.Enabled is False:
            (process_stdout, process_stderr, retval) = Process(
                [check_enabled_program, sc.Name, "off"])
            if retval != 0:
                Print("Error: " + check_enabled_program + " " + sc.Name +
                      " off failed: " + process_stderr, file=sys.stderr)
                LG().Log('ERROR', "Error: " + check_enabled_program +
                         " " + sc.Name + " off failed: " + process_stderr)
                # try remove
                (process_stdout, process_stderr, retval) = Process(
                    [check_enabled_program, "-f", sc.Name, "remove"])
                if retval != 0:
                    Print("Error: " + check_enabled_program + " -f " +
                          sc.Name + " remove failed: " + process_stderr,
                          file=sys.stderr)
                    LG().Log('ERROR', "Error: " + check_enabled_program +
                             " -f " + sc.Name + " remove failed: " + process_stderr)
                    return [-1]
    if sc.State == "running":
        # don't try to read stdout or stderr as 'service start' command
        # re-directs them, causing a hang in subprocess.communicate()
        if check_state_program == '/etc/init.d/':
            (process_stdout, process_stderr, retval) = Process(
                [check_state_program + sc.Name, "start"], True)
            if retval != 0:
                Print("Error: " + check_state_program +
                      sc.Name + " start failed: ", file=sys.stderr)
                LG().Log('ERROR', "Error: " + check_state_program +
                         sc.Name + " start failed: ")
                return [-1]
        else:
            (process_stdout, process_stderr, retval) = Process(
                [check_state_program, sc.Name, "start"], True)
            if retval != 0:
                Print("Error: " + check_state_program + " " +
                      sc.Name + " start failed: ", file=sys.stderr)
                LG().Log('ERROR', "Error: " + check_state_program +
                         " " + sc.Name + " start failed: ")
                return [-1]
        if not IsServiceRunning(sc):
            Print("Error: " + check_state_program + " " +
                  sc.Name + " start failed: ", file=sys.stderr)
            LG().Log('ERROR', "Error: " + check_state_program +
                     " " + sc.Name + " start failed: ")
            return [-1]
    elif sc.State == "stopped":
        if check_state_program == '/etc/init.d/':
            (process_stdout, process_stderr, retval) = Process(
                [check_state_program + sc.Name, "stop"], True)
            if retval != 0:
                Print("Error: " + check_state_program +
                      sc.Name + " stop failed: ", file=sys.stderr)
                LG().Log('ERROR', "Error: " + check_state_program +
                         sc.Name + " stop failed: ")
                return [-1]
        else:
            # NOTE(review): unlike 'start', this call does not pass the extra
            # True flag to Process() -- confirm 'service stop' cannot hang the
            # same way 'service start' does.
            (process_stdout, process_stderr, retval) = Process(
                [check_state_program, sc.Name, "stop"])
            if retval != 0:
                Print("Error: " + check_state_program + " " + sc.Name +
                      " stop failed: " + process_stderr, file=sys.stderr)
                LG().Log('ERROR', "Error: " + check_state_program +
                         " " + sc.Name + " stop failed: " + process_stderr)
                return [-1]
        if IsServiceRunning(sc):
            Print("Error: " + check_state_program + " " + sc.Name +
                  " stop failed: " + process_stderr, file=sys.stderr)
            LG().Log('ERROR', "Error: " + check_state_program +
                     " " + sc.Name + " stop failed: " + process_stderr)
            return [-1]
    return [0]
def IsServiceRunning(sc):
    """Return True if a process for service *sc* appears in ``ps`` output.

    Sleeps one second first so that a just-issued start/stop has a chance
    to settle before we look.

    NOTE(review): sc.Name is interpolated unescaped into a shell pipeline
    and a grep regex; names containing shell or regex metacharacters would
    misbehave -- confirm names are always sanitized upstream.
    """
    time.sleep(1)
    cmd = 'ps -ef | grep -v grep | grep -E ".*( ' + \
        sc.Name + '|/' + sc.Name + ')( |$)"'
    code, out = RunGetOutput(cmd, False, False)
    # grep exits 0 only when at least one matching process line was found.
    # ('!=/==' instead of 'is': identity comparison with an int literal is
    # implementation-dependent and warns on Python 3.8+.)
    return code == 0
def Set(Name, Controller, Enabled, State):
    """DSC 'Set' entry point: apply the requested service configuration.

    Picks the handler quartet for the requested controller, verifies that
    the controller is actually present on this machine, and then either
    modifies the existing service or creates it.  Returns the handler's
    list-wrapped exit code, or [-1] for an unknown/absent controller.
    """
    ShowMof('SET', Name, Controller, Enabled, State)
    ctx = ServiceContext(Name, Controller, Enabled, State)
    # controller name -> (controller probe, service probe, modify, create)
    dispatch = {
        "systemd": (SystemdExists, ServiceExistsInSystemd,
                    ModifySystemdService, CreateSystemdService),
        "upstart": (UpstartExists, ServiceExistsInUpstart,
                    ModifyUpstartService, CreateUpstartService),
        "init": (InitExists, ServiceExistsInInit,
                 ModifyInitService, CreateInitService),
    }
    entry = dispatch.get(ctx.Controller)
    if entry is not None:
        controller_present, service_exists, modify, create = entry
        if controller_present() is True:
            return modify(ctx) if service_exists(ctx) else create(ctx)
    return [-1]
def Test(Name, Controller, Enabled, State):
    """DSC 'Test' entry point: check whether the service already matches
    the desired state.

    Delegates to the controller-specific test routine; returns ``[-1]``
    (after logging) for an unknown controller name.
    """
    ShowMof('TEST', Name, Controller, Enabled, State)
    sc = ServiceContext(Name, Controller, Enabled, State)
    if sc.Controller == "systemd":
        return TestSystemd(sc)
    elif sc.Controller == "upstart":
        return TestUpstart(sc)
    elif sc.Controller == "init":
        return TestInit(sc)
    else:
        Print("Invalid service controller (" + sc.Controller +
              ") specified for service: " + sc.Name, file=sys.stderr)
        LG().Log('ERROR', "Invalid service controller (" +
                 sc.Controller + ") specified for service: " + sc.Name)
        return [-1]
    # The trailing 'return [-1]' that used to follow this if/elif/else was
    # unreachable (every branch returns) and has been removed.
def Get(Name, Controller, Enabled, State):
    """DSC 'Get' entry point: report the service's current state.

    Rebinds the Enabled/State parameters with the values discovered on the
    system and sets Path to the controller's unit/conf/script location.
    Returns [exit_code, Name, Controller, Enabled, State, Path] with
    exit_code -1 when the controller is missing or the service is unknown.
    """
    ShowMof('GET', Name, Controller, Enabled, State)
    sc = ServiceContext(Name, Controller, Enabled, State)
    Path = ""
    exit_code = 0
    # NOTE(review): ServiceContext.__init__ already raises when Controller
    # is empty, so this branch appears unreachable as written -- confirm.
    if not sc.Controller:
        Print("Error: Controller not specified", file=sys.stderr)
        LG().Log('ERROR', "Error: Controller not specified")
        exit_code = -1
    elif sc.Controller == "systemd":
        if not ServiceExistsInSystemd(sc):
            Print("Error: Unable to find service named " +
                  sc.Name + " in systemd.", file=sys.stderr)
            LG().Log(
                'ERROR', "Error: Unable to find service named " +
                sc.Name + " in systemd.")
            exit_code = -1
        else:
            Enabled = GetSystemdEnabled(sc)
            State = GetSystemdState(sc)
            # NOTE(review): hard-coded unit path; some distributions use
            # /lib/systemd/system instead -- confirm.
            Path = "/usr/lib/systemd/system/" + sc.Name + ".service"
    elif sc.Controller == "upstart":
        if not ServiceExistsInUpstart(sc):
            Print("Error: Unable to find service named " +
                  sc.Name + " in upstart.", file=sys.stderr)
            LG().Log(
                'ERROR', "Error: Unable to find service named " +
                sc.Name + " in upstart.")
            exit_code = -1
        else:
            temp = GetUpstartEnabled(sc)
            if temp is False:
                Enabled = False
            else:
                # When GetUpstartEnabled returns "Complex", we assume that it
                # is enabled (and we won't modify it).
                Enabled = True
            State = GetUpstartState(sc)
            Path = "/etc/init/" + sc.Name + ".conf"
    elif sc.Controller == "init":
        if not ServiceExistsInInit(sc):
            Print("Error: Unable to find service named " +
                  sc.Name + " in init.", file=sys.stderr)
            LG().Log(
                'ERROR', "Error: Unable to find service named " +
                sc.Name + " in init.")
            exit_code = -1
        else:
            Enabled = GetInitEnabled(sc)
            State = GetInitState(sc)
            Path = "/etc/init.d/" + sc.Name
    return [exit_code, Name, Controller, Enabled, State, Path]
class ServiceContext:
    """Bag of desired-state attributes for a single managed service."""

    def __init__(self, Name, Controller, Enabled, State):
        # Both a service name and a controller are mandatory; validate the
        # name first to preserve the original error ordering.
        for value, message in ((Name, "Error: Service has no name."),
                               (Controller, "Error: Controller not specified.")):
            if not value:
                raise Exception(message)
        self.Name = Name
        self.Controller = Controller
        self.Enabled = Enabled
        self.State = State
        # Filled in later by the Get() routines.
        self.Path = ''
| mit |
mdevaev/emonoda | emonoda/apps/emdiff.py | 1 | 2675 | """
Emonoda -- A set of tools to organize and manage your torrents
Copyright (C) 2015 Devaev Maxim <mdevaev@gmail.com>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import sys
import argparse
from typing import Optional
from ..plugins.clients import BaseClient
from ..helpers import tcollection
from ..tfile import Torrent
from ..tfile import get_torrents_difference
from .. import fmt
from . import init
from . import wrap_main
from . import get_configured_log
from . import get_configured_client
# ===== Main =====
@wrap_main
def main() -> None:
    """CLI entry point: print the file-level diff between two torrents.

    Each positional argument may be a .torrent path or an info-hash;
    hashes are resolved through the configured client, which is connected
    lazily and at most once.
    """
    (parent_parser, argv, config) = init()
    args_parser = argparse.ArgumentParser(
        prog="emdiff",
        description="Show a difference between two torrent files",
        parents=[parent_parser],
    )
    args_parser.add_argument("-v", "--verbose", action="store_true")
    args_parser.add_argument("torrents", type=str, nargs=2, metavar="<path/hash>")
    options = args_parser.parse_args(argv[1:])
    torrents = tcollection.find_torrents_or_hashes(config.core.torrents_dir, options.torrents)
    with get_configured_log(config, False, sys.stdout) as log_stdout:
        with get_configured_log(config, (not options.verbose), sys.stderr) as log_stderr:
            client: Optional[BaseClient] = None
            file_lists = []
            for entry in torrents:
                if isinstance(entry, Torrent):
                    file_lists.append(entry.get_files())
                    continue
                # The entry is an info-hash: fetch the file list from the
                # client, connecting on first use only.
                if client is None:
                    client = get_configured_client(
                        config=config,
                        required=True,
                        with_customs=False,
                        log=log_stderr,
                    )
                file_lists.append(client.get_files(entry))  # type: ignore
            assert len(file_lists) == 2
            diff = get_torrents_difference(file_lists[0], file_lists[1])
            log_stdout.print(*fmt.format_torrents_diff(diff, " "))
| gpl-3.0 |
1kastner/analyse_weather_data | interpolation/interpolator/neural_network_interpolator.py | 1 | 7784 | """
"""
import sys
import logging
import os.path
import datetime
import platform
import pandas
import numpy
from sklearn.neural_network import MLPRegressor
from sklearn.metrics import mean_squared_error
from filter_weather_data import PROCESSED_DATA_DIR
from interpolation.interpolator.logger import StreamToLogger
# Redirect the (potentially multi-gigabyte) output directory to local scratch
# space when running on the ccblade cluster nodes.
if platform.uname()[1].startswith("ccblade"):  # the output files can turn several gigabyte so better not store them
    # on a network drive
    PROCESSED_DATA_DIR = "/export/scratch/1kastner"
# Wide but short pandas console output for debug-logging data frames.
pandas.set_option("display.max_columns", 500)
pandas.set_option("display.max_rows", 10)
def cloud_cover_converter(val):
    """Map a METAR-style cloud-cover code to an ordinal category.

    0: clear sky (SKC/CLR/NSC/CAVOC), 1: FEW (1-2 octas), 2: SCT (3-4),
    3: BKN (5-7), 4: OVC (8), 5: VV (sky obscured by rain or fog).

    :raises RuntimeError: for any unknown code.

    NOTE(review): 'CAVOC' looks like a typo for the METAR code 'CAVOK';
    confirm against the raw CSV data before changing it.
    """
    octas = {
        "SKC": 0, "CLR": 0, "NSC": 0, "CAVOC": 0,  # 0 octas
        "FEW": 1,  # 1-2 octas
        "SCT": 2,  # 3-4 octas
        "BKN": 3,  # 5-7 octas
        "OVC": 4,  # 8 octas
        "VV": 5,   # clouds can not be seen because of rain or fog
    }
    try:
        return octas[val]
    except KeyError:
        # Fixed missing separator in the message (was e.g. "XXXnot found").
        raise RuntimeError(val + " not found")
def load_data(file_name, start_date, end_date, verbose=False):
    """Load a training/evaluation CSV and split it into input and target.

    :param file_name: File name, e.g. training_data.csv, evaluation_data.csv
    :param start_date: first date (inclusive) of the slice to keep
    :param end_date: last date (inclusive) of the slice to keep
    :param verbose: log the first row of the input and target frames
    :return: (input_data, target) scikit-conform numpy arrays
    """
    csv_file = os.path.join(
        PROCESSED_DATA_DIR,
        "neural_networks",
        file_name
    )
    data_df = pandas.read_csv(
        csv_file,
        index_col="datetime",
        parse_dates=["datetime"],
        converters={"cloudcover_eddh": cloud_cover_converter}
    )
    # One-hot encode cloud cover and hour of day, aligned on the datetime
    # index; the raw cloudcover column is then redundant.
    cloud_cover_df = pandas.get_dummies(data_df.cloudcover_eddh, prefix="cloudcover_eddh")
    data_df.drop(columns="cloudcover_eddh", inplace=True)
    cloud_cover_df.set_index(data_df.index, inplace=True)
    df_hour = pandas.get_dummies(data_df.index.hour, prefix="hour")
    df_hour.set_index(data_df.index, inplace=True)
    data_df = data_df.assign(**{column: df_hour[column] for column in df_hour.columns})
    data_df = data_df.assign(**{column: cloud_cover_df[column] for column in cloud_cover_df.columns})
    data_df = data_df.loc[start_date:end_date]
    data_df.reset_index(inplace=True, drop=True)
    # no data means no windgusts were measured, not the absence of measurement instruments
    data_df["windgust_eddh"].fillna(0, inplace=True)
    # drop columns with NaN, e.g. precipitation at airport is currently not reported at all
    data_df.drop(columns="precipitation_eddh", inplace=True)
    old_len = len(data_df)
    # neural networks can not deal with NaN values
    data_df.dropna(axis='index', how="any", inplace=True)
    new_len = len(data_df)
    logging.debug("old: %i, new: %i" % (old_len, new_len))
    # Report the share of rows that survived the NaN filter.  (Previously
    # computed as old/new, which always reported >= 100% and could divide
    # by zero when everything was dropped.)
    if old_len:
        logging.debug("percentage: %i" % ((new_len / old_len) * 100))
    # try to predict temperature
    target_df = pandas.DataFrame(data_df.temperature)
    # based on information served by airport + learned patterns, so no data
    # from the same private weather station itself
    input_df = data_df
    attributes_to_drop = [
        attribute for attribute in input_df.columns
        if (
            not attribute.endswith("_eddh")
            and attribute not in ("lat", "lon")
            and not attribute.startswith("hour_")
            and not attribute.startswith("month_")
            and "cloudcover" not in attribute
        )
    ]
    # Single vectorized drop instead of one drop call per column.
    input_df.drop(columns=attributes_to_drop, inplace=True)
    if verbose:
        logging.debug(input_df.head(1))
        logging.debug(target_df.head(1))
    # only numpy arrays conform with scikit-learn
    input_data = input_df.values
    target = target_df.values
    return input_data, target
def train(mlp_regressor, start_date, end_date, verbose=False):
    """Fit the regressor on the training slice and log the training RMSE."""
    samples, labels = load_data("training_data.csv", start_date, end_date, verbose=verbose)
    if not len(samples) or not len(labels):
        logging.warning("training failed because of lack of data")
        # Re-load verbosely purely for the diagnostic output.
        load_data("training_data.csv", start_date, end_date, verbose=True)
        return
    mlp_regressor.fit(samples, labels)
    fitted = mlp_regressor.predict(samples)
    rmse = numpy.sqrt(mean_squared_error(labels, fitted))
    logging.info("Training RMSE: %.3f" % rmse)
def evaluate(mlp_regressor, start_date, end_date, verbose=False):
    """Predict on the evaluation slice and log the evaluation RMSE.

    Logs a warning and returns early when the slice contains no usable rows.
    """
    input_data, target = load_data("evaluation_data.csv", start_date, end_date, verbose=verbose)
    if len(input_data) == 0 or len(target) == 0:
        # Fixed copy-paste: this is the evaluation path, not training.
        logging.warning("evaluation failed because of lack of data")
        load_data("evaluation_data.csv", start_date, end_date, verbose=True)
        return
    predicted_values = mlp_regressor.predict(input_data)
    score = numpy.sqrt(mean_squared_error(target, predicted_values))
    logging.info("Evaluation RMSE: %.3f" % score)
def run_experiment(hidden_layer_sizes, number_months=12, learning_rate=.001):
    """
    Train an MLP month-by-month and validate each step on the next month.

    :param hidden_layer_sizes: The hidden layers, e.g. (40, 10)
    :param number_months: iterate over months 1..number_months-1 of 2016;
        each month is trained on, then validated against the month after it
    :param learning_rate: initial learning rate for the Adam solver
    :return:
    """
    mlp_regressor = MLPRegressor(
        hidden_layer_sizes=hidden_layer_sizes,
        activation='relu', # most likely linear effects
        solver='adam', # good choice for large data sets
        alpha=0.0001,  # L2 penalty (regularization term) parameter.
        batch_size='auto',
        learning_rate_init=learning_rate,
        max_iter=200,
        shuffle=True,
        random_state=None,
        tol=0.0001,
        #verbose=True,
        verbose=False,
        warm_start=False, # erase previous solution
        early_stopping=False, # stop if no increase during validation
        validation_fraction=0.1, # belongs to early_stopping
        beta_1=0.9, # solver=adam
        beta_2=0.999, # solver=adam
        epsilon=1e-08 # solver=adam
    )
    setup_logger(hidden_layer_sizes, learning_rate)
    logging.info("hidden_layer_sizes=%s" % str(hidden_layer_sizes))
    logging.info("learning_rate=%f" % learning_rate)
    for month in range(1, number_months):
        month_learned = "2016-%02i" % month
        logging.info("learn month %s" % month_learned)
        train(mlp_regressor, month_learned, month_learned, verbose=(month == 1))
        month_not_yet_learned = "2016-%02i" % (month + 1)
        logging.info("validate with month %s" % month_not_yet_learned)
        evaluate(mlp_regressor, month_not_yet_learned, month_not_yet_learned)
    logging.info(mlp_regressor.get_params())
    # Detach and close every handler so the next experiment logs to a fresh
    # file instead of appending to this run's handlers.
    logger = logging.getLogger()
    handlers = logger.handlers[:]
    for handler in handlers:
        handler.close()
        logger.removeHandler(handler)
def setup_logger(hidden_layer_sizes, learning_rate):
    """Configure the root logger with a console handler and a per-run file.

    Also redirects sys.stderr into the logger so that uncaught tracebacks
    end up in the experiment's log file.
    """
    root_logger = logging.getLogger('')
    root_logger.setLevel(logging.DEBUG)
    formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')

    console_handler = logging.StreamHandler(sys.stdout)
    console_handler.setFormatter(formatter)
    root_logger.addHandler(console_handler)

    # Encode network layout, timestamp and learning rate in the file name;
    # ':' and '.' are replaced so the name is safe on every file system.
    timestamp = datetime.datetime.now().isoformat().replace(":", "-").replace(".", "-")
    layer_token = "-".join([str(obj) for obj in hidden_layer_sizes])
    file_name = "interpolation_{date}_neural_network_{hidden_layer_sizes}_lr{lr}.log".format(
        hidden_layer_sizes=layer_token,
        date=timestamp,
        lr=learning_rate
    )
    path_to_file_to_log_to = os.path.join(
        os.path.dirname(os.path.realpath(__file__)),
        os.pardir,
        "log",
        file_name
    )
    file_handler = logging.FileHandler(path_to_file_to_log_to)
    file_handler.setFormatter(formatter)
    root_logger.addHandler(file_handler)

    root_logger.propagate = False
    sys.stderr = StreamToLogger(root_logger, logging.ERROR)
    root_logger.info("### Start new logging")
    return root_logger
if __name__ == "__main__":
    # Smoke-test configuration: one hidden layer of 3 neurons, training on
    # January and validating on February only.
    run_experiment((3,), number_months=2)
| agpl-3.0 |
NetDBNCKU/GAE-Conference-Web-App | django/contrib/gis/feeds.py | 86 | 5907 | from django.contrib.syndication.views import Feed as BaseFeed
from django.utils.feedgenerator import Atom1Feed, Rss201rev2Feed
class GeoFeedMixin(object):
    """
    This mixin provides the necessary routines for SyndicationFeed subclasses
    to produce simple GeoRSS or W3C Geo elements.
    """

    def georss_coords(self, coords):
        """
        In GeoRSS coordinate pairs are ordered by lat/lon and separated by
        a single white space. Given a tuple of (lon, lat) coordinates, this
        will return a unicode GeoRSS representation.
        """
        return u' '.join([u'%f %f' % (coord[1], coord[0]) for coord in coords])

    def add_georss_point(self, handler, coords, w3c_geo=False):
        """
        Adds a GeoRSS point with the given coords using the given handler.
        Handles the differences between simple GeoRSS and the more popular
        W3C Geo specification.
        """
        if w3c_geo:
            lon, lat = coords[:2]
            handler.addQuickElement(u'geo:lat', u'%f' % lat)
            handler.addQuickElement(u'geo:lon', u'%f' % lon)
        else:
            handler.addQuickElement(u'georss:point', self.georss_coords((coords,)))

    def add_georss_element(self, handler, item, w3c_geo=False):
        """
        This routine adds a GeoRSS XML element using the given item and handler.

        The item's 'geometry' entry may be a coordinate tuple/list (point or
        box) or a GEOS-style geometry object with geom_type/coords attributes.
        """
        # Getting the Geometry object.
        geom = item.get('geometry', None)
        # idiom fix: was "if not geom is None" (same for box_coords below).
        if geom is not None:
            if isinstance(geom, (list, tuple)):
                # Special case if a tuple/list was passed in. The tuple may be
                # a point or a box
                box_coords = None
                if isinstance(geom[0], (list, tuple)):
                    # Box: ( (X0, Y0), (X1, Y1) )
                    if len(geom) == 2:
                        box_coords = geom
                    else:
                        raise ValueError('Only should be two sets of coordinates.')
                else:
                    if len(geom) == 2:
                        # Point: (X, Y)
                        self.add_georss_point(handler, geom, w3c_geo=w3c_geo)
                    elif len(geom) == 4:
                        # Box: (X0, Y0, X1, Y1)
                        box_coords = (geom[:2], geom[2:])
                    else:
                        raise ValueError('Only should be 2 or 4 numeric elements.')
                # If a GeoRSS box was given via tuple.
                if box_coords is not None:
                    if w3c_geo:
                        raise ValueError('Cannot use simple GeoRSS box in W3C Geo feeds.')
                    handler.addQuickElement(u'georss:box', self.georss_coords(box_coords))
            else:
                # Getting the lower-case geometry type.
                gtype = str(geom.geom_type).lower()
                if gtype == 'point':
                    self.add_georss_point(handler, geom.coords, w3c_geo=w3c_geo)
                else:
                    if w3c_geo:
                        raise ValueError('W3C Geo only supports Point geometries.')
                    # For formatting consistent w/the GeoRSS simple standard:
                    # http://georss.org/1.0#simple
                    if gtype in ('linestring', 'linearring'):
                        handler.addQuickElement(u'georss:line', self.georss_coords(geom.coords))
                    elif gtype in ('polygon',):
                        # Only support the exterior ring.
                        handler.addQuickElement(u'georss:polygon', self.georss_coords(geom[0].coords))
                    else:
                        raise ValueError('Geometry type "%s" not supported.' % geom.geom_type)
### SyndicationFeed subclasses ###
class GeoRSSFeed(Rss201rev2Feed, GeoFeedMixin):
    """RSS 2.01 feed that annotates the channel and its items with simple
    GeoRSS elements."""
    def rss_attributes(self):
        # Declare the georss namespace on the root <rss> element.
        attrs = super(GeoRSSFeed, self).rss_attributes()
        attrs[u'xmlns:georss'] = u'http://www.georss.org/georss'
        return attrs
    def add_item_elements(self, handler, item):
        # Append the item's GeoRSS element after the standard RSS elements.
        super(GeoRSSFeed, self).add_item_elements(handler, item)
        self.add_georss_element(handler, item)
    def add_root_elements(self, handler):
        # Append the feed-level GeoRSS element after the channel elements.
        super(GeoRSSFeed, self).add_root_elements(handler)
        self.add_georss_element(handler, self.feed)
class GeoAtom1Feed(Atom1Feed, GeoFeedMixin):
    """Atom 1.0 feed that annotates the feed and its entries with simple
    GeoRSS elements."""
    def root_attributes(self):
        # Declare the georss namespace on the root <feed> element.
        attrs = super(GeoAtom1Feed, self).root_attributes()
        attrs[u'xmlns:georss'] = u'http://www.georss.org/georss'
        return attrs
    def add_item_elements(self, handler, item):
        super(GeoAtom1Feed, self).add_item_elements(handler, item)
        self.add_georss_element(handler, item)
    def add_root_elements(self, handler):
        super(GeoAtom1Feed, self).add_root_elements(handler)
        self.add_georss_element(handler, self.feed)
class W3CGeoFeed(Rss201rev2Feed, GeoFeedMixin):
    """RSS 2.01 feed using the W3C Geo vocabulary (points only -- the mixin
    raises ValueError for boxes and non-point geometries in w3c_geo mode)."""
    def rss_attributes(self):
        # Declare the W3C geo namespace on the root <rss> element.
        attrs = super(W3CGeoFeed, self).rss_attributes()
        attrs[u'xmlns:geo'] = u'http://www.w3.org/2003/01/geo/wgs84_pos#'
        return attrs
    def add_item_elements(self, handler, item):
        super(W3CGeoFeed, self).add_item_elements(handler, item)
        self.add_georss_element(handler, item, w3c_geo=True)
    def add_root_elements(self, handler):
        super(W3CGeoFeed, self).add_root_elements(handler)
        self.add_georss_element(handler, self.feed, w3c_geo=True)
### Feed subclass ###
class Feed(BaseFeed):
    """
    This is a subclass of the `Feed` from `django.contrib.syndication`.
    This allows users to define a `geometry(obj)` and/or `item_geometry(item)`
    methods on their own subclasses so that geo-referenced information may
    placed in the feed.
    """
    feed_type = GeoRSSFeed
    def feed_extra_kwargs(self, obj):
        # NOTE(review): `self.__get_dynamic_attr` name-mangles to
        # `_Feed__get_dynamic_attr`; this resolves to the base class's private
        # method only because both classes are named `Feed`. Fragile -- confirm
        # against the installed django.contrib.syndication version.
        return {'geometry' : self.__get_dynamic_attr('geometry', obj)}
    def item_extra_kwargs(self, item):
        # Same name-mangling caveat as feed_extra_kwargs above.
        return {'geometry' : self.__get_dynamic_attr('item_geometry', item)}
| bsd-3-clause |
cejebuto/OrfeoWind | include/fckeditor/editor/filemanager/connectors/py/config.py | 55 | 6949 | #!/usr/bin/env python
"""
* FCKeditor - The text editor for Internet - http://www.fckeditor.net
* Copyright (C) 2003-2008 Frederico Caldeira Knabben
*
* == BEGIN LICENSE ==
*
* Licensed under the terms of any of the following licenses at your
* choice:
*
* - GNU General Public License Version 2 or later (the "GPL")
* http://www.gnu.org/licenses/gpl.html
*
* - GNU Lesser General Public License Version 2.1 or later (the "LGPL")
* http://www.gnu.org/licenses/lgpl.html
*
* - Mozilla Public License Version 1.1 or later (the "MPL")
* http://www.mozilla.org/MPL/MPL-1.1.html
*
* == END LICENSE ==
*
* Configuration file for the File Manager Connector for Python
"""
# INSTALLATION NOTE: You must set up your server environment accordingly to run
# python scripts. This connector requires Python 2.4 or greater.
#
# Supported operation modes:
# * WSGI (recommended): You'll need apache + mod_python + modpython_gateway
# or any web server capable of the WSGI python standard
# * Plain Old CGI: Any server capable of running standard python scripts
# (although mod_python is recommended for performance)
# This was the previous connector version operation mode
#
# If you're using Apache web server, replace the htaccess.txt to to .htaccess,
# and set the proper options and paths.
# For WSGI and mod_python, you may need to download modpython_gateway from:
# http://projects.amor.org/misc/svn/modpython_gateway.py and copy it in this
# directory.
# SECURITY: You must explicitly enable this "connector". (Set it to "True").
# WARNING: don't just set "ConfigIsEnabled = True", you must be sure that only
# authenticated users can access this file or use some kind of session checking.
Enabled = False
# Path to user files relative to the document root.
UserFilesPath = '/userfiles/'
# Fill the following value it you prefer to specify the absolute path for the
# user files directory. Useful if you are using a virtual directory, symbolic
# link or alias. Examples: 'C:\\MySite\\userfiles\\' or '/root/mysite/userfiles/'.
# Attention: The above 'UserFilesPath' must point to the same directory.
# WARNING: GetRootPath may not work in virtual or mod_python configurations, and
# may not be thread safe. Use this configuration parameter instead.
UserFilesAbsolutePath = ''
# Due to security issues with Apache modules, it is recommended to leave the
# following setting enabled.
ForceSingleExtension = True
# What the user can do with this connector
ConfigAllowedCommands = [ 'QuickUpload', 'FileUpload', 'GetFolders', 'GetFoldersAndFiles', 'CreateFolder' ]
# Allowed Resource Types
ConfigAllowedTypes = ['File', 'Image', 'Flash', 'Media']
# After file is uploaded, sometimes it is required to change its permissions
# so that it was possible to access it at the later time.
# If possible, it is recommended to set more restrictive permissions, like 0755.
# Set to 0 to disable this feature.
# Note: not needed on Windows-based servers.
# NOTE: 0755 is a Python 2 octal literal (this connector targets Python 2.4+);
# under Python 3 it would have to be written 0o755.
ChmodOnUpload = 0755
# See comments above.
# Used when creating folders that does not exist.
ChmodOnFolderCreate = 0755
# Do not touch this 3 lines, see "Configuration settings for each Resource Type"
AllowedExtensions = {}; DeniedExtensions = {};
FileTypesPath = {}; FileTypesAbsolutePath = {};
QuickUploadPath = {}; QuickUploadAbsolutePath = {};
# Configuration settings for each Resource Type
#
# - AllowedExtensions: the possible extensions that can be allowed.
# If it is empty then any file type can be uploaded.
# - DeniedExtensions: The extensions that won't be allowed.
# If it is empty then no restrictions are done here.
#
# For a file to be uploaded it has to fulfill both the AllowedExtensions
# and DeniedExtensions (that's it: not being denied) conditions.
#
# - FileTypesPath: the virtual folder relative to the document root where
# these resources will be located.
# Attention: It must start and end with a slash: '/'
#
# - FileTypesAbsolutePath: the physical path to the above folder. It must be
# an absolute path.
# If it's an empty string then it will be autocalculated.
# Useful if you are using a virtual directory, symbolic link or alias.
# Examples: 'C:\\MySite\\userfiles\\' or '/root/mysite/userfiles/'.
# Attention: The above 'FileTypesPath' must point to the same directory.
# Attention: It must end with a slash: '/'
#
#
# - QuickUploadPath: the virtual folder relative to the document root where
# these resources will be uploaded using the Upload tab in the resources
# dialogs.
# Attention: It must start and end with a slash: '/'
#
# - QuickUploadAbsolutePath: the physical path to the above folder. It must be
# an absolute path.
# If it's an empty string then it will be autocalculated.
# Useful if you are using a virtual directory, symbolic link or alias.
# Examples: 'C:\\MySite\\userfiles\\' or '/root/mysite/userfiles/'.
# Attention: The above 'QuickUploadPath' must point to the same directory.
# Attention: It must end with a slash: '/'
# NOTE: the `cond and X or ''` expressions below are the pre-Python-2.5
# conditional idiom; they are safe here because X (a non-empty path string)
# is never falsy when the condition holds.
AllowedExtensions['File'] = ['7z','aiff','asf','avi','bmp','csv','doc','fla','flv','gif','gz','gzip','jpeg','jpg','mid','mov','mp3','mp4','mpc','mpeg','mpg','ods','odt','pdf','png','ppt','pxd','qt','ram','rar','rm','rmi','rmvb','rtf','sdc','sitd','swf','sxc','sxw','tar','tgz','tif','tiff','txt','vsd','wav','wma','wmv','xls','xml','zip']
DeniedExtensions['File'] = []
FileTypesPath['File'] = UserFilesPath + 'file/'
FileTypesAbsolutePath['File'] = (not UserFilesAbsolutePath == '') and (UserFilesAbsolutePath + 'file/') or ''
QuickUploadPath['File'] = FileTypesPath['File']
QuickUploadAbsolutePath['File'] = FileTypesAbsolutePath['File']
AllowedExtensions['Image'] = ['bmp','gif','jpeg','jpg','png']
DeniedExtensions['Image'] = []
FileTypesPath['Image'] = UserFilesPath + 'image/'
FileTypesAbsolutePath['Image'] = (not UserFilesAbsolutePath == '') and UserFilesAbsolutePath + 'image/' or ''
QuickUploadPath['Image'] = FileTypesPath['Image']
QuickUploadAbsolutePath['Image']= FileTypesAbsolutePath['Image']
AllowedExtensions['Flash'] = ['swf','flv']
DeniedExtensions['Flash'] = []
FileTypesPath['Flash'] = UserFilesPath + 'flash/'
FileTypesAbsolutePath['Flash'] = ( not UserFilesAbsolutePath == '') and UserFilesAbsolutePath + 'flash/' or ''
QuickUploadPath['Flash'] = FileTypesPath['Flash']
QuickUploadAbsolutePath['Flash']= FileTypesAbsolutePath['Flash']
AllowedExtensions['Media'] = ['aiff','asf','avi','bmp','fla', 'flv','gif','jpeg','jpg','mid','mov','mp3','mp4','mpc','mpeg','mpg','png','qt','ram','rm','rmi','rmvb','swf','tif','tiff','wav','wma','wmv']
DeniedExtensions['Media'] = []
FileTypesPath['Media'] = UserFilesPath + 'media/'
FileTypesAbsolutePath['Media'] = ( not UserFilesAbsolutePath == '') and UserFilesAbsolutePath + 'media/' or ''
QuickUploadPath['Media'] = FileTypesPath['Media']
QuickUploadAbsolutePath['Media']= FileTypesAbsolutePath['Media']
| agpl-3.0 |
n0trax/ansible | test/units/module_utils/basic/test_exit_json.py | 45 | 7162 | # -*- coding: utf-8 -*-
# (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division)
__metaclass__ = type
import copy
import json
import sys
from ansible.compat.tests import unittest
from ansible.module_utils import basic
from units.mock.procenv import swap_stdin_and_argv, swap_stdout
empty_invocation = {u'module_args': {}}
class TestAnsibleModuleExitJson(unittest.TestCase):
    """Verify exit_json()/fail_json() exit codes and JSON output."""

    def setUp(self):
        args = json.dumps(dict(ANSIBLE_MODULE_ARGS={}))
        # since we can't use context managers and "with" without overriding
        # run(), call __enter__/__exit__ directly
        self.stdin_swap_ctx = swap_stdin_and_argv(stdin_data=args)
        self.stdin_swap_ctx.__enter__()
        self.stdout_swap_ctx = swap_stdout()
        self.fake_stream = self.stdout_swap_ctx.__enter__()
        basic._ANSIBLE_ARGS = None
        self.module = basic.AnsibleModule(argument_spec=dict())

    def tearDown(self):
        # since we can't use context managers and "with" without overriding
        # run(), call them directly to clean up
        self.stdin_swap_ctx.__exit__(None, None, None)
        self.stdout_swap_ctx.__exit__(None, None, None)

    def _assert_exit_code(self, exc, expected_code):
        # Python 2.6's sys.exit raised SystemExit carrying a bare int;
        # later versions expose the status on the .code attribute.
        if isinstance(exc, int):
            self.assertEqual(exc, expected_code)
        else:
            self.assertEqual(exc.code, expected_code)

    def test_exit_json_no_args_exits(self):
        with self.assertRaises(SystemExit) as ctx:
            self.module.exit_json()
        self._assert_exit_code(ctx.exception, 0)
        return_val = json.loads(self.fake_stream.getvalue())
        self.assertEqual(return_val, dict(invocation=empty_invocation))

    def test_exit_json_args_exits(self):
        with self.assertRaises(SystemExit) as ctx:
            self.module.exit_json(msg='message')
        self._assert_exit_code(ctx.exception, 0)
        return_val = json.loads(self.fake_stream.getvalue())
        self.assertEqual(return_val, dict(msg="message", invocation=empty_invocation))

    def test_fail_json_exits(self):
        with self.assertRaises(SystemExit) as ctx:
            self.module.fail_json(msg='message')
        self._assert_exit_code(ctx.exception, 1)
        return_val = json.loads(self.fake_stream.getvalue())
        self.assertEqual(return_val, dict(msg="message", failed=True, invocation=empty_invocation))

    def test_exit_json_proper_changed(self):
        with self.assertRaises(SystemExit):
            self.module.exit_json(changed=True, msg='success')
        return_val = json.loads(self.fake_stream.getvalue())
        self.assertEqual(return_val, dict(changed=True, msg='success', invocation=empty_invocation))
class TestAnsibleModuleExitValuesRemoved(unittest.TestCase):
    """Verify values of no_log parameters are scrubbed from module output."""

    OMIT = 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'

    # Triples of (module args, kwargs passed to exit/fail_json,
    # expected scrubbed JSON output).
    dataset = (
        (
            dict(username='person', password='$ecret k3y'),
            dict(one=1, pwd='$ecret k3y', url='https://username:password12345@foo.com/login/',
                 not_secret='following the leader', msg='here'),
            dict(one=1, pwd=OMIT, url='https://username:password12345@foo.com/login/',
                 not_secret='following the leader', msg='here',
                 invocation=dict(module_args=dict(password=OMIT, token=None, username='person'))),
        ),
        (
            dict(username='person', password='password12345'),
            dict(one=1, pwd='$ecret k3y', url='https://username:password12345@foo.com/login/',
                 not_secret='following the leader', msg='here'),
            dict(one=1, pwd='$ecret k3y', url='https://username:********@foo.com/login/',
                 not_secret='following the leader', msg='here',
                 invocation=dict(module_args=dict(password=OMIT, token=None, username='person'))),
        ),
        (
            dict(username='person', password='$ecret k3y'),
            dict(one=1, pwd='$ecret k3y', url='https://username:$ecret k3y@foo.com/login/',
                 not_secret='following the leader', msg='here'),
            dict(one=1, pwd=OMIT, url='https://username:********@foo.com/login/',
                 not_secret='following the leader', msg='here',
                 invocation=dict(module_args=dict(password=OMIT, token=None, username='person'))),
        ),
    )

    def _check_removed_values(self, method_name, args, return_val, expected):
        """Run one dataset entry through exit_json or fail_json and compare.

        NOTE: the original asserted on the *return value* of
        exit_json/fail_json, which never runs because both raise
        SystemExit; the real check is the captured stdout JSON.
        """
        params = json.dumps(dict(ANSIBLE_MODULE_ARGS=args))
        with swap_stdin_and_argv(stdin_data=params):
            with swap_stdout():
                basic._ANSIBLE_ARGS = None
                module = basic.AnsibleModule(
                    argument_spec=dict(
                        username=dict(),
                        password=dict(no_log=True),
                        token=dict(no_log=True),
                    ),
                )
                with self.assertRaises(SystemExit):
                    getattr(module, method_name)(**return_val)
                self.assertEqual(json.loads(sys.stdout.getvalue()), expected)

    def test_exit_json_removes_values(self):
        self.maxDiff = None
        for args, return_val, expected in self.dataset:
            self._check_removed_values('exit_json', args, return_val, expected)

    def test_fail_json_removes_values(self):
        self.maxDiff = None
        for args, return_val, expected in self.dataset:
            expected = copy.deepcopy(expected)
            expected['failed'] = True
            self._check_removed_values('fail_json', args, return_val, expected)
| gpl-3.0 |
takeshineshiro/nova | nova/tests/unit/pci/test_request.py | 72 | 7389 | # Copyright 2013 Intel Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for PCI request."""
from nova import exception
from nova.pci import request
from nova import test
# JSON documents used as CONF.pci_alias entries in the tests below.
# _fake_alias1 and _fake_alias11 share the name "QuicAssist" but differ in
# product_id, exercising multi-spec aliases.
_fake_alias1 = """{
"name": "QuicAssist",
"capability_type": "pci",
"product_id": "4443",
"vendor_id": "8086",
"device_type": "ACCEL"
}"""
_fake_alias11 = """{
"name": "QuicAssist",
"capability_type": "pci",
"product_id": "4444",
"vendor_id": "8086",
"device_type": "ACCEL"
}"""
# Invalid device_type ("N") — used for negative testing.
_fake_alias2 = """{
"name": "xxx",
"capability_type": "pci",
"product_id": "1111",
"vendor_id": "1111",
"device_type": "N"
}"""
# A second valid alias with a distinct name.
_fake_alias3 = """{
"name": "IntelNIC",
"capability_type": "pci",
"product_id": "1111",
"vendor_id": "8086",
"device_type": "NIC"
}"""
class AliasTestCase(test.NoDBTestCase):
    """Tests for PCI alias parsing and alias -> PCI request translation."""

    def test_good_alias(self):
        """A well-formed alias parses into a one-element spec list."""
        self.flags(pci_alias=[_fake_alias1])
        als = request._get_alias_from_config()
        self.assertIsInstance(als['QuicAssist'], list)
        expect_dict = {
            "capability_type": "pci",
            "product_id": "4443",
            "vendor_id": "8086",
            "device_type": "ACCEL"
        }
        self.assertEqual(expect_dict, als['QuicAssist'][0])

    def test_multispec_alias(self):
        """Two aliases sharing a name accumulate into one multi-spec entry."""
        self.flags(pci_alias=[_fake_alias1, _fake_alias11])
        als = request._get_alias_from_config()
        self.assertIsInstance(als['QuicAssist'], list)
        expect_dict1 = {
            "capability_type": "pci",
            "product_id": "4443",
            "vendor_id": "8086",
            "device_type": "ACCEL"
        }
        expect_dict2 = {
            "capability_type": "pci",
            "product_id": "4444",
            "vendor_id": "8086",
            "device_type": "ACCEL"
        }
        self.assertEqual(expect_dict1, als['QuicAssist'][0])
        self.assertEqual(expect_dict2, als['QuicAssist'][1])

    # NOTE: the "aliase" typos in the method names below are kept so the
    # externally visible test identifiers remain stable.

    def test_wrong_type_aliase(self):
        """An unknown device_type is rejected."""
        self.flags(pci_alias=[_fake_alias2])
        self.assertRaises(exception.PciInvalidAlias,
                          request._get_alias_from_config)

    def test_wrong_product_id_aliase(self):
        """A non-hexadecimal product_id is rejected."""
        self.flags(pci_alias=[
            """{
                "name": "xxx",
                "capability_type": "pci",
                "product_id": "g111",
                "vendor_id": "1111",
                "device_type": "NIC"
            }"""])
        self.assertRaises(exception.PciInvalidAlias,
                          request._get_alias_from_config)

    def test_wrong_vendor_id_aliase(self):
        """A malformed vendor_id is rejected."""
        self.flags(pci_alias=[
            """{
                "name": "xxx",
                "capability_type": "pci",
                "product_id": "1111",
                "vendor_id": "0xg111",
                "device_type": "NIC"
            }"""])
        self.assertRaises(exception.PciInvalidAlias,
                          request._get_alias_from_config)

    def test_wrong_cap_type_aliase(self):
        """Only the 'pci' capability_type is accepted."""
        self.flags(pci_alias=[
            """{
                "name": "xxx",
                "capability_type": "usb",
                "product_id": "1111",
                "vendor_id": "8086",
                "device_type": "NIC"
            }"""])
        self.assertRaises(exception.PciInvalidAlias,
                          request._get_alias_from_config)

    def test_dup_aliase(self):
        """Same name/vendor/product with conflicting device_type is invalid."""
        self.flags(pci_alias=[
            """{
                "name": "xxx",
                "capability_type": "pci",
                "product_id": "1111",
                "vendor_id": "8086",
                "device_type": "NIC"
            }""",
            """{
                "name": "xxx",
                "capability_type": "pci",
                "product_id": "1111",
                "vendor_id": "8086",
                "device_type": "ACCEL"
            }"""])
        self.assertRaises(
            exception.PciInvalidAlias,
            request._get_alias_from_config)

    def _verify_result(self, expected, real):
        """Compare expected request dicts against real request objects."""
        for exp, req in zip(expected, real):
            self.assertEqual(exp['count'], req.count)
            self.assertEqual(exp['alias_name'], req.alias_name)
            self.assertEqual(exp['spec'], req.spec)

    def test_aliase_2_request(self):
        self.flags(pci_alias=[_fake_alias1, _fake_alias3])
        expect_request = [
            {'count': 3,
             'spec': [{'vendor_id': '8086', 'product_id': '4443',
                       'device_type': 'ACCEL',
                       'capability_type': 'pci'}],
             'alias_name': 'QuicAssist'},
            {'count': 1,
             'spec': [{'vendor_id': '8086', 'product_id': '1111',
                       'device_type': "NIC",
                       'capability_type': 'pci'}],
             'alias_name': 'IntelNIC'}, ]
        requests = request._translate_alias_to_requests(
            "QuicAssist : 3, IntelNIC: 1")
        self.assertEqual({p['count'] for p in requests}, {1, 3})
        self._verify_result(expect_request, requests)

    def test_aliase_2_request_invalid(self):
        """Requesting an undefined alias name raises."""
        self.flags(pci_alias=[_fake_alias1, _fake_alias3])
        self.assertRaises(exception.PciRequestAliasNotDefined,
                          request._translate_alias_to_requests,
                          "QuicAssistX : 3")

    def test_get_pci_requests_from_flavor(self):
        self.flags(pci_alias=[_fake_alias1, _fake_alias3])
        expect_request = [
            {'count': 3,
             'spec': [{'vendor_id': '8086', 'product_id': '4443',
                       'device_type': "ACCEL",
                       'capability_type': 'pci'}],
             'alias_name': 'QuicAssist'},
            {'count': 1,
             'spec': [{'vendor_id': '8086', 'product_id': '1111',
                       'device_type': "NIC",
                       'capability_type': 'pci'}],
             'alias_name': 'IntelNIC'}, ]
        flavor = {'extra_specs': {"pci_passthrough:alias":
                                  "QuicAssist:3, IntelNIC: 1"}}
        requests = request.get_pci_requests_from_flavor(flavor)
        self.assertEqual({1, 3},
                         {p.count for p in requests.requests})
        self._verify_result(expect_request, requests.requests)

    def test_get_pci_requests_from_flavor_no_extra_spec(self):
        """A flavor without extra_specs yields an empty request list."""
        self.flags(pci_alias=[_fake_alias1, _fake_alias3])
        flavor = {}
        requests = request.get_pci_requests_from_flavor(flavor)
        self.assertEqual([], requests.requests)
| apache-2.0 |
oudalab/fajita | pythonAPI/flask/lib/python3.5/site-packages/setuptools/command/easy_install.py | 21 | 85973 | #!/usr/bin/env python
"""
Easy Install
------------
A tool for doing automatic download/extract/build of distutils-based Python
packages. For detailed documentation, see the accompanying EasyInstall.txt
file, or visit the `EasyInstall home page`__.
__ https://setuptools.readthedocs.io/en/latest/easy_install.html
"""
from glob import glob
from distutils.util import get_platform
from distutils.util import convert_path, subst_vars
from distutils.errors import (
DistutilsArgError, DistutilsOptionError,
DistutilsError, DistutilsPlatformError,
)
from distutils.command.install import INSTALL_SCHEMES, SCHEME_KEYS
from distutils import log, dir_util
from distutils.command.build_scripts import first_line_re
from distutils.spawn import find_executable
import sys
import os
import zipimport
import shutil
import tempfile
import zipfile
import re
import stat
import random
import textwrap
import warnings
import site
import struct
import contextlib
import subprocess
import shlex
import io
from setuptools.extern import six
from setuptools.extern.six.moves import configparser, map
from setuptools import Command
from setuptools.sandbox import run_setup
from setuptools.py31compat import get_path, get_config_vars
from setuptools.py27compat import rmtree_safe
from setuptools.command import setopt
from setuptools.archive_util import unpack_archive
from setuptools.package_index import (
PackageIndex, parse_requirement_arg, URL_SCHEME,
)
from setuptools.command import bdist_egg, egg_info
from pkg_resources import (
yield_lines, normalize_path, resource_string, ensure_directory,
get_distribution, find_distributions, Environment, Requirement,
Distribution, PathMetadata, EggMetadata, WorkingSet, DistributionNotFound,
VersionConflict, DEVELOP_DIST,
)
import pkg_resources
# Turn on PEP440Warnings
warnings.filterwarnings("default", category=pkg_resources.PEP440Warning)

# Names re-exported as this module's public API.
__all__ = [
    'samefile', 'easy_install', 'PthDistributions', 'extract_wininst_cfg',
    'main', 'get_exe_prefixes',
]
def is_64bit():
    """Return True when the running interpreter uses 64-bit pointers."""
    pointer_size = struct.calcsize("P")
    return pointer_size == 8
def samefile(p1, p2):
    """
    Determine if two paths reference the same file.

    Augments os.path.samefile to work on Windows and
    suppresses errors if the path doesn't exist.
    """
    can_samefile = hasattr(os.path, 'samefile')
    if can_samefile and os.path.exists(p1) and os.path.exists(p2):
        return os.path.samefile(p1, p2)
    # Fallback: compare normalized, case-folded paths textually.
    norm1 = os.path.normpath(os.path.normcase(p1))
    norm2 = os.path.normpath(os.path.normcase(p2))
    return norm1 == norm2
# Python 2/3 compatibility shim for ASCII handling.
if six.PY2:
    # On Python 2, str is already bytes, so conversion is a no-op.
    def _to_ascii(s):
        return s

    def isascii(s):
        """Return True if *s* decodes cleanly as ASCII."""
        try:
            six.text_type(s, 'ascii')
            return True
        except UnicodeError:
            return False
else:
    # On Python 3, convert/verify by encoding to ASCII bytes.
    def _to_ascii(s):
        return s.encode('ascii')

    def isascii(s):
        """Return True if *s* encodes cleanly as ASCII."""
        try:
            s.encode('ascii')
            return True
        except UnicodeError:
            return False
_one_liner = lambda text: textwrap.dedent(text).strip().replace('\n', '; ')
class easy_install(Command):
    """Manage a download/build/install process"""

    description = "Find/get/install Python packages"
    command_consumes_arguments = True

    # (long-name, short-name, help) triples consumed by distutils option
    # parsing; dashes in long names map to underscored attributes.
    user_options = [
        ('prefix=', None, "installation prefix"),
        ("zip-ok", "z", "install package as a zipfile"),
        ("multi-version", "m", "make apps have to require() a version"),
        ("upgrade", "U", "force upgrade (searches PyPI for latest versions)"),
        ("install-dir=", "d", "install package to DIR"),
        ("script-dir=", "s", "install scripts to DIR"),
        ("exclude-scripts", "x", "Don't install scripts"),
        ("always-copy", "a", "Copy all needed packages to install dir"),
        ("index-url=", "i", "base URL of Python Package Index"),
        ("find-links=", "f", "additional URL(s) to search for packages"),
        ("build-directory=", "b",
         "download/extract/build in DIR; keep the results"),
        ('optimize=', 'O',
         "also compile with optimization: -O1 for \"python -O\", "
         "-O2 for \"python -OO\", and -O0 to disable [default: -O0]"),
        ('record=', None,
         "filename in which to record list of installed files"),
        ('always-unzip', 'Z', "don't install as a zipfile, no matter what"),
        ('site-dirs=', 'S', "list of directories where .pth files work"),
        ('editable', 'e', "Install specified packages in editable form"),
        ('no-deps', 'N', "don't install dependencies"),
        ('allow-hosts=', 'H', "pattern(s) that hostnames must match"),
        ('local-snapshots-ok', 'l',
         "allow building eggs from local checkouts"),
        ('version', None, "print version information and exit"),
        ('no-find-links', None,
         "Don't load find-links defined in packages being installed")
    ]
    # Flags that take no argument.
    boolean_options = [
        'zip-ok', 'multi-version', 'exclude-scripts', 'upgrade', 'always-copy',
        'editable',
        'no-deps', 'local-snapshots-ok', 'version'
    ]

    if site.ENABLE_USER_SITE:
        help_msg = "install in user site-package '%s'" % site.USER_SITE
        user_options.append(('user', None, help_msg))
        boolean_options.append('user')

    # --always-unzip is the logical inverse of --zip-ok.
    negative_opt = {'always-unzip': 'zip-ok'}

    # Factory used to build the package index; subclasses may override.
    create_index = PackageIndex
def initialize_options(self):
    """Set every option attribute to its pre-parse default."""
    # the --user option seems to be an opt-in one,
    # so the default should be False.
    self.user = 0
    self.zip_ok = self.local_snapshots_ok = None
    self.install_dir = self.script_dir = self.exclude_scripts = None
    self.index_url = None
    self.find_links = None
    self.build_directory = None
    self.args = None
    self.optimize = self.record = None
    self.upgrade = self.always_copy = self.multi_version = None
    self.editable = self.no_deps = self.allow_hosts = None
    self.root = self.prefix = self.no_report = None
    self.version = None
    self.install_purelib = None  # for pure module distributions
    self.install_platlib = None  # non-pure (dists w/ extensions)
    self.install_headers = None  # for C/C++ headers
    self.install_lib = None  # set to either purelib or platlib
    self.install_scripts = None
    self.install_data = None
    self.install_base = None
    self.install_platbase = None
    if site.ENABLE_USER_SITE:
        self.install_userbase = site.USER_BASE
        self.install_usersite = site.USER_SITE
    else:
        self.install_userbase = None
        self.install_usersite = None
    self.no_find_links = None

    # Options not specifiable via command line
    self.package_index = None
    self.pth_file = self.always_copy_from = None
    self.site_dirs = None
    self.installed_projects = {}
    self.sitepy_installed = False
    # Always read easy_install options, even if we are subclassed, or have
    # an independent instance created. This ensures that defaults will
    # always come from the standard configuration file(s)' "easy_install"
    # section, even if this is a "develop" or "install" command, or some
    # other embedding.
    self._dry_run = None
    self.verbose = self.distribution.verbose
    self.distribution._set_command_options(
        self, self.distribution.get_option_dict('easy_install')
    )
def delete_blockers(self, blockers):
    """Remove each path in *blockers* that actually exists on disk."""
    for blocker in blockers:
        if os.path.exists(blocker) or os.path.islink(blocker):
            self._delete_path(blocker)
def _delete_path(self, path):
    """Delete *path* — file, symlink, or directory tree — honoring dry-run."""
    log.info("Deleting %s", path)
    if self.dry_run:
        return
    # Real directories are removed recursively; files and symlinks unlinked.
    if os.path.isdir(path) and not os.path.islink(path):
        rmtree(path)
    else:
        os.unlink(path)
@staticmethod
def _render_version():
    """
    Render the Setuptools version and installation details, then exit.
    """
    ver = sys.version[:3]
    dist = get_distribution('setuptools')
    tmpl = 'setuptools {dist.version} from {dist.location} (Python {ver})'
    print(tmpl.format(**locals()))
    # SystemExit with no argument exits with status 0.
    raise SystemExit()
def finalize_options(self):
    """Validate and expand all options after distutils option parsing.

    Resolves install/script directories, site-dir handling, and the
    package index / find-links configuration.  Raises
    DistutilsOptionError or DistutilsArgError on invalid combinations.
    """
    # --version short-circuits everything else.
    self.version and self._render_version()

    py_version = sys.version.split()[0]
    prefix, exec_prefix = get_config_vars('prefix', 'exec_prefix')

    # Variables available for $-substitution in directory options.
    self.config_vars = {
        'dist_name': self.distribution.get_name(),
        'dist_version': self.distribution.get_version(),
        'dist_fullname': self.distribution.get_fullname(),
        'py_version': py_version,
        'py_version_short': py_version[0:3],
        'py_version_nodot': py_version[0] + py_version[2],
        'sys_prefix': prefix,
        'prefix': prefix,
        'sys_exec_prefix': exec_prefix,
        'exec_prefix': exec_prefix,
        # Only python 3.2+ has abiflags
        'abiflags': getattr(sys, 'abiflags', ''),
    }
    if site.ENABLE_USER_SITE:
        self.config_vars['userbase'] = self.install_userbase
        self.config_vars['usersite'] = self.install_usersite

    self._fix_install_dir_for_user_site()

    self.expand_basedirs()
    self.expand_dirs()

    self._expand(
        'install_dir', 'script_dir', 'build_directory',
        'site_dirs',
    )
    # If a non-default installation directory was specified, default the
    # script directory to match it.
    if self.script_dir is None:
        self.script_dir = self.install_dir

    if self.no_find_links is None:
        self.no_find_links = False

    # Let install_dir get set by install_lib command, which in turn
    # gets its info from the install command, and takes into account
    # --prefix and --home and all that other crud.
    self.set_undefined_options(
        'install_lib', ('install_dir', 'install_dir')
    )
    # Likewise, set default script_dir from 'install_scripts.install_dir'
    self.set_undefined_options(
        'install_scripts', ('install_dir', 'script_dir')
    )

    if self.user and self.install_purelib:
        self.install_dir = self.install_purelib
        self.script_dir = self.install_scripts
    # default --record from the install command
    self.set_undefined_options('install', ('record', 'record'))

    # BUGFIX: this must be a list, not a lazy map object.  On Python 3
    # a map iterator is exhausted after the first membership test below,
    # so every subsequent --site-dirs entry would be falsely rejected.
    normpath = list(map(normalize_path, sys.path))
    self.all_site_dirs = get_site_dirs()
    if self.site_dirs is not None:
        site_dirs = [
            os.path.expanduser(s.strip()) for s in
            self.site_dirs.split(',')
        ]
        for d in site_dirs:
            if not os.path.isdir(d):
                log.warn("%s (in --site-dirs) does not exist", d)
            elif normalize_path(d) not in normpath:
                raise DistutilsOptionError(
                    d + " (in --site-dirs) is not on sys.path"
                )
            else:
                self.all_site_dirs.append(normalize_path(d))
    if not self.editable:
        self.check_site_dir()
    self.index_url = self.index_url or "https://pypi.python.org/simple"
    self.shadow_path = self.all_site_dirs[:]
    for path_item in self.install_dir, normalize_path(self.script_dir):
        if path_item not in self.shadow_path:
            self.shadow_path.insert(0, path_item)

    if self.allow_hosts is not None:
        hosts = [s.strip() for s in self.allow_hosts.split(',')]
    else:
        hosts = ['*']
    if self.package_index is None:
        self.package_index = self.create_index(
            self.index_url, search_path=self.shadow_path, hosts=hosts,
        )
    self.local_index = Environment(self.shadow_path + sys.path)

    if self.find_links is not None:
        if isinstance(self.find_links, six.string_types):
            self.find_links = self.find_links.split()
    else:
        self.find_links = []
    if self.local_snapshots_ok:
        self.package_index.scan_egg_links(self.shadow_path + sys.path)
    if not self.no_find_links:
        self.package_index.add_find_links(self.find_links)
    self.set_undefined_options('install_lib', ('optimize', 'optimize'))
    if not isinstance(self.optimize, int):
        try:
            self.optimize = int(self.optimize)
            if not (0 <= self.optimize <= 2):
                raise ValueError
        except ValueError:
            raise DistutilsOptionError("--optimize must be 0, 1, or 2")

    if self.editable and not self.build_directory:
        raise DistutilsArgError(
            "Must specify a build directory (-b) when using --editable"
        )
    if not self.args:
        raise DistutilsArgError(
            "No urls, filenames, or requirements specified (see --help)")

    self.outputs = []
def _fix_install_dir_for_user_site(self):
    """
    Fix the install_dir if "--user" was used.
    """
    # Nothing to do unless --user was given and user site dirs are enabled.
    if not self.user or not site.ENABLE_USER_SITE:
        return

    self.create_home_path()
    if self.install_userbase is None:
        msg = "User base directory is not specified"
        raise DistutilsPlatformError(msg)
    self.install_base = self.install_platbase = self.install_userbase
    # Maps to the 'unix_user' or 'nt_user' distutils install scheme.
    scheme_name = os.name.replace('posix', 'unix') + '_user'
    self.select_scheme(scheme_name)
def _expand_attrs(self, attrs):
    """Expand '~' and $config_vars in each named attribute, in place."""
    for name in attrs:
        value = getattr(self, name)
        if value is None:
            continue
        if os.name == 'posix' or os.name == 'nt':
            value = os.path.expanduser(value)
        setattr(self, name, subst_vars(value, self.config_vars))
def expand_basedirs(self):
    """Calls `os.path.expanduser` on install_base, install_platbase and
    root."""
    base_attrs = ['install_base', 'install_platbase', 'root']
    self._expand_attrs(base_attrs)
def expand_dirs(self):
    """Calls `os.path.expanduser` on install dirs."""
    dir_attrs = [
        'install_purelib',
        'install_platlib',
        'install_lib',
        'install_headers',
        'install_scripts',
        'install_data',
    ]
    self._expand_attrs(dir_attrs)
def run(self):
    """Install every requested spec; optionally write a --record file."""
    if self.verbose != self.distribution.verbose:
        log.set_verbosity(self.verbose)
    try:
        for spec in self.args:
            self.easy_install(spec, not self.no_deps)
        if self.record:
            outputs = self.outputs
            if self.root:  # strip any package prefix
                root_len = len(self.root)
                for counter in range(len(outputs)):
                    outputs[counter] = outputs[counter][root_len:]
            from distutils import file_util

            self.execute(
                file_util.write_file, (self.record, outputs),
                "writing list of installed files to '%s'" %
                self.record
            )
        self.warn_deprecated_options()
    finally:
        # Restore the distribution's verbosity regardless of outcome.
        log.set_verbosity(self.distribution.verbose)
def pseudo_tempname(self):
    """Return a pseudo-tempname base in the install directory.

    This code is intentionally naive; if a malicious party can write to
    the target directory you're already in deep doodoo.
    """
    try:
        suffix = os.getpid()
    except Exception:
        suffix = random.randint(0, sys.maxsize)
    return os.path.join(self.install_dir, "test-easy-install-%s" % suffix)
def warn_deprecated_options(self):
    # Hook for subclasses; the base command has no deprecated options.
    pass
def check_site_dir(self):
    """Verify that self.install_dir is .pth-capable dir, if needed.

    Sets self.pth_file / self.sitepy_installed as side effects and
    normalizes self.install_dir.  Raises DistutilsError when a
    non-multi-version install targets a non-site directory.
    """
    instdir = normalize_path(self.install_dir)
    pth_file = os.path.join(instdir, 'easy-install.pth')

    # Is it a configured, PYTHONPATH, implicit, or explicit site dir?
    is_site_dir = instdir in self.all_site_dirs

    if not is_site_dir and not self.multi_version:
        # No? Then directly test whether it does .pth file processing
        is_site_dir = self.check_pth_processing()
    else:
        # make sure we can write to target dir
        testfile = self.pseudo_tempname() + '.write-test'
        test_exists = os.path.exists(testfile)
        try:
            if test_exists:
                os.unlink(testfile)
            open(testfile, 'w').close()
            os.unlink(testfile)
        except (OSError, IOError):
            self.cant_write_to_target()

    if not is_site_dir and not self.multi_version:
        # Can't install non-multi to non-site dir
        raise DistutilsError(self.no_default_version_msg())

    if is_site_dir:
        if self.pth_file is None:
            self.pth_file = PthDistributions(pth_file, self.all_site_dirs)
    else:
        self.pth_file = None

    if instdir not in map(normalize_path, _pythonpath()):
        # only PYTHONPATH dirs need a site.py, so pretend it's there
        self.sitepy_installed = True
    elif self.multi_version and not os.path.exists(pth_file):
        self.sitepy_installed = True  # don't need site.py in this case
        self.pth_file = None  # and don't create a .pth file
    self.install_dir = instdir
# Error-message templates used by cant_write_to_target(); the first is
# %-formatted with (triggering exception, install directory), and one of
# the other two is appended depending on whether the directory exists.
__cant_write_msg = textwrap.dedent("""
can't create or remove files in install directory
The following error occurred while trying to add or remove files in the
installation directory:
%s
The installation directory you specified (via --install-dir, --prefix, or
the distutils default setting) was:
%s
""").lstrip()

# Appended when the install directory does not exist at all.
__not_exists_id = textwrap.dedent("""
This directory does not currently exist. Please create it and try again, or
choose a different installation directory (using the -d or --install-dir
option).
""").lstrip()

# Appended when the directory exists but is not writable.
__access_msg = textwrap.dedent("""
Perhaps your account does not have write access to this directory? If the
installation directory is a system-owned directory, you may need to sign in
as the administrator or "root" account. If you do not have administrative
access to this machine, you may wish to choose a different installation
directory, preferably one that is listed in your PYTHONPATH environment
variable.
For information on other options, you may wish to consult the
documentation at:
https://setuptools.readthedocs.io/en/latest/easy_install.html
Please make the appropriate changes for your system and try again.
""").lstrip()
def cant_write_to_target(self):
    """Raise DistutilsError explaining why the install dir is unusable."""
    msg = self.__cant_write_msg % (sys.exc_info()[1], self.install_dir,)
    # Pick the follow-up text based on whether the directory exists.
    if os.path.exists(self.install_dir):
        msg += '\n' + self.__access_msg
    else:
        msg += '\n' + self.__not_exists_id
    raise DistutilsError(msg)
def check_pth_processing(self):
    """Empirically verify whether .pth files are supported in inst. dir.

    Writes a probe .pth file whose payload creates a marker file, spawns
    a child interpreter, and checks whether the marker appeared.
    Returns True on success, False otherwise; always cleans up the probe
    and marker files.
    """
    instdir = self.install_dir
    log.info("Checking .pth file support in %s", instdir)
    pth_file = self.pseudo_tempname() + ".pth"
    ok_file = pth_file + '.ok'
    ok_exists = os.path.exists(ok_file)
    # One-liner executed by the child via .pth processing; it creates
    # ok_file as evidence that the .pth was honored.
    tmpl = _one_liner("""
import os
f = open({ok_file!r}, 'w')
f.write('OK')
f.close()
""") + '\n'
    try:
        if ok_exists:
            os.unlink(ok_file)
        dirname = os.path.dirname(ok_file)
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        f = open(pth_file, 'w')
    except (OSError, IOError):
        self.cant_write_to_target()
    else:
        try:
            f.write(tmpl.format(**locals()))
            f.close()
            f = None
            executable = sys.executable
            if os.name == 'nt':
                dirname, basename = os.path.split(executable)
                alt = os.path.join(dirname, 'pythonw.exe')
                use_alt = (
                    basename.lower() == 'python.exe' and
                    os.path.exists(alt)
                )
                if use_alt:
                    # use pythonw.exe to avoid opening a console window
                    executable = alt

            from distutils.spawn import spawn
            # -E ignores PYTHONPATH etc. so only the install dir matters.
            spawn([executable, '-E', '-c', 'pass'], 0)

            if os.path.exists(ok_file):
                log.info(
                    "TEST PASSED: %s appears to support .pth files",
                    instdir
                )
                return True
        finally:
            if f:
                f.close()
            if os.path.exists(ok_file):
                os.unlink(ok_file)
            if os.path.exists(pth_file):
                os.unlink(pth_file)
    if not self.multi_version:
        log.warn("TEST FAILED: %s does NOT support .pth files", instdir)
    return False
def install_egg_scripts(self, dist):
    """Write all the scripts for `dist`, unless scripts are excluded"""
    if not self.exclude_scripts and dist.metadata_isdir('scripts'):
        for name in dist.metadata_listdir('scripts'):
            if dist.metadata_isdir('scripts/' + name):
                # The "script" is a directory, likely a Python 3
                # __pycache__ directory, so skip it.
                continue
            contents = dist.get_metadata('scripts/' + name)
            self.install_script(dist, name, contents)
    self.install_wrapper_scripts(dist)
def add_output(self, path):
    """Record *path* (or every file beneath it) as an installed output."""
    if not os.path.isdir(path):
        self.outputs.append(path)
        return
    for base, _dirs, files in os.walk(path):
        self.outputs.extend(os.path.join(base, name) for name in files)
def not_editable(self, spec):
    """Reject *spec* when --editable is active (files/URLs unsupported)."""
    if not self.editable:
        return
    raise DistutilsArgError(
        "Invalid argument %r: you can't use filenames or URLs "
        "with --editable (except via the --find-links option)."
        % (spec,)
    )
def check_editable(self, spec):
    """Ensure the --editable checkout target for *spec* is vacant."""
    if not self.editable:
        return
    target = os.path.join(self.build_directory, spec.key)
    if os.path.exists(target):
        raise DistutilsArgError(
            "%r already exists in %s; can't do a checkout there" %
            (spec.key, self.build_directory)
        )
@contextlib.contextmanager
def _tmpdir(self):
    """Context manager yielding a temp working dir, removed on exit."""
    tmpdir = tempfile.mkdtemp(prefix=six.u("easy_install-"))
    try:
        # cast to str as workaround for #709 and #710 and #712
        yield str(tmpdir)
    finally:
        # NOTE(review): rmtree_safe comes from setuptools.py27compat —
        # assumed to normalize the path for safe removal; confirm there.
        os.path.exists(tmpdir) and rmtree(rmtree_safe(tmpdir))
def easy_install(self, spec, deps=False):
    """Fetch, build, and install *spec*; return the Distribution used.

    *spec* may be a Requirement object, a URL, or a local path; with
    *deps* true, dependencies are processed as well.
    """
    if not self.editable:
        self.install_site_py()

    with self._tmpdir() as tmpdir:
        if not isinstance(spec, Requirement):
            if URL_SCHEME(spec):
                # It's a url, download it to tmpdir and process
                self.not_editable(spec)
                dl = self.package_index.download(spec, tmpdir)
                return self.install_item(None, dl, tmpdir, deps, True)

            elif os.path.exists(spec):
                # Existing file or directory, just process it directly
                self.not_editable(spec)
                return self.install_item(None, spec, tmpdir, deps, True)
            else:
                spec = parse_requirement_arg(spec)

        self.check_editable(spec)
        dist = self.package_index.fetch_distribution(
            spec, tmpdir, self.upgrade, self.editable,
            not self.always_copy, self.local_index
        )
        if dist is None:
            msg = "Could not find suitable distribution for %r" % spec
            if self.always_copy:
                msg += " (--always-copy skips system and development eggs)"
            raise DistutilsError(msg)
        elif dist.precedence == DEVELOP_DIST:
            # .egg-info dists don't need installing, just process deps
            self.process_distribution(spec, dist, deps, "Using")
            return dist
        else:
            return self.install_item(spec, dist.location, tmpdir, deps)
def install_item(self, spec, download, tmpdir, deps, install_needed=False):
    """Install the artifact at *download*, or reuse an installed egg.

    Returns the Distribution matching *spec*, if any of the processed
    distributions satisfies it.
    """
    # Installation is also needed if file in tmpdir or is not an egg
    install_needed = install_needed or self.always_copy
    install_needed = install_needed or os.path.dirname(download) == tmpdir
    install_needed = install_needed or not download.endswith('.egg')
    install_needed = install_needed or (
        self.always_copy_from is not None and
        os.path.dirname(normalize_path(download)) ==
        normalize_path(self.always_copy_from)
    )

    if spec and not install_needed:
        # at this point, we know it's a local .egg, we just don't know if
        # it's already installed.
        for dist in self.local_index[spec.project_name]:
            if dist.location == download:
                break
        else:
            install_needed = True  # it's not in the local index

    log.info("Processing %s", os.path.basename(download))

    if install_needed:
        dists = self.install_eggs(spec, download, tmpdir)
        for dist in dists:
            self.process_distribution(spec, dist, deps)
    else:
        dists = [self.egg_distribution(download)]
        self.process_distribution(spec, dists[0], deps, "Using")

    if spec is not None:
        for dist in dists:
            if dist in spec:
                return dist
def select_scheme(self, name):
    """Sets the install directories by applying the install schemes."""
    # it's the caller's problem if they supply a bad name!
    scheme = INSTALL_SCHEMES[name]
    for key in SCHEME_KEYS:
        attr = 'install_' + key
        current = getattr(self, attr)
        if current is None:
            setattr(self, attr, scheme[key])
    def process_distribution(self, requirement, dist, deps=True, *info):
        """Register ``dist`` as installed and (optionally) resolve its deps.

        Updates the .pth file and the package/local indexes, installs the
        distribution's scripts, then resolves and installs dependencies via
        a throwaway WorkingSet unless ``deps`` is false.  Extra ``info``
        words are passed to installation_report() (e.g. "Using").
        Raises DistutilsError on unresolvable or conflicting requirements.
        """
        self.update_pth(dist)
        self.package_index.add(dist)
        # replace any stale entry for the same distribution in the local index
        if dist in self.local_index[dist.key]:
            self.local_index.remove(dist)
        self.local_index.add(dist)
        self.install_egg_scripts(dist)
        self.installed_projects[dist.key] = dist
        log.info(self.installation_report(requirement, dist, *info))
        if (dist.has_metadata('dependency_links.txt') and
                not self.no_find_links):
            self.package_index.add_find_links(
                dist.get_metadata_lines('dependency_links.txt')
            )
        if not deps and not self.always_copy:
            return
        elif requirement is not None and dist.key != requirement.key:
            log.warn("Skipping dependencies for %s", dist)
            return  # XXX this is not the distribution we were looking for
        elif requirement is None or dist not in requirement:
            # if we wound up with a different version, resolve what we've got
            distreq = dist.as_requirement()
            requirement = Requirement(str(distreq))
        log.info("Processing dependencies for %s", requirement)
        try:
            # resolve against a fresh WorkingSet; self.easy_install is the
            # installer callback used for anything not yet available
            distros = WorkingSet([]).resolve(
                [requirement], self.local_index, self.easy_install
            )
        except DistributionNotFound as e:
            raise DistutilsError(str(e))
        except VersionConflict as e:
            raise DistutilsError(e.report())
        if self.always_copy or self.always_copy_from:
            # Force all the relevant distros to be copied or activated
            for dist in distros:
                if dist.key not in self.installed_projects:
                    self.easy_install(dist.as_requirement())
        log.info("Finished processing dependencies for %s", requirement)
def should_unzip(self, dist):
if self.zip_ok is not None:
return not self.zip_ok
if dist.has_metadata('not-zip-safe'):
return True
if not dist.has_metadata('zip-safe'):
return True
return False
    def maybe_move(self, spec, dist_filename, setup_base):
        """Move an unpacked source tree into the --build-directory.

        Returns the directory the build should proceed from: either the
        destination under ``self.build_directory`` or the original
        ``setup_base`` when the destination already exists.
        """
        dst = os.path.join(self.build_directory, spec.key)
        if os.path.exists(dst):
            # don't clobber a previous build; keep working from setup_base
            msg = (
                "%r already exists in %s; build directory %s will not be kept"
            )
            log.warn(msg, spec.key, self.build_directory, setup_base)
            return setup_base
        if os.path.isdir(dist_filename):
            setup_base = dist_filename
        else:
            if os.path.dirname(dist_filename) == setup_base:
                os.unlink(dist_filename)  # get it out of the tmp dir
            contents = os.listdir(setup_base)
            if len(contents) == 1:
                dist_filename = os.path.join(setup_base, contents[0])
                if os.path.isdir(dist_filename):
                    # if the only thing there is a directory, move it instead
                    setup_base = dist_filename
        ensure_directory(dst)
        shutil.move(setup_base, dst)
        return dst
def install_wrapper_scripts(self, dist):
if self.exclude_scripts:
return
for args in ScriptWriter.best().get_args(dist):
self.write_script(*args)
    def install_script(self, dist, script_name, script_text, dev_path=None):
        """Generate a legacy script wrapper and install it"""
        spec = str(dist.as_requirement())
        is_script = is_python_script(script_text, script_name)

        if is_script:
            # interpolate spec/dev_path (picked up via locals()) into the
            # wrapper template, then prepend the proper #! header
            body = self._load_template(dev_path) % locals()
            script_text = ScriptWriter.get_header(script_text) + body
        # non-Python scripts are written through unchanged, in binary mode
        self.write_script(script_name, _to_ascii(script_text), 'b')
@staticmethod
def _load_template(dev_path):
"""
There are a couple of template scripts in the package. This
function loads one of them and prepares it for use.
"""
# See https://github.com/pypa/setuptools/issues/134 for info
# on script file naming and downstream issues with SVR4
name = 'script.tmpl'
if dev_path:
name = name.replace('.tmpl', ' (dev).tmpl')
raw_bytes = resource_string('setuptools', name)
return raw_bytes.decode('utf-8')
    def write_script(self, script_name, contents, mode="t", blockers=()):
        """Write an executable file to the scripts directory"""
        # remove any stale companion files (e.g. old .py/.pyw wrappers)
        self.delete_blockers(  # clean up old .py/.pyw w/o a script
            [os.path.join(self.script_dir, x) for x in blockers]
        )
        log.info("Installing %s script to %s", script_name, self.script_dir)
        target = os.path.join(self.script_dir, script_name)
        self.add_output(target)
        # capture the umask so the final chmod respects it
        mask = current_umask()
        if not self.dry_run:
            ensure_directory(target)
            if os.path.exists(target):
                os.unlink(target)
            # mode is "t" (text) or "b" (binary), appended to "w"
            with open(target, "w" + mode) as f:
                f.write(contents)
            chmod(target, 0o777 - mask)
    def install_eggs(self, spec, dist_filename, tmpdir):
        """Install ``dist_filename`` and return the resulting distributions.

        Eggs and bdist_wininst .exe installers are installed directly;
        anything else is unpacked (or used in place, for a directory) and
        built via its setup script.  Returns a list of Distribution objects
        (empty in --editable mode).  Raises DistutilsError when no usable
        setup script can be located.
        """
        # .egg dirs or files are already built, so just return them
        if dist_filename.lower().endswith('.egg'):
            return [self.install_egg(dist_filename, tmpdir)]
        elif dist_filename.lower().endswith('.exe'):
            return [self.install_exe(dist_filename, tmpdir)]

        # Anything else, try to extract and build
        setup_base = tmpdir
        if os.path.isfile(dist_filename) and not dist_filename.endswith('.py'):
            unpack_archive(dist_filename, tmpdir, self.unpack_progress)
        elif os.path.isdir(dist_filename):
            setup_base = os.path.abspath(dist_filename)

        if (setup_base.startswith(tmpdir)  # something we downloaded
                and self.build_directory and spec is not None):
            setup_base = self.maybe_move(spec, dist_filename, setup_base)

        # Find the setup.py file
        setup_script = os.path.join(setup_base, 'setup.py')

        if not os.path.exists(setup_script):
            # fall back to a single-subdirectory layout (e.g. sdist roots)
            setups = glob(os.path.join(setup_base, '*', 'setup.py'))
            if not setups:
                raise DistutilsError(
                    "Couldn't find a setup script in %s" %
                    os.path.abspath(dist_filename)
                )
            if len(setups) > 1:
                raise DistutilsError(
                    "Multiple setup scripts in %s" %
                    os.path.abspath(dist_filename)
                )
            setup_script = setups[0]

        # Now run it, and return the result
        if self.editable:
            log.info(self.report_editable(spec, setup_script))
            return []
        else:
            return self.build_and_install(setup_script, setup_base)
def egg_distribution(self, egg_path):
if os.path.isdir(egg_path):
metadata = PathMetadata(egg_path, os.path.join(egg_path,
'EGG-INFO'))
else:
metadata = EggMetadata(zipimport.zipimporter(egg_path))
return Distribution.from_filename(egg_path, metadata=metadata)
    def install_egg(self, egg_path, tmpdir):
        """Copy, move, or extract the egg at ``egg_path`` into install_dir.

        Chooses move vs. copy based on whether the egg came from ``tmpdir``,
        unzips it when should_unzip() says so, keeps the zipimporter caches
        consistent, and returns the installed Distribution.
        """
        destination = os.path.join(
            self.install_dir,
            os.path.basename(egg_path),
        )
        destination = os.path.abspath(destination)
        if not self.dry_run:
            ensure_directory(destination)

        dist = self.egg_distribution(egg_path)
        if not samefile(egg_path, destination):
            # clear out whatever currently occupies the destination
            if os.path.isdir(destination) and not os.path.islink(destination):
                dir_util.remove_tree(destination, dry_run=self.dry_run)
            elif os.path.exists(destination):
                self.execute(
                    os.unlink,
                    (destination,),
                    "Removing " + destination,
                )
            try:
                new_dist_is_zipped = False
                if os.path.isdir(egg_path):
                    # unpacked egg dir: move if it's ours (in tmpdir), else copy
                    if egg_path.startswith(tmpdir):
                        f, m = shutil.move, "Moving"
                    else:
                        f, m = shutil.copytree, "Copying"
                elif self.should_unzip(dist):
                    self.mkpath(destination)
                    f, m = self.unpack_and_compile, "Extracting"
                else:
                    new_dist_is_zipped = True
                    if egg_path.startswith(tmpdir):
                        f, m = shutil.move, "Moving"
                    else:
                        f, m = shutil.copy2, "Copying"
                self.execute(
                    f,
                    (egg_path, destination),
                    (m + " %s to %s") % (
                        os.path.basename(egg_path),
                        os.path.dirname(destination)
                    ),
                )
                update_dist_caches(
                    destination,
                    fix_zipimporter_caches=new_dist_is_zipped,
                )
            except Exception:
                # on failure, at least drop stale finder caches before
                # re-raising
                update_dist_caches(destination, fix_zipimporter_caches=False)
                raise

        self.add_output(destination)
        return self.egg_distribution(destination)
def install_exe(self, dist_filename, tmpdir):
# See if it's valid, get data
cfg = extract_wininst_cfg(dist_filename)
if cfg is None:
raise DistutilsError(
"%s is not a valid distutils Windows .exe" % dist_filename
)
# Create a dummy distribution object until we build the real distro
dist = Distribution(
None,
project_name=cfg.get('metadata', 'name'),
version=cfg.get('metadata', 'version'), platform=get_platform(),
)
# Convert the .exe to an unpacked egg
egg_path = os.path.join(tmpdir, dist.egg_name() + '.egg')
dist.location = egg_path
egg_tmp = egg_path + '.tmp'
_egg_info = os.path.join(egg_tmp, 'EGG-INFO')
pkg_inf = os.path.join(_egg_info, 'PKG-INFO')
ensure_directory(pkg_inf) # make sure EGG-INFO dir exists
dist._provider = PathMetadata(egg_tmp, _egg_info) # XXX
self.exe_to_egg(dist_filename, egg_tmp)
# Write EGG-INFO/PKG-INFO
if not os.path.exists(pkg_inf):
f = open(pkg_inf, 'w')
f.write('Metadata-Version: 1.0\n')
for k, v in cfg.items('metadata'):
if k != 'target_version':
f.write('%s: %s\n' % (k.replace('_', '-').title(), v))
f.close()
script_dir = os.path.join(_egg_info, 'scripts')
# delete entry-point scripts to avoid duping
self.delete_blockers([
os.path.join(script_dir, args[0])
for args in ScriptWriter.get_args(dist)
])
# Build .egg file from tmpdir
bdist_egg.make_zipfile(
egg_path, egg_tmp, verbose=self.verbose, dry_run=self.dry_run,
)
# install the .egg
return self.install_egg(egg_path, tmpdir)
def exe_to_egg(self, dist_filename, egg_tmp):
"""Extract a bdist_wininst to the directories an egg would use"""
# Check for .pth file and set up prefix translations
prefixes = get_exe_prefixes(dist_filename)
to_compile = []
native_libs = []
top_level = {}
def process(src, dst):
s = src.lower()
for old, new in prefixes:
if s.startswith(old):
src = new + src[len(old):]
parts = src.split('/')
dst = os.path.join(egg_tmp, *parts)
dl = dst.lower()
if dl.endswith('.pyd') or dl.endswith('.dll'):
parts[-1] = bdist_egg.strip_module(parts[-1])
top_level[os.path.splitext(parts[0])[0]] = 1
native_libs.append(src)
elif dl.endswith('.py') and old != 'SCRIPTS/':
top_level[os.path.splitext(parts[0])[0]] = 1
to_compile.append(dst)
return dst
if not src.endswith('.pth'):
log.warn("WARNING: can't process %s", src)
return None
# extract, tracking .pyd/.dll->native_libs and .py -> to_compile
unpack_archive(dist_filename, egg_tmp, process)
stubs = []
for res in native_libs:
if res.lower().endswith('.pyd'): # create stubs for .pyd's
parts = res.split('/')
resource = parts[-1]
parts[-1] = bdist_egg.strip_module(parts[-1]) + '.py'
pyfile = os.path.join(egg_tmp, *parts)
to_compile.append(pyfile)
stubs.append(pyfile)
bdist_egg.write_stub(resource, pyfile)
self.byte_compile(to_compile) # compile .py's
bdist_egg.write_safety_flag(
os.path.join(egg_tmp, 'EGG-INFO'),
bdist_egg.analyze_egg(egg_tmp, stubs)) # write zip-safety flag
for name in 'top_level', 'native_libs':
if locals()[name]:
txt = os.path.join(egg_tmp, 'EGG-INFO', name + '.txt')
if not os.path.exists(txt):
f = open(txt, 'w')
f.write('\n'.join(locals()[name]) + '\n')
f.close()
__mv_warning = textwrap.dedent("""
Because this distribution was installed --multi-version, before you can
import modules from this package in an application, you will need to
'import pkg_resources' and then use a 'require()' call similar to one of
these examples, in order to select the desired version:
pkg_resources.require("%(name)s") # latest installed version
pkg_resources.require("%(name)s==%(version)s") # this exact version
pkg_resources.require("%(name)s>=%(version)s") # this version or higher
""").lstrip()
__id_warning = textwrap.dedent("""
Note also that the installation directory must be on sys.path at runtime for
this to work. (e.g. by being the application's script directory, by being on
PYTHONPATH, or by being added to sys.path by your code.)
""")
    def installation_report(self, req, dist, what="Installed"):
        """Helpful installation message for display to package users"""
        msg = "\n%(what)s %(eggloc)s%(extras)s"
        if self.multi_version and not self.no_report:
            msg += '\n' + self.__mv_warning
        if self.install_dir not in map(normalize_path, sys.path):
            msg += '\n' + self.__id_warning

        # these locals feed the %(...)s interpolation below -- do not rename
        eggloc = dist.location
        name = dist.project_name
        version = dist.version
        extras = ''  # TODO: self.report_extras(req, dist)
        return msg % locals()
    # Message template for --editable extractions; interpolated with
    # %(spec)s / %(dirname)s / %(python)s in report_editable below.
    __editable_msg = textwrap.dedent("""
        Extracted editable version of %(spec)s to %(dirname)s

        If it uses setuptools in its setup script, you can activate it in
        "development" mode by going to that directory and running::

            %(python)s setup.py develop

        See the setuptools documentation for the "develop" command for more info.
        """).lstrip()

    def report_editable(self, spec, setup_script):
        """Return the message telling the user where the editable source is."""
        # dirname/python feed the %(...)s interpolation -- do not rename
        dirname = os.path.dirname(setup_script)
        python = sys.executable
        return '\n' + self.__editable_msg % locals()
    def run_setup(self, setup_script, setup_base, args):
        """Run ``setup_script`` (sandboxed) with extra ``args``.

        Verbosity/dry-run flags are prepended based on this command's own
        options.  A SystemExit from the script is converted into a
        DistutilsError.
        """
        # make sure the setup script sees our patched commands
        sys.modules.setdefault('distutils.command.bdist_egg', bdist_egg)
        sys.modules.setdefault('distutils.command.egg_info', egg_info)

        args = list(args)
        if self.verbose > 2:
            v = 'v' * (self.verbose - 1)
            args.insert(0, '-' + v)
        elif self.verbose < 2:
            args.insert(0, '-q')
        if self.dry_run:
            args.insert(0, '-n')
        log.info(
            "Running %s %s", setup_script[len(setup_base) + 1:], ' '.join(args)
        )
        try:
            # module-level sandboxed run_setup, not this method
            run_setup(setup_script, args)
        except SystemExit as v:
            raise DistutilsError("Setup script exited with %s" % (v.args[0],))
    def build_and_install(self, setup_script, setup_base):
        """Run bdist_egg on ``setup_script`` and install the produced eggs.

        Returns the list of installed Distributions (may be empty; a
        warning is logged when the build produced nothing).
        """
        args = ['bdist_egg', '--dist-dir']

        dist_dir = tempfile.mkdtemp(
            prefix='egg-dist-tmp-', dir=os.path.dirname(setup_script)
        )
        try:
            # propagate our fetch options to the child setup run
            self._set_fetcher_options(os.path.dirname(setup_script))
            args.append(dist_dir)

            self.run_setup(setup_script, setup_base, args)
            all_eggs = Environment([dist_dir])
            eggs = []
            for key in all_eggs:
                for dist in all_eggs[key]:
                    eggs.append(self.install_egg(dist.location, setup_base))
            if not eggs and not self.dry_run:
                log.warn("No eggs found in %s (setup script problem?)",
                         dist_dir)
            return eggs
        finally:
            rmtree(dist_dir)
            log.set_verbosity(self.verbose)  # restore our log verbosity
def _set_fetcher_options(self, base):
"""
When easy_install is about to run bdist_egg on a source dist, that
source dist might have 'setup_requires' directives, requiring
additional fetching. Ensure the fetcher options given to easy_install
are available to that command as well.
"""
# find the fetch options from easy_install and write them out
# to the setup.cfg file.
ei_opts = self.distribution.get_option_dict('easy_install').copy()
fetch_directives = (
'find_links', 'site_dirs', 'index_url', 'optimize',
'site_dirs', 'allow_hosts',
)
fetch_options = {}
for key, val in ei_opts.items():
if key not in fetch_directives:
continue
fetch_options[key.replace('_', '-')] = val[1]
# create a settings dictionary suitable for `edit_config`
settings = dict(easy_install=fetch_options)
cfg_filename = os.path.join(base, 'setup.cfg')
setopt.edit_config(cfg_filename, settings)
def update_pth(self, dist):
if self.pth_file is None:
return
for d in self.pth_file[dist.key]: # drop old entries
if self.multi_version or d.location != dist.location:
log.info("Removing %s from easy-install.pth file", d)
self.pth_file.remove(d)
if d.location in self.shadow_path:
self.shadow_path.remove(d.location)
if not self.multi_version:
if dist.location in self.pth_file.paths:
log.info(
"%s is already the active version in easy-install.pth",
dist,
)
else:
log.info("Adding %s to easy-install.pth file", dist)
self.pth_file.add(dist) # add new entry
if dist.location not in self.shadow_path:
self.shadow_path.append(dist.location)
if not self.dry_run:
self.pth_file.save()
if dist.key == 'setuptools':
# Ensure that setuptools itself never becomes unavailable!
# XXX should this check for latest version?
filename = os.path.join(self.install_dir, 'setuptools.pth')
if os.path.islink(filename):
os.unlink(filename)
f = open(filename, 'wt')
f.write(self.pth_file.make_relative(dist.location) + '\n')
f.close()
    def unpack_progress(self, src, dst):
        """Progress filter for unpacking: log each entry, never skip it."""
        # Progress filter for unpacking
        log.debug("Unpacking %s to %s", src, dst)
        return dst  # only unpack-and-compile skips files for dry run
    def unpack_and_compile(self, egg_path, destination):
        """Unpack an egg archive, byte-compile its modules, and make
        shared libraries readable/executable."""
        to_compile = []
        to_chmod = []

        def pf(src, dst):
            # collect .py files for byte-compilation and native libs for
            # chmod; EGG-INFO sources are deliberately not compiled
            if dst.endswith('.py') and not src.startswith('EGG-INFO/'):
                to_compile.append(dst)
            elif dst.endswith('.dll') or dst.endswith('.so'):
                to_chmod.append(dst)
            self.unpack_progress(src, dst)
            # dry run: return None so the file is skipped
            return not self.dry_run and dst or None

        unpack_archive(egg_path, destination, pf)
        self.byte_compile(to_compile)
        if not self.dry_run:
            for f in to_chmod:
                # add r-x bits for everyone, drop setuid/world-write
                mode = ((os.stat(f)[stat.ST_MODE]) | 0o555) & 0o7755
                chmod(f, mode)
    def byte_compile(self, to_compile):
        """Byte-compile the given .py files, honoring -O and dry-run."""
        if sys.dont_write_bytecode:
            # interpreter was started with -B; don't fight it
            self.warn('byte-compiling is disabled, skipping.')
            return

        from distutils.util import byte_compile

        try:
            # try to make the byte compile messages quieter
            log.set_verbosity(self.verbose - 1)

            byte_compile(to_compile, optimize=0, force=1, dry_run=self.dry_run)
            if self.optimize:
                byte_compile(
                    to_compile, optimize=self.optimize, force=1,
                    dry_run=self.dry_run,
                )
        finally:
            log.set_verbosity(self.verbose)  # restore original verbosity
__no_default_msg = textwrap.dedent("""
bad install directory or PYTHONPATH
You are attempting to install a package to a directory that is not
on PYTHONPATH and which Python does not read ".pth" files from. The
installation directory you specified (via --install-dir, --prefix, or
the distutils default setting) was:
%s
and your PYTHONPATH environment variable currently contains:
%r
Here are some of your options for correcting the problem:
* You can choose a different installation directory, i.e., one that is
on PYTHONPATH or supports .pth files
* You can add the installation directory to the PYTHONPATH environment
variable. (It must then also be on PYTHONPATH whenever you run
Python and want to use the package(s) you are installing.)
* You can set up the installation directory to support ".pth" files by
using one of the approaches described here:
https://setuptools.readthedocs.io/en/latest/easy_install.html#custom-installation-locations
Please make the appropriate changes for your system and try again.""").lstrip()
def no_default_version_msg(self):
template = self.__no_default_msg
return template % (self.install_dir, os.environ.get('PYTHONPATH', ''))
    def install_site_py(self):
        """Make sure there's a site.py in the target dir, if needed"""
        if self.sitepy_installed:
            return  # already did it, or don't need to

        sitepy = os.path.join(self.install_dir, "site.py")
        source = resource_string("setuptools", "site-patch.py")
        source = source.decode('utf-8')
        current = ""

        if os.path.exists(sitepy):
            log.debug("Checking existing site.py in %s", self.install_dir)
            with io.open(sitepy) as strm:
                current = strm.read()

            if not current.startswith('def __boot():'):
                # refuse to overwrite a site.py we didn't generate
                raise DistutilsError(
                    "%s is not a setuptools-generated site.py; please"
                    " remove it." % sitepy
                )

        if current != source:
            log.info("Creating %s", sitepy)
            if not self.dry_run:
                ensure_directory(sitepy)
                with io.open(sitepy, 'w', encoding='utf-8') as strm:
                    strm.write(source)
            self.byte_compile([sitepy])

        # remember so repeated calls are cheap no-ops
        self.sitepy_installed = True
    def create_home_path(self):
        """Create directories under ~."""
        # only relevant for --user installs
        if not self.user:
            return
        home = convert_path(os.path.expanduser("~"))
        for name, path in six.iteritems(self.config_vars):
            # create missing config-var dirs under the home prefix, private
            # to the user (0o700)
            if path.startswith(home) and not os.path.isdir(path):
                self.debug_print("os.makedirs('%s', 0o700)" % path)
                os.makedirs(path, 0o700)
INSTALL_SCHEMES = dict(
posix=dict(
install_dir='$base/lib/python$py_version_short/site-packages',
script_dir='$base/bin',
),
)
DEFAULT_SCHEME = dict(
install_dir='$base/Lib/site-packages',
script_dir='$base/Scripts',
)
    def _expand(self, *attrs):
        """Expand $-style config variables in the named attributes,
        applying the install scheme defaults first when --prefix is set."""
        config_vars = self.get_finalized_command('install').config_vars

        if self.prefix:
            # Set default install_dir/scripts from --prefix
            config_vars = config_vars.copy()
            config_vars['base'] = self.prefix
            scheme = self.INSTALL_SCHEMES.get(os.name, self.DEFAULT_SCHEME)
            for attr, val in scheme.items():
                if getattr(self, attr, None) is None:
                    setattr(self, attr, val)

        from distutils.util import subst_vars

        for attr in attrs:
            val = getattr(self, attr)
            if val is not None:
                val = subst_vars(val, config_vars)
                if os.name == 'posix':
                    # allow ~ in user-supplied directories
                    val = os.path.expanduser(val)
                setattr(self, attr, val)
def _pythonpath():
items = os.environ.get('PYTHONPATH', '').split(os.pathsep)
return filter(None, items)
def get_site_dirs():
    """
    Return a list of 'site' dirs: candidate directories (normalized) where
    Python reads .pth files, gathered from PYTHONPATH, the interpreter
    prefixes, sysconfig paths, and the site module.
    """
    sitedirs = []

    # start with PYTHONPATH
    sitedirs.extend(_pythonpath())

    prefixes = [sys.prefix]
    if sys.exec_prefix != sys.prefix:
        prefixes.append(sys.exec_prefix)
    for prefix in prefixes:
        if prefix:
            if sys.platform in ('os2emx', 'riscos'):
                sitedirs.append(os.path.join(prefix, "Lib", "site-packages"))
            elif os.sep == '/':
                # POSIX-style layout
                sitedirs.extend([
                    os.path.join(
                        prefix,
                        "lib",
                        "python" + sys.version[:3],
                        "site-packages",
                    ),
                    os.path.join(prefix, "lib", "site-python"),
                ])
            else:
                # Windows-style layout
                sitedirs.extend([
                    prefix,
                    os.path.join(prefix, "lib", "site-packages"),
                ])
            if sys.platform == 'darwin':
                # for framework builds *only* we add the standard Apple
                # locations. Currently only per-user, but /Library and
                # /Network/Library could be added too
                if 'Python.framework' in prefix:
                    home = os.environ.get('HOME')
                    if home:
                        home_sp = os.path.join(
                            home,
                            'Library',
                            'Python',
                            sys.version[:3],
                            'site-packages',
                        )
                        sitedirs.append(home_sp)

    lib_paths = get_path('purelib'), get_path('platlib')
    for site_lib in lib_paths:
        if site_lib not in sitedirs:
            sitedirs.append(site_lib)

    if site.ENABLE_USER_SITE:
        sitedirs.append(site.USER_SITE)

    try:
        # not available on all Python builds (e.g. some virtualenvs)
        sitedirs.extend(site.getsitepackages())
    except AttributeError:
        pass

    sitedirs = list(map(normalize_path, sitedirs))

    return sitedirs
def expand_paths(inputs):
    """Yield sys.path directories that might contain "old-style" packages.

    For each unique existing directory in ``inputs``, yields
    ``(dirname, files)``, then follows any third-party .pth files it
    contains (skipping setuptools' own) and yields the existing directories
    they reference as well.
    """
    seen = {}

    for dirname in inputs:
        dirname = normalize_path(dirname)
        if dirname in seen:
            continue

        seen[dirname] = 1
        if not os.path.isdir(dirname):
            continue

        files = os.listdir(dirname)
        yield dirname, files

        for name in files:
            if not name.endswith('.pth'):
                # We only care about the .pth files
                continue
            if name in ('easy-install.pth', 'setuptools.pth'):
                # Ignore .pth files that we control
                continue

            # Read the .pth file; the context manager guarantees the handle
            # is closed even if reading raises
            with open(os.path.join(dirname, name)) as f:
                lines = list(yield_lines(f))

            # Yield existing non-dupe, non-import directory lines from it
            for line in lines:
                if not line.startswith("import"):
                    line = normalize_path(line.rstrip())
                    if line not in seen:
                        seen[line] = 1
                        if not os.path.isdir(line):
                            continue
                        yield line, os.listdir(line)
def extract_wininst_cfg(dist_filename):
    """Extract configuration data from a bdist_wininst .exe

    Returns a configparser.RawConfigParser, or None
    """
    f = open(dist_filename, 'rb')
    try:
        endrec = zipfile._EndRecData(f)
        if endrec is None:
            return None

        # bytes prepended before the embedded zip archive; wininst stores
        # its config block (and a 12-byte trailer) in that region
        prepended = (endrec[9] - endrec[5]) - endrec[6]
        if prepended < 12:  # no wininst data here
            return None
        f.seek(prepended - 12)

        tag, cfglen, bmlen = struct.unpack("<iii", f.read(12))
        if tag not in (0x1234567A, 0x1234567B):
            return None  # not a valid tag

        f.seek(prepended - (12 + cfglen))
        # pre-seed keys some installers omit
        init = {'version': '', 'target_version': ''}
        cfg = configparser.RawConfigParser(init)
        try:
            part = f.read(cfglen)
            # Read up to the first null byte.
            config = part.split(b'\0', 1)[0]
            # Now the config is in bytes, but for RawConfigParser, it should
            # be text, so decode it.
            config = config.decode(sys.getfilesystemencoding())
            cfg.readfp(six.StringIO(config))
        except configparser.Error:
            return None
        if not cfg.has_section('metadata') or not cfg.has_section('Setup'):
            return None
        return cfg
    finally:
        f.close()
def get_exe_prefixes(exe_filename):
    """Get exe->egg path translations for a given .exe file

    Returns (old_prefix, new_prefix) pairs, lowercased and sorted
    longest-first so the most specific prefix matches first.
    """
    prefixes = [
        ('PURELIB/', ''),
        ('PLATLIB/pywin32_system32', ''),
        ('PLATLIB/', ''),
        ('SCRIPTS/', 'EGG-INFO/scripts/'),
        ('DATA/lib/site-packages', ''),
    ]
    z = zipfile.ZipFile(exe_filename)
    try:
        for info in z.infolist():
            name = info.filename
            parts = name.split('/')
            if len(parts) == 3 and parts[2] == 'PKG-INFO':
                if parts[1].endswith('.egg-info'):
                    # map the .egg-info directory onto EGG-INFO
                    prefixes.insert(0, ('/'.join(parts[:2]), 'EGG-INFO/'))
                    break
            if len(parts) != 2 or not name.endswith('.pth'):
                continue
            if name.endswith('-nspkg.pth'):
                continue
            if parts[0].upper() in ('PURELIB', 'PLATLIB'):
                # each non-import .pth line names an extra package root
                contents = z.read(name)
                if six.PY3:
                    contents = contents.decode()
                for pth in yield_lines(contents):
                    pth = pth.strip().replace('\\', '/')
                    if not pth.startswith('import'):
                        prefixes.append((('%s/%s/' % (parts[0], pth)), ''))
    finally:
        z.close()
    prefixes = [(x.lower(), y) for x, y in prefixes]
    prefixes.sort()
    prefixes.reverse()
    return prefixes
class PthDistributions(Environment):
    """A .pth file with Distribution paths in it"""

    # True when the in-memory paths differ from the file on disk
    dirty = False

    def __init__(self, filename, sitedirs=()):
        self.filename = filename
        self.sitedirs = list(map(normalize_path, sitedirs))
        self.basedir = normalize_path(os.path.dirname(self.filename))
        self._load()
        Environment.__init__(self, [], None, None)
        # index every distribution found along the loaded paths
        for path in yield_lines(self.paths):
            list(map(self.add, find_distributions(path, True)))

    def _load(self):
        """Parse the .pth file into self.paths, pruning entries that no
        longer exist or duplicate a site dir / earlier entry."""
        self.paths = []
        saw_import = False
        seen = dict.fromkeys(self.sitedirs)
        if os.path.isfile(self.filename):
            f = open(self.filename, 'rt')
            for line in f:
                if line.startswith('import'):
                    saw_import = True
                    continue
                path = line.rstrip()
                self.paths.append(path)
                if not path.strip() or path.strip().startswith('#'):
                    continue
                # skip non-existent paths, in case somebody deleted a package
                # manually, and duplicate paths as well
                path = self.paths[-1] = normalize_path(
                    os.path.join(self.basedir, path)
                )
                if not os.path.exists(path) or path in seen:
                    self.paths.pop()  # skip it
                    self.dirty = True  # we cleaned up, so we're dirty now :)
                    continue
                seen[path] = 1
            f.close()
        if self.paths and not saw_import:
            self.dirty = True  # ensure anything we touch has import wrappers
        # drop trailing blank entries
        while self.paths and not self.paths[-1].strip():
            self.paths.pop()

    def save(self):
        """Write changed .pth file back to disk"""
        if not self.dirty:
            return

        rel_paths = list(map(self.make_relative, self.paths))
        if rel_paths:
            log.debug("Saving %s", self.filename)
            lines = self._wrap_lines(rel_paths)
            data = '\n'.join(lines) + '\n'

            # replace a symlink with a real file so we don't write through it
            if os.path.islink(self.filename):
                os.unlink(self.filename)
            with open(self.filename, 'wt') as f:
                f.write(data)

        elif os.path.exists(self.filename):
            log.debug("Deleting empty %s", self.filename)
            os.unlink(self.filename)

        self.dirty = False

    @staticmethod
    def _wrap_lines(lines):
        # hook for subclasses to decorate the saved lines (see
        # RewritePthDistributions)
        return lines

    def add(self, dist):
        """Add `dist` to the distribution map"""
        # only record a path if it isn't already active; '.' (the cwd) is
        # special-cased because it may appear in PYTHONPATH
        new_path = (
            dist.location not in self.paths and (
                dist.location not in self.sitedirs or
                # account for '.' being in PYTHONPATH
                dist.location == os.getcwd()
            )
        )
        if new_path:
            self.paths.append(dist.location)
            self.dirty = True
        Environment.add(self, dist)

    def remove(self, dist):
        """Remove `dist` from the distribution map"""
        while dist.location in self.paths:
            self.paths.remove(dist.location)
            self.dirty = True
        Environment.remove(self, dist)

    def make_relative(self, path):
        """Return ``path`` rewritten relative to this .pth file's directory,
        or unchanged if it lies outside of it."""
        npath, last = os.path.split(normalize_path(path))
        baselen = len(self.basedir)
        parts = [last]
        sep = os.altsep == '/' and '/' or os.sep
        while len(npath) >= baselen:
            if npath == self.basedir:
                parts.append(os.curdir)
                parts.reverse()
                return sep.join(parts)
            npath, last = os.path.split(npath)
            parts.append(last)
        else:
            # never reached basedir: path is not under it, keep absolute
            return path
class RewritePthDistributions(PthDistributions):
    """A PthDistributions variant whose saved file rewrites sys.path so the
    new entries are spliced in at a remembered position instead of simply
    being appended."""

    @classmethod
    def _wrap_lines(cls, lines):
        # sandwich the path entries between the prelude/postlude code
        yield cls.prelude
        for line in lines:
            yield line
        yield cls.postlude

    # executed by site.py before the path entries are added
    prelude = _one_liner("""
        import sys
        sys.__plen = len(sys.path)
        """)
    # executed afterwards: move the new entries to the insert position
    postlude = _one_liner("""
        import sys
        new = sys.path[sys.__plen:]
        del sys.path[sys.__plen:]
        p = getattr(sys, '__egginsert', 0)
        sys.path[p:p] = new
        sys.__egginsert = p + len(new)
        """)
# Opt in to the sys.path-rewriting .pth strategy via environment variable;
# the default ('raw') keeps plain path lines.
if os.environ.get('SETUPTOOLS_SYS_PATH_TECHNIQUE', 'raw') == 'rewrite':
    PthDistributions = RewritePthDistributions
def _first_line_re():
    """
    Return a regular expression based on first_line_re suitable for matching
    strings.
    """
    pattern = first_line_re.pattern
    if isinstance(pattern, str):
        return first_line_re

    # first_line_re in Python >=3.1.4 and >=3.2.1 is a bytes pattern.
    return re.compile(pattern.decode())
def auto_chmod(func, arg, exc):
    # Error handler (for shutil.rmtree-style callers): on Windows, make a
    # read-only file writable and retry the failing unlink/remove.
    if func in [os.unlink, os.remove] and os.name == 'nt':
        chmod(arg, stat.S_IWRITE)
        return func(arg)
    et, ev, _ = sys.exc_info()
    # NOTE(review): indexing the exception value (ev[0], ev[1]) relies on
    # Python 2 exception semantics; on Python 3 this itself raises
    # TypeError. Confirm whether this fallback path is reachable on Py3.
    six.reraise(et, (ev[0], ev[1] + (" %s %s" % (func, arg))))
def update_dist_caches(dist_path, fix_zipimporter_caches):
    """
    Fix any globally cached `dist_path` related data

    `dist_path` should be a path of a newly installed egg distribution (zipped
    or unzipped).

    sys.path_importer_cache contains finder objects that have been cached when
    importing data from the original distribution. Any such finders need to be
    cleared since the replacement distribution might be packaged differently,
    e.g. a zipped egg distribution might get replaced with an unzipped egg
    folder or vice versa. Having the old finders cached may then cause Python
    to attempt loading modules from the replacement distribution using an
    incorrect loader.

    zipimport.zipimporter objects are Python loaders charged with importing
    data packaged inside zip archives. If stale loaders referencing the
    original distribution, are left behind, they can fail to load modules from
    the replacement distribution. E.g. if an old zipimport.zipimporter instance
    is used to load data from a new zipped egg archive, it may cause the
    operation to attempt to locate the requested data in the wrong location -
    one indicated by the original distribution's zip archive directory
    information. Such an operation may then fail outright, e.g. report having
    read a 'bad local file header', or even worse, it may fail silently &
    return invalid data.

    zipimport._zip_directory_cache contains cached zip archive directory
    information for all existing zipimport.zipimporter instances and all such
    instances connected to the same archive share the same cached directory
    information.

    If asked, and the underlying Python implementation allows it, we can fix
    all existing zipimport.zipimporter instances instead of having to track
    them down and remove them one by one, by updating their shared cached zip
    archive directory information. This, of course, assumes that the
    replacement distribution is packaged as a zipped egg.

    If not asked to fix existing zipimport.zipimporter instances, we still do
    our best to clear any remaining zipimport.zipimporter related cached data
    that might somehow later get used when attempting to load data from the new
    distribution and thus cause such load operations to fail. Note that when
    tracking down such remaining stale data, we can not catch every conceivable
    usage from here, and we clear only those that we know of and have found to
    cause problems if left alive. Any remaining caches should be updated by
    whomever is in charge of maintaining them, i.e. they should be ready to
    handle us replacing their zip archives with new distributions at runtime.
    """
    # There are several other known sources of stale zipimport.zipimporter
    # instances that we do not clear here, but might if ever given a reason to
    # do so:
    # * Global setuptools pkg_resources.working_set (a.k.a. 'master working
    # set') may contain distributions which may in turn contain their
    #   zipimport.zipimporter loaders.
    # * Several zipimport.zipimporter loaders held by local variables further
    #   up the function call stack when running the setuptools installation.
    # * Already loaded modules may have their __loader__ attribute set to the
    #   exact loader instance used when importing them. Python 3.4 docs state
    #   that this information is intended mostly for introspection and so is
    #   not expected to cause us problems.
    normalized_path = normalize_path(dist_path)
    _uncache(normalized_path, sys.path_importer_cache)
    if fix_zipimporter_caches:
        # update the shared directory info in place so live zipimporters work
        _replace_zip_directory_cache_data(normalized_path)
    else:
        # Here, even though we do not want to fix existing and now stale
        # zipimporter cache information, we still want to remove it. Related to
        # Python's zip archive directory information cache, we clear each of
        # its stale entries in two phases:
        #   1. Clear the entry so attempting to access zip archive information
        #      via any existing stale zipimport.zipimporter instances fails.
        #   2. Remove the entry from the cache so any newly constructed
        #      zipimport.zipimporter instances do not end up using old stale
        #      zip archive directory information.
        # This whole stale data removal step does not seem strictly necessary,
        # but has been left in because it was done before we started replacing
        # the zip archive directory information cache content if possible, and
        # there are no relevant unit tests that we can depend on to tell us if
        # this is really needed.
        _remove_and_clear_zip_directory_cache_data(normalized_path)
def _collect_zipimporter_cache_entries(normalized_path, cache):
    """
    Return zipimporter cache entry keys related to a given normalized path.

    Alternative path spellings (e.g. those using different character case or
    those using alternative path separators) related to the same path are
    included. Any sub-path entries are included as well, i.e. those
    corresponding to zip archives embedded in other zip archives.
    """
    prefix_len = len(normalized_path)

    def related(key):
        np = normalize_path(key)
        # match the path itself, or any entry nested directly under it
        return (np.startswith(normalized_path) and
                np[prefix_len:prefix_len + 1] in (os.sep, ''))

    return [key for key in cache if related(key)]
def _update_zipimporter_cache(normalized_path, cache, updater=None):
    """
    Update zipimporter cache data for a given normalized path.
    Any sub-path entries are processed as well, i.e. those corresponding to zip
    archives embedded in other zip archives.
    Given updater is a callable taking a cache entry key and the original entry
    (after already removing the entry from the cache), and expected to update
    the entry and possibly return a new one to be inserted in its place.
    Returning None indicates that the entry should not be replaced with a new
    one. If no updater is given, the cache entries are simply removed without
    any additional processing, the same as if the updater simply returned None.
    """
    for entry_key in _collect_zipimporter_cache_entries(normalized_path, cache):
        # N.B. pypy's custom zipimport._zip_directory_cache implementation does
        # not support the complete dict interface:
        # * Does not support item assignment, thus not allowing this function
        #   to be used only for removing existing cache entries.
        # * Does not support the dict.pop() method, forcing us to use the
        #   get/del patterns instead. For more detailed information see the
        #   following links:
        # https://github.com/pypa/setuptools/issues/202#issuecomment-202913420
        # https://bitbucket.org/pypy/pypy/src/dd07756a34a41f674c0cacfbc8ae1d4cc9ea2ae4/pypy/module/zipimport/interp_zipimport.py#cl-99
        removed_entry = cache[entry_key]
        del cache[entry_key]
        replacement = updater(entry_key, removed_entry) if updater else None
        if replacement is not None:
            cache[entry_key] = replacement
def _uncache(normalized_path, cache):
    """Drop every cache entry related to *normalized_path* (no replacement)."""
    _update_zipimporter_cache(normalized_path, cache, updater=None)
def _remove_and_clear_zip_directory_cache_data(normalized_path):
    """Clear and drop stale zip directory cache entries for a path.

    Each matching entry is emptied in place (so stale zipimporter instances
    holding a reference to it see no data) and then removed from the cache,
    because the updater returns None.
    """
    def _clear(path, stale_entry):
        # Empty the shared dict; returning None makes the caller drop the key.
        stale_entry.clear()
    _update_zipimporter_cache(
        normalized_path, zipimport._zip_directory_cache, updater=_clear)
# PyPy Python implementation does not allow directly writing to the
# zipimport._zip_directory_cache and so prevents us from attempting to correct
# its content. The best we can do there is clear the problematic cache content
# and have PyPy repopulate it as needed. The downside is that if there are any
# stale zipimport.zipimporter instances laying around, attempting to use them
# will fail due to not having its zip archive directory information available
# instead of being automatically corrected to use the new correct zip archive
# directory information.
if '__pypy__' in sys.builtin_module_names:
    _replace_zip_directory_cache_data = \
        _remove_and_clear_zip_directory_cache_data
else:
    def _replace_zip_directory_cache_data(normalized_path):
        """Refresh cached zip directory data for *normalized_path* in place.

        Each stale cache entry is cleared and then repopulated by constructing
        a new zipimport.zipimporter, which re-reads the archive directory and
        stores it back into zipimport._zip_directory_cache.
        """
        def replace_cached_zip_archive_directory_data(path, old_entry):
            # N.B. In theory, we could load the zip directory information just
            # once for all updated path spellings, and then copy it locally and
            # update its contained path strings to contain the correct
            # spelling, but that seems like a way too invasive move (this cache
            # structure is not officially documented anywhere and could in
            # theory change with new Python releases) for no significant
            # benefit.
            old_entry.clear()
            # Constructing a zipimporter re-reads the archive and repopulates
            # zipimport._zip_directory_cache[path] as a side effect.
            zipimport.zipimporter(path)
            old_entry.update(zipimport._zip_directory_cache[path])
            # Returning the refreshed original dict keeps the same object in
            # the cache, so stale zipimporter instances see the new data too.
            return old_entry
        _update_zipimporter_cache(
            normalized_path, zipimport._zip_directory_cache,
            updater=replace_cached_zip_archive_directory_data)
def is_python(text, filename='<string>'):
    """Return True when *text* compiles as a Python script."""
    try:
        # TypeError covers e.g. source containing null bytes.
        compile(text, filename, 'exec')
        return True
    except (SyntaxError, TypeError):
        return False
def is_sh(executable):
    """Determine if the specified executable is a .sh (contains a #! line)"""
    try:
        # latin-1 decodes any byte sequence, so binaries cannot raise here.
        with io.open(executable, encoding='latin-1') as stream:
            first_two = stream.read(2)
    except (OSError, IOError):
        # Unreadable path: fall back to returning the (truthy) path itself.
        return executable
    return first_two == '#!'
def nt_quote_arg(arg):
    """Quote a command line argument according to Windows parsing rules"""
    # Delegate to the stdlib implementation of the MS C runtime quoting rules.
    quoted = subprocess.list2cmdline([arg])
    return quoted
def is_python_script(script_text, filename):
    """Is this text, as a whole, a Python script? (as opposed to shell/bat/etc.
    """
    if filename.endswith(('.py', '.pyw')):
        # extension says it's Python
        return True
    if is_python(script_text, filename):
        # it's syntactically valid Python
        return True
    if script_text.startswith('#!'):
        # It begins with a '#!' line, so check if 'python' is in it somewhere
        return 'python' in script_text.splitlines()[0].lower()
    # Not any Python I can recognize
    return False
try:
    from os import chmod as _chmod
except ImportError:
    # Jython compatibility
    def _chmod(*args):
        # No-op fallback for platforms whose os module lacks chmod.
        pass
def chmod(path, mode):
    """Best-effort chmod: attempt the mode change and log (not raise) failures."""
    log.debug("changing mode of %s to %o", path, mode)
    try:
        _chmod(path, mode)
    except os.error as e:
        # chmod can fail on platforms/filesystems that don't support it;
        # treat that as non-fatal.
        log.debug("chmod failed: %s", e)
class CommandSpec(list):
    """
    A command spec for a #! header, specified as a list of arguments akin to
    those passed to Popen.
    """
    # Interpreter options appended after the command; filled by install_options().
    options = []
    # Extra keyword arguments for shlex.split (overridden on Windows).
    split_args = dict()
    @classmethod
    def best(cls):
        """
        Choose the best CommandSpec class based on environmental conditions.
        """
        return cls
    @classmethod
    def _sys_executable(cls):
        """Return the Python executable path, honoring the
        __PYVENV_LAUNCHER__ environment override when set."""
        _default = os.path.normpath(sys.executable)
        return os.environ.get('__PYVENV_LAUNCHER__', _default)
    @classmethod
    def from_param(cls, param):
        """
        Construct a CommandSpec from a parameter to build_scripts, which may
        be None.
        """
        if isinstance(param, cls):
            return param
        if isinstance(param, list):
            return cls(param)
        if param is None:
            return cls.from_environment()
        # otherwise, assume it's a string.
        return cls.from_string(param)
    @classmethod
    def from_environment(cls):
        """Build a spec naming the currently-running interpreter."""
        return cls([cls._sys_executable()])
    @classmethod
    def from_string(cls, string):
        """
        Construct a command spec from a simple string representing a command
        line parseable by shlex.split.
        """
        items = shlex.split(string, **cls.split_args)
        return cls(items)
    def install_options(self, script_text):
        """Capture interpreter options from *script_text*'s first #! line;
        prepend '-x' when the rendered command line is not pure ASCII
        (CPython's -x flag skips the script's first line)."""
        self.options = shlex.split(self._extract_options(script_text))
        cmdline = subprocess.list2cmdline(self)
        if not isascii(cmdline):
            self.options[:0] = ['-x']
    @staticmethod
    def _extract_options(orig_script):
        """
        Extract any options from the first line of the script.
        """
        # Appending '\n' guarantees splitlines() yields at least one line.
        first = (orig_script + '\n').splitlines()[0]
        match = _first_line_re().match(first)
        options = match.group(1) or '' if match else ''
        return options.strip()
    def as_header(self):
        """Render this spec (plus any options) as a '#!...' header line."""
        return self._render(self + list(self.options))
    @staticmethod
    def _strip_quotes(item):
        # Remove one level of matching single or double quotes, if present.
        _QUOTES = '"\''
        for q in _QUOTES:
            if item.startswith(q) and item.endswith(q):
                return item[1:-1]
        return item
    @staticmethod
    def _render(items):
        cmdline = subprocess.list2cmdline(
            CommandSpec._strip_quotes(item.strip()) for item in items)
        return '#!' + cmdline + '\n'
# For pbr compat; will be removed in a future version.
# (External code imports easy_install.sys_executable directly.)
sys_executable = CommandSpec._sys_executable()
class WindowsCommandSpec(CommandSpec):
    """CommandSpec that splits command strings with non-POSIX rules,
    preserving Windows-style backslashes and quoting."""
    split_args = dict(posix=False)
class ScriptWriter(object):
    """
    Encapsulates behavior around writing entry point scripts for console and
    gui apps.
    """
    # Source template for generated entry-point scripts; %-interpolated with
    # the requirement spec, entry point group, and entry point name.
    template = textwrap.dedent(r"""
    # EASY-INSTALL-ENTRY-SCRIPT: %(spec)r,%(group)r,%(name)r
    __requires__ = %(spec)r
    import re
    import sys
    from pkg_resources import load_entry_point
    if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
    load_entry_point(%(spec)r, %(group)r, %(name)r)()
    )
    """).lstrip()
    # Class used to build the #! header; Windows subclasses override this.
    command_spec_class = CommandSpec
    @classmethod
    def get_script_args(cls, dist, executable=None, wininst=False):
        # for backward compatibility
        warnings.warn("Use get_args", DeprecationWarning)
        writer = (WindowsScriptWriter if wininst else ScriptWriter).best()
        header = cls.get_script_header("", executable, wininst)
        return writer.get_args(dist, header)
    @classmethod
    def get_script_header(cls, script_text, executable=None, wininst=False):
        # for backward compatibility
        warnings.warn("Use get_header", DeprecationWarning)
        if wininst:
            executable = "python.exe"
        cmd = cls.command_spec_class.best().from_param(executable)
        cmd.install_options(script_text)
        return cmd.as_header()
    @classmethod
    def get_args(cls, dist, header=None):
        """
        Yield write_script() argument tuples for a distribution's
        console_scripts and gui_scripts entry points.
        """
        if header is None:
            header = cls.get_header()
        spec = str(dist.as_requirement())
        for type_ in 'console', 'gui':
            group = type_ + '_scripts'
            for name, ep in dist.get_entry_map(group).items():
                cls._ensure_safe_name(name)
                # locals() supplies spec/group/name to the template.
                script_text = cls.template % locals()
                args = cls._get_script_args(type_, name, header, script_text)
                for res in args:
                    yield res
    @staticmethod
    def _ensure_safe_name(name):
        """
        Prevent paths in *_scripts entry point names.
        """
        has_path_sep = re.search(r'[\\/]', name)
        if has_path_sep:
            raise ValueError("Path separators not allowed in script names")
    @classmethod
    def get_writer(cls, force_windows):
        # for backward compatibility
        warnings.warn("Use best", DeprecationWarning)
        return WindowsScriptWriter.best() if force_windows else cls.best()
    @classmethod
    def best(cls):
        """
        Select the best ScriptWriter for this environment.
        """
        # os._name identifies the underlying platform when running on Jython.
        if sys.platform == 'win32' or (os.name == 'java' and os._name == 'nt'):
            return WindowsScriptWriter.best()
        else:
            return cls
    @classmethod
    def _get_script_args(cls, type_, name, header, script_text):
        # Simply write the stub with no extension.
        yield (name, header + script_text)
    @classmethod
    def get_header(cls, script_text="", executable=None):
        """Create a #! line, getting options (if any) from script_text"""
        cmd = cls.command_spec_class.best().from_param(executable)
        cmd.install_options(script_text)
        return cmd.as_header()
class WindowsScriptWriter(ScriptWriter):
    """ScriptWriter variant for Windows: writes scripts with extensions that
    Windows can associate with an interpreter."""
    command_spec_class = WindowsCommandSpec
    @classmethod
    def get_writer(cls):
        # for backward compatibility
        warnings.warn("Use best", DeprecationWarning)
        return cls.best()
    @classmethod
    def best(cls):
        """
        Select the best ScriptWriter suitable for Windows
        """
        writer_lookup = dict(
            executable=WindowsExecutableLauncherWriter,
            natural=cls,
        )
        # for compatibility, use the executable launcher by default
        launcher = os.environ.get('SETUPTOOLS_LAUNCHER', 'executable')
        return writer_lookup[launcher]
    @classmethod
    def _get_script_args(cls, type_, name, header, script_text):
        "For Windows, add a .py extension"
        # NOTE(review): '.pya' (not '.py') is used for console scripts here;
        # presumably to avoid clashing with the '.py' blocker below — confirm.
        ext = dict(console='.pya', gui='.pyw')[type_]
        if ext not in os.environ['PATHEXT'].lower().split(';'):
            msg = (
                "{ext} not listed in PATHEXT; scripts will not be "
                "recognized as executables."
            ).format(**locals())
            warnings.warn(msg, UserWarning)
        # Pre-existing scripts under any other extension would shadow the new
        # one, so report them as blockers (everything except the chosen ext).
        old = ['.pya', '.py', '-script.py', '.pyc', '.pyo', '.pyw', '.exe']
        old.remove(ext)
        header = cls._adjust_header(type_, header)
        blockers = [name + x for x in old]
        yield name + ext, header + script_text, 't', blockers
    @classmethod
    def _adjust_header(cls, type_, orig_header):
        """
        Make sure 'pythonw' is used for gui and 'python' is used for
        console (regardless of what sys.executable is).
        """
        pattern = 'pythonw.exe'
        repl = 'python.exe'
        if type_ == 'gui':
            # gui scripts swap direction: python.exe -> pythonw.exe.
            pattern, repl = repl, pattern
        pattern_ob = re.compile(re.escape(pattern), re.IGNORECASE)
        new_header = pattern_ob.sub(string=orig_header, repl=repl)
        return new_header if cls._use_header(new_header) else orig_header
    @staticmethod
    def _use_header(new_header):
        """
        Should _adjust_header use the replaced header?
        On non-windows systems, always use. On
        Windows systems, only use the replaced header if it resolves
        to an executable on the system.
        """
        # Strip '#!' prefix, trailing newline, and any surrounding quotes.
        clean_header = new_header[2:-1].strip('"')
        return sys.platform != 'win32' or find_executable(clean_header)
class WindowsExecutableLauncherWriter(WindowsScriptWriter):
    """Writer that pairs each script with a native .exe launcher, so the
    entry point behaves like a real Windows executable."""
    @classmethod
    def _get_script_args(cls, type_, name, header, script_text):
        """
        For Windows, add a .py extension and an .exe launcher
        """
        if type_ == 'gui':
            launcher_type = 'gui'
            ext = '-script.pyw'
            old = ['.pyw']
        else:
            launcher_type = 'cli'
            ext = '-script.py'
            old = ['.py', '.pyc', '.pyo']
        hdr = cls._adjust_header(type_, header)
        blockers = [name + x for x in old]
        # The Python source goes into NAME-script.py[w] (text mode) ...
        yield (name + ext, hdr + script_text, 't', blockers)
        # ... and NAME.exe is the binary launcher that executes it.
        yield (
            name + '.exe', get_win_launcher(launcher_type),
            'b' # write in binary mode
        )
        if not is_64bit():
            # install a manifest for the launcher to prevent Windows
            # from detecting it as an installer (which it will for
            # launchers like easy_install.exe). Consider only
            # adding a manifest for launchers detected as installers.
            # See Distribute #143 for details.
            m_name = name + '.exe.manifest'
            yield (m_name, load_launcher_manifest(name), 't')
# for backward-compatibility
# Module-level aliases kept for code written against the pre-class API.
get_script_args = ScriptWriter.get_script_args
get_script_header = ScriptWriter.get_script_header
def get_win_launcher(type):
    """
    Load the Windows launcher (executable) suitable for launching a script.
    `type` should be either 'cli' or 'gui'
    Returns the executable as a byte string.
    """
    # Pick the bitness-specific resource, e.g. 'cli-64.exe' or 'gui-32.exe'.
    bitness_infix = "-64." if is_64bit() else "-32."
    launcher_fn = ('%s.exe' % type).replace(".", bitness_infix)
    return resource_string('setuptools', launcher_fn)
def load_launcher_manifest(name):
    """Return the launcher manifest XML with *name* interpolated into it."""
    manifest = pkg_resources.resource_string(__name__, 'launcher manifest.xml')
    if not six.PY2:
        manifest = manifest.decode('utf-8')
    # vars() exposes 'name' (and 'manifest') to the %-template.
    return manifest % vars()
def rmtree(path, ignore_errors=False, onerror=auto_chmod):
    """shutil.rmtree with auto_chmod (defined earlier in this module) as the
    default error handler."""
    return shutil.rmtree(path, ignore_errors, onerror)
def current_umask():
    """Return the process umask without permanently altering it."""
    # os.umask both sets and returns the previous mask, so set a throwaway
    # value to read the current one, then immediately restore it.
    mask = os.umask(0o022)
    os.umask(mask)
    return mask
def bootstrap():
    # This function is called when setuptools*.egg is run using /bin/sh
    import setuptools
    # argv0 is the directory containing the setuptools package (the egg root).
    # It is installed as both the script name and the sole requirement
    # argument, so easy_install installs this very egg.
    argv0 = os.path.dirname(setuptools.__path__[0])
    sys.argv[0] = argv0
    sys.argv.append(argv0)
    main()
def main(argv=None, **kw):
    """Run the easy_install command with a simplified usage message.

    *argv* defaults to sys.argv[1:]; extra keyword args are passed to setup().
    """
    from setuptools import setup
    from setuptools.dist import Distribution
    class DistributionWithoutHelpCommands(Distribution):
        # Suppress the generic distutils "common commands" usage text.
        common_usage = ""
        def _show_help(self, *args, **kw):
            # Show help using the easy_install-specific usage message.
            with _patch_usage():
                Distribution._show_help(self, *args, **kw)
    if argv is None:
        argv = sys.argv[1:]
    with _patch_usage():
        setup(
            script_args=['-q', 'easy_install', '-v'] + argv,
            script_name=sys.argv[0] or 'easy_install',
            distclass=DistributionWithoutHelpCommands,
            **kw
        )
@contextlib.contextmanager
def _patch_usage():
    """Temporarily replace distutils.core.gen_usage with an
    easy_install-specific usage message; restore it on exit."""
    import distutils.core
    USAGE = textwrap.dedent("""
    usage: %(script)s [options] requirement_or_url ...
    or: %(script)s --help
    """).lstrip()
    def gen_usage(script_name):
        return USAGE % dict(script=os.path.basename(script_name))
    original = distutils.core.gen_usage
    distutils.core.gen_usage = gen_usage
    try:
        yield
    finally:
        # Always restore the stock implementation, even on error.
        distutils.core.gen_usage = original
| mit |
vipul-sharma20/oh-mainline | vendor/packages/whoosh/src/whoosh/filedb/gae.py | 17 | 4580 | """
This module contains EXPERIMENTAL support for storing a Whoosh index's files in
the Google App Engine blobstore. This will use a lot of RAM since all files are
loaded into RAM, but it is potentially useful as a workaround for the lack of file
storage in Google App Engine.
Use at your own risk, but please report any problems to me so I can fix them.
To create a new index::
from whoosh.filedb.gae import DataStoreStorage
ix = DataStoreStorage().create_index(schema)
To open an existing index::
ix = DataStoreStorage().open_index()
"""
from google.appengine.api import memcache #@UnresolvedImport
from google.appengine.ext import db #@UnresolvedImport
from whoosh.compat import BytesIO
from whoosh.store import Storage
from whoosh.filedb.fileindex import _create_index, FileIndex, _DEF_INDEX_NAME
from whoosh.filedb.filestore import ReadOnlyError
from whoosh.filedb.structfile import StructFile
class DatastoreFile(db.Model):
    """A file-like object that is backed by a BytesIO() object whose contents
    is loaded from a BlobProperty in the app engine datastore.
    """
    # The persisted file contents.
    value = db.BlobProperty()
    def __init__(self, *args, **kwargs):
        super(DatastoreFile, self).__init__(*args, **kwargs)
        # In-memory buffer; loadfile() replaces it with the stored blob.
        self.data = BytesIO()
    @classmethod
    def loadfile(cls, name):
        """Load the named file, preferring the memcache copy over a
        datastore read; primes memcache on a miss."""
        value = memcache.get(name, namespace="DatastoreFile")
        if value is None:
            # Cache miss: fetch the entity and cache its blob for next time.
            file = cls.get_by_key_name(name)
            memcache.set(name, file.value, namespace="DatastoreFile")
        else:
            # Cache hit: build a detached entity around the cached blob.
            file = cls(value=value)
        file.data = BytesIO(file.value)
        return file
    def close(self):
        # Persist (and refresh the cache) only when the content changed.
        oldvalue = self.value
        self.value = self.getvalue()
        if oldvalue != self.value:
            self.put()
            memcache.set(self.key().id_or_name(), self.value,
                         namespace="DatastoreFile")
    def tell(self):
        return self.data.tell()
    def write(self, data):
        return self.data.write(data)
    def read(self, length):
        return self.data.read(length)
    def seek(self, *args):
        return self.data.seek(*args)
    def readline(self):
        return self.data.readline()
    def getvalue(self):
        return self.data.getvalue()
class MemcacheLock(object):
    """Advisory lock built on memcache.add().

    memcache.add() only succeeds when the key is absent, which provides an
    atomic test-and-set; the 360-second expiry keeps a crashed holder from
    wedging the lock forever.
    """
    def __init__(self, name):
        self.name = name
    def acquire(self, blocking=False):
        """Try to take the lock; returns a true value on success.

        With blocking=True, polls every 0.1s until the add() succeeds.
        """
        val = memcache.add(self.name, "L", 360, namespace="whooshlocks")
        if blocking and not val:
            # Simulate blocking by retrying the acquire over and over
            import time
            while not val:
                time.sleep(0.1)
                # Fix: store the same placeholder value "L" as the initial
                # attempt; the retry previously stored an empty string.
                val = memcache.add(self.name, "L", 360, namespace="whooshlocks")
        return val
    def release(self):
        """Release the lock by deleting the memcache key."""
        memcache.delete(self.name, namespace="whooshlocks")
class DatastoreStorage(Storage):
    """An implementation of :class:`whoosh.store.Storage` that stores files in
    the app engine datastore as blob properties.
    """
    def create_index(self, schema, indexname=_DEF_INDEX_NAME):
        """Create a new index; raises ReadOnlyError when opened read-only."""
        if self.readonly:
            raise ReadOnlyError
        _create_index(self, schema, indexname)
        return FileIndex(self, schema, indexname)
    def open_index(self, indexname=_DEF_INDEX_NAME, schema=None):
        """Open an existing index (the schema is loaded when None)."""
        return FileIndex(self, schema=schema, indexname=indexname)
    def list(self):
        """Return the key names of all stored files."""
        return [entity.key().id_or_name() for entity in DatastoreFile.all()]
    def clean(self):
        # Nothing to clean up for datastore-backed storage.
        pass
    def total_size(self):
        """Total size in bytes of all stored files."""
        return sum(self.file_length(f) for f in self.list())
    def file_exists(self, name):
        # 'is not None' instead of '!= None': identity is the idiomatic and
        # reliable way to test for a missing entity.
        return DatastoreFile.get_by_key_name(name) is not None
    def file_length(self, name):
        return len(DatastoreFile.get_by_key_name(name).value)
    def delete_file(self, name):
        # Invalidate the cache first so loadfile() cannot resurrect the blob.
        memcache.delete(name, namespace="DatastoreFile")
        return DatastoreFile.get_by_key_name(name).delete()
    def rename_file(self, name, newname, safe=False):
        """Copy the blob under the new key, then delete the old entity.

        Bug fix: also invalidate the memcache entry for the old name, so a
        later DatastoreFile.loadfile(name) cannot return the stale cached
        blob of the deleted entity (delete_file already invalidated it).
        The *safe* flag is accepted for interface compatibility and unused.
        """
        entity = DatastoreFile.get_by_key_name(name)
        newfile = DatastoreFile(key_name=newname)
        newfile.value = entity.value
        newfile.put()
        memcache.delete(name, namespace="DatastoreFile")
        entity.delete()
    def create_file(self, name, **kwargs):
        """Create a new file and return a StructFile wrapping it."""
        f = StructFile(DatastoreFile(key_name=name), name=name,
                       onclose=lambda sfile: sfile.file.close())
        return f
    def open_file(self, name, *args, **kwargs):
        """Open an existing file (memcache-backed when possible)."""
        return StructFile(DatastoreFile.loadfile(name))
    def lock(self, name):
        return MemcacheLock(name)
| agpl-3.0 |
wmorning/EvilLens | evillens/exponentialdiskLens.py | 1 | 3575 | # -*- coding: utf-8 -*-
"""
Created on Mon Sep 29 22:53:20 2014
@author: warrenmorningstar
"""
# ======================================================================
from astropy import units, constants
from astropy.cosmology import FlatLambdaCDM
import numpy as np
import evillens as evil
from scipy.interpolate import interp1d
# ======================================================================
class ExponentialDiskLens(evil.GravitationalLens):
    '''
    An Exponential disk lens (or chameleon imitating an exponential
    disk lens), used for lower mass edge-on spiral galaxy lenses.
    Lens model is specified by the following parameters
    - b: Deflection scale (amplitude)
    - q: axis ratio
    - R_s: Scale Radius
    - centroid: coordinates of the lens center
    - angle: angle of the major axis of the lens
    '''
    def __init__(self, *args, **kwargs):
        super(ExponentialDiskLens, self).__init__(*args, **kwargs)
        # Default parameter values; overwritten by Build_kappa_map().
        self.b = 1.
        self.q = 1.
        self.R_s = 0.1
        self.centroid = [0.01,0.01]
        self.angle = 0.0
        self.n = 1
        return
    def Build_kappa_map(self,b,q,R_s,centroid,angle):
        '''
        Set the parameters of the lens model, and build the convergence
        map as the difference of two cored isothermal profiles (a
        "chameleon" profile imitating an exponential disk).
        '''
        self.b = b
        self.q = q
        self.R_s = R_s
        self.centroid=centroid
        self.angle=angle + np.pi/2. # set angle relative to y axis
        # set to chameleon parameters (fitting formulas in Sersic index n).
        # NOTE(review): the final cubic term uses (n - 1.5) while the other
        # terms use (n - 1.15); this looks like a typo but is kept as-is --
        # confirm against the chameleon fitting formula before changing.
        self.alpha_chm = pow(10,-0.739-0.527*(self.n-2.03)-0.012*pow(self.n-2.03,2)-0.008*pow(self.n-2.03,3))
        self.R0_chm = pow(10,0.078-0.184*(self.n-1.15)+0.473*pow(self.n-1.15,2)-0.079*pow(self.n-1.5,3))*self.R_s
        # Shift to the lens centroid and rotate into the lens frame.
        x1p = self.image_x - self.centroid[0]
        x2p = self.image_y - self.centroid[1]
        x1 = np.cos(-self.angle)*x1p -np.sin(-self.angle)*x2p
        x2 = np.sin(-self.angle)*x1p +np.cos(-self.angle)*x2p
        # Difference of two softened isothermal convergences with core radii
        # R0 and R0/alpha.
        self.kappa = b / 2. /self.q / np.sqrt(self.R0_chm**2+x1**2+(x2/self.q)**2)
        self.kappa -= b / 2. /self.q / np.sqrt((self.R0_chm/self.alpha_chm)**2+x1**2+(x2/self.q)**2)
    def deflect(self):
        '''
        Compute the deflection-angle maps (alpha_x, alpha_y) as the
        difference of two softened isothermal ellipsoid deflections
        (Keeton 2001, "A Catalog of Mass Models for Gravitational
        Lensing"), each with core radius s:
            alpha_x = b/sqrt(1-q^2) * arctan( sqrt(1-q^2)*x1 / (psi + s) )
            alpha_y = b/sqrt(1-q^2) * arctanh( sqrt(1-q^2)*x2 / (psi + q^2*s) )
        where psi = sqrt(q^2*(s^2 + x1^2) + x2^2).

        Bug fix: the first component previously used (psi + q^2*s) in the
        arctan denominator and (psi + q^2 + s) -- a '+' where '*' was
        intended -- in the arctanh denominator, inconsistent both with the
        second component and with the standard formulas above.
        '''
        # Shift and rotate the image grid into the lens frame.
        x1p = self.image_x - self.centroid[0]
        x2p = self.image_y - self.centroid[1]
        x1 = np.cos(-self.angle)*x1p -np.sin(-self.angle)*x2p
        x2 = np.sin(-self.angle)*x1p +np.cos(-self.angle)*x2p
        q2 = self.q**2
        eps = np.sqrt(1.-q2)  # sqrt(1 - q^2)
        prefactor = self.b/eps
        def _sie_deflection(s):
            # Deflection of one softened isothermal component, core radius s.
            psi = np.sqrt(q2*(s**2+x1**2)+x2**2)
            ax = prefactor*np.arctan(eps*x1/(psi+s))
            ay = prefactor*np.arctanh(eps*x2/(psi+q2*s))
            return ax, ay
        ax1, ay1 = _sie_deflection(self.R0_chm)
        ax2, ay2 = _sie_deflection(self.R0_chm/self.alpha_chm)
        alphaXp = ax1 - ax2
        alphaYp = ay1 - ay2
        # Rotate the deflection field back into the image frame.
        self.alpha_x = np.cos(self.angle)*alphaXp -np.sin(self.angle)*alphaYp
        self.alpha_y = np.sin(self.angle)*alphaXp + np.cos(self.angle)*alphaYp
ThiagoGarciaAlves/intellij-community | python/helpers/py2only/docutils/parsers/rst/directives/html.py | 128 | 3098 | # $Id: html.py 7320 2012-01-19 22:33:02Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
Directives for typically HTML-specific constructs.
"""
__docformat__ = 'reStructuredText'
import sys
from docutils import nodes, utils
from docutils.parsers.rst import Directive
from docutils.parsers.rst import states
from docutils.transforms import components
class MetaBody(states.SpecializedBody):
    """Specialized body state whose only recognized construct is the field
    marker; each field becomes an HTML-specific "meta" node."""
    class meta(nodes.Special, nodes.PreBibliographic, nodes.Element):
        """HTML-specific "meta" element."""
        pass
    def field_marker(self, match, context, next_state):
        """Meta element."""
        node, blank_finish = self.parsemeta(match)
        self.parent += node
        return [], next_state, []
    def parsemeta(self, match):
        # Parse one "field: value" item into a pending meta node.
        name = self.parse_field_marker(match)
        indented, indent, line_offset, blank_finish = \
            self.state_machine.get_first_known_indented(match.end())
        node = self.meta()
        # Wrap in a pending Filter so the node survives only when the writer's
        # output format is 'html'.
        pending = nodes.pending(components.Filter,
                                {'component': 'writer',
                                 'format': 'html',
                                 'nodes': [node]})
        node['content'] = ' '.join(indented)
        if not indented:
            # Empty field body: report an info message instead of a meta node.
            line = self.state_machine.line
            msg = self.reporter.info(
                'No content for meta tag "%s".' % name,
                nodes.literal_block(line, line))
            return msg, blank_finish
        tokens = name.split()
        try:
            # The first token may itself be a name=value attribute pair.
            attname, val = utils.extract_name_value(tokens[0])[0]
            node[attname.lower()] = val
        except utils.NameValueError:
            node['name'] = tokens[0]
        for token in tokens[1:]:
            try:
                attname, val = utils.extract_name_value(token)[0]
                node[attname.lower()] = val
            except utils.NameValueError, detail:
                # (Python 2 'except X, y' syntax: this module targets py2.)
                line = self.state_machine.line
                msg = self.reporter.error(
                    'Error parsing meta tag attribute "%s": %s.'
                    % (token, detail), nodes.literal_block(line, line))
                return msg, blank_finish
        self.document.note_pending(pending)
        return pending, blank_finish
class Meta(Directive):
    """Directive that inserts HTML <meta> tags, parsing its content with the
    specialized MetaBody state."""
    has_content = True
    # Extra state machine configuration handing parsing off to MetaBody.
    SMkwargs = {'state_classes': (MetaBody,)}
    def run(self):
        self.assert_has_content()
        container = nodes.Element()
        new_line_offset, blank_finish = self.state.nested_list_parse(
            self.content, self.content_offset, container,
            initial_state='MetaBody', blank_finish=True,
            state_machine_kwargs=self.SMkwargs)
        parsed_all = (new_line_offset - self.content_offset) == len(self.content)
        if not parsed_all:
            # incomplete parse of block?
            error = self.state_machine.reporter.error(
                'Invalid meta directive.',
                nodes.literal_block(self.block_text, self.block_text),
                line=self.lineno)
            container += error
        return container.children
jaruba/chromium.src | chrome/test/data/search/tools/instant_extended_manual_tests.py | 70 | 1952 | #!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Records or runs web-page-replay data for InstantExtendedManualTests.*.
Typical usage is:
$ cd src/chrome/test/data/search/tools/
$ sudo instant_extended_manual_tests.py record \
../../../../../third_party/webpagereplay/replay.py \
../../../../../out/Debug/interactive_ui_tests \
../replay/archive.wpr
This will create the archive.wpr file against the latest google.com GWS.
The tests then can be isolated from the live site by replaying this captured
session on the bots.
This archive.wpr file should be re-recorded and checked into the repo whenever
new manual tests are created.
"""
import signal
import subprocess
import sys
def Usage():
  # Print command-line help and return exit status 1 (reused by main()).
  print 'Usage: sudo python instant_extended_manual_tests.py (record|run) \\'
  print ' <path/to/replay> <path/to/ui_tests> <path/to/data>'
  return 1
def ReplayTests(replay_path, test_path, data_path, arg=None):
  """Run the InstantExtendedManualTest suite inside a web-page-replay session.

  When |arg| is given (e.g. '--record') it is inserted before the archive
  path on the replay server command line. Returns the test exit status.
  """
  # Start up web-page-replay.
  replay_cmd = [replay_path, data_path] if not arg else [replay_path, arg, data_path]
  replay_server = subprocess.Popen(replay_cmd)
  # Run the tests within the mock server.
  return_value = subprocess.call(
      [test_path,
       '--gtest_filter=InstantExtendedManualTest.*',
       '--run-manual',
       '--enable-benchmarking',
       '--enable-stats-table',
       '--ignore-certificate-errors'])
  # Shut down web-page-replay and save the recorded session to |data_path|.
  replay_server.send_signal(signal.SIGINT)
  replay_server.wait()
  return return_value
def main():
  """Dispatch to record/run mode based on argv; returns an exit status."""
  if len(sys.argv) != 5:
    return Usage()
  mode, replay_path, test_path, data_path = sys.argv[1:5]
  if mode == 'record':
    return ReplayTests(replay_path, test_path, data_path, '--record')
  if mode == 'run':
    return ReplayTests(replay_path, test_path, data_path)
  return Usage()
if __name__ == '__main__':
  sys.exit(main())
| bsd-3-clause |
kushalbhola/MyStuff | Practice/PythonApplication/env/Lib/site-packages/numpy/core/__init__.py | 4 | 4748 | from __future__ import division, absolute_import, print_function
from .info import __doc__
from numpy.version import version as __version__
import os
# disables OpenBLAS affinity setting of the main thread that limits
# python threads or processes to one core
env_added = []
for envkey in ['OPENBLAS_MAIN_FREE', 'GOTOBLAS_MAIN_FREE']:
    if envkey not in os.environ:
        os.environ[envkey] = '1'
        env_added.append(envkey)
# Import the C extension; on failure, raise a much more helpful message.
try:
    from . import multiarray
except ImportError as exc:
    import sys
    msg = """
IMPORTANT: PLEASE READ THIS FOR ADVICE ON HOW TO SOLVE THIS ISSUE!
Importing the numpy c-extensions failed.
- Try uninstalling and reinstalling numpy.
- If you have already done that, then:
  1. Check that you expected to use Python%d.%d from "%s",
  and that you have no directories in your PATH or PYTHONPATH that can
  interfere with the Python and numpy version "%s" you're trying to use.
  2. If (1) looks fine, you can open a new issue at
  https://github.com/numpy/numpy/issues.  Please include details on:
  - how you installed Python
  - how you installed numpy
  - your operating system
  - whether or not you have multiple versions of Python installed
  - if you built from source, your compiler versions and ideally a build log
- If you're working with a numpy git repository, try `git clean -xdf`
(removes all files not under version control) and rebuild numpy.
Note: this error has many possible causes, so please don't comment on
an existing issue about this - open a new one instead.
Original error was: %s
""" % (sys.version_info[0], sys.version_info[1], sys.executable,
       __version__, exc)
    raise ImportError(msg)
finally:
    # Restore the environment: remove only the keys that were added above,
    # then drop the bookkeeping names from the module namespace.
    for envkey in env_added:
        del os.environ[envkey]
    del envkey
    del env_added
    del os
from . import umath
# Check that multiarray,umath are pure python modules wrapping
# _multiarray_umath and not either of the old c-extension modules
if not (hasattr(multiarray, '_multiarray_umath') and
        hasattr(umath, '_multiarray_umath')):
    import sys
    path = sys.modules['numpy'].__path__
    msg = ("Something is wrong with the numpy installation. "
           "While importing we detected an older version of "
           "numpy in {}. One method of fixing this is to repeatedly uninstall "
           "numpy until none is found, then reinstall this version.")
    raise ImportError(msg.format(path))
from . import numerictypes as nt
# Hand the scalar-type dictionary to the C extension.
multiarray.set_typeDict(nt.sctypeDict)
from . import numeric
from .numeric import *
from . import fromnumeric
from .fromnumeric import *
from . import defchararray as char
from . import records as rec
from .records import *
from .memmap import *
from .defchararray import chararray
from . import function_base
from .function_base import *
from . import machar
from .machar import *
from . import getlimits
from .getlimits import *
from . import shape_base
from .shape_base import *
from . import einsumfunc
from .einsumfunc import *
del nt
# Shadow the builtins with numpy's array-aware variants at package level.
from .fromnumeric import amax as max, amin as min, round_ as round
from .numeric import absolute as abs
# do this after everything else, to minimize the chance of this misleadingly
# appearing in an import-time traceback
from . import _add_newdocs
# add these for module-freeze analysis (like PyInstaller)
from . import _dtype_ctypes
from . import _internal
from . import _dtype
from . import _methods
# Assemble the public API from the submodules' __all__ lists.
__all__ = ['char', 'rec', 'memmap']
__all__ += numeric.__all__
__all__ += fromnumeric.__all__
__all__ += rec.__all__
__all__ += ['chararray']
__all__ += function_base.__all__
__all__ += machar.__all__
__all__ += getlimits.__all__
__all__ += shape_base.__all__
__all__ += einsumfunc.__all__
# Make it possible so that ufuncs can be pickled
# Here are the loading and unloading functions
# The name numpy.core._ufunc_reconstruct must be
# available for unpickling to work.
def _ufunc_reconstruct(module, name):
# The `fromlist` kwarg is required to ensure that `mod` points to the
# inner-most module rather than the parent package when module name is
# nested. This makes it possible to pickle non-toplevel ufuncs such as
# scipy.special.expit for instance.
mod = __import__(module, fromlist=[name])
return getattr(mod, name)
def _ufunc_reduce(func):
    """__reduce__ helper: pickle a ufunc as (reconstructor, (module, name))."""
    from pickle import whichmodule
    func_name = func.__name__
    return _ufunc_reconstruct, (whichmodule(func, func_name), func_name)
import sys
if sys.version_info[0] >= 3:
    import copyreg
else:
    # Python 2 spelling of the same module.
    import copy_reg as copyreg
# Register pickle support for ufunc objects.
copyreg.pickle(ufunc, _ufunc_reduce, _ufunc_reconstruct)
# Unclutter namespace (must keep _ufunc_reconstruct for unpickling)
del copyreg
del sys
del _ufunc_reduce
from numpy._pytesttester import PytestTester
# Expose numpy.core.test() for running the test suite via pytest.
test = PytestTester(__name__)
del PytestTester
zero-rp/miniblink49 | v8_7_5/tools/testrunner/outproc/base.py | 7 | 6048 | # Copyright 2017 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import itertools
from ..testproc.base import (
DROP_RESULT, DROP_OUTPUT, DROP_PASS_OUTPUT, DROP_PASS_STDOUT)
from ..local import statusfile
from ..testproc.result import Result
# Shared module-level outcome lists. OutProc.__getstate__ relies on the
# identity of OUTCOMES_PASS, so these must stay singletons.
OUTCOMES_PASS = [statusfile.PASS]
OUTCOMES_FAIL = [statusfile.FAIL]
OUTCOMES_PASS_OR_TIMEOUT = [statusfile.PASS, statusfile.TIMEOUT]
OUTCOMES_FAIL_OR_TIMEOUT = [statusfile.FAIL, statusfile.TIMEOUT]
class BaseOutProc(object):
  """Classifies a test's output and builds a (possibly reduced) Result."""

  def process(self, output, reduction=None):
    unexpected = self.has_unexpected_output(output)
    return self._create_result(unexpected, output, reduction)

  def has_unexpected_output(self, output):
    return self.get_outcome(output) not in self.expected_outcomes

  def _create_result(self, has_unexpected_output, output, reduction):
    """Creates Result instance. When reduction is passed it tries to drop some
    parts of the result to save memory and time needed to send the result
    across process boundary. None disables reduction and full result is created.
    """
    if reduction == DROP_RESULT:
      return None
    if reduction == DROP_OUTPUT:
      return Result(has_unexpected_output, None)
    if not has_unexpected_output:
      # Pass-only reductions apply solely to expected results.
      if reduction == DROP_PASS_OUTPUT:
        return Result(has_unexpected_output, None)
      if reduction == DROP_PASS_STDOUT:
        return Result(has_unexpected_output, output.without_text())
    return Result(has_unexpected_output, output)

  def get_outcome(self, output):
    # Precedence: crash over timeout over ordinary failure.
    if output.HasCrashed():
      return statusfile.CRASH
    if output.HasTimedOut():
      return statusfile.TIMEOUT
    if self._has_failed(output):
      return statusfile.FAIL
    return statusfile.PASS

  def _has_failed(self, output):
    execution_failed = self._is_failure_output(output)
    # Negative tests are expected to fail, so invert the interpretation.
    return not execution_failed if self.negative else execution_failed

  def _is_failure_output(self, output):
    return output.exit_code != 0

  @property
  def negative(self):
    return False

  @property
  def expected_outcomes(self):
    raise NotImplementedError()
class Negative(object):
  """Mixin marking an output processor as handling a negative test.

  Mixing this in makes BaseOutProc._has_failed invert its verdict.
  """

  @property
  def negative(self):
    return True
class PassOutProc(BaseOutProc):
  """Output processor optimized for positive tests expected to PASS."""

  def has_unexpected_output(self, output):
    # Fast path: no membership test needed, only a plain PASS is expected.
    outcome = self.get_outcome(output)
    return outcome != statusfile.PASS

  @property
  def expected_outcomes(self):
    return OUTCOMES_PASS
# Combines the Negative mixin (inverted failure detection) with the
# PASS-only fast path of PassOutProc.
class NegPassOutProc(Negative, PassOutProc):
  """Output processor optimized for negative tests expected to PASS"""
  pass
class OutProc(BaseOutProc):
  """Output processor optimized for positive tests with expected outcomes
  different than a single PASS.
  """
  def __init__(self, expected_outcomes):
    self._expected_outcomes = expected_outcomes

  @property
  def expected_outcomes(self):
    return self._expected_outcomes

  # TODO(majeski): Inherit from PassOutProc in case of OUTCOMES_PASS and remove
  # custom get/set state.
  def __getstate__(self):
    # Shrink the pickled form: when the expected outcomes are the shared
    # OUTCOMES_PASS list (compared by identity, not equality), drop the
    # attribute and let __setstate__ restore it on the receiving side.
    d = self.__dict__
    if self._expected_outcomes is OUTCOMES_PASS:
      d = d.copy()
      del d['_expected_outcomes']
    return d

  def __setstate__(self, d):
    if '_expected_outcomes' not in d:
      d['_expected_outcomes'] = OUTCOMES_PASS
    self.__dict__.update(d)
# TODO(majeski): Override __reduce__ to make it deserialize as one instance.
# Shared singleton processors for the common positive/negative PASS cases.
DEFAULT = PassOutProc()
DEFAULT_NEGATIVE = NegPassOutProc()
class ExpectedOutProc(OutProc):
  """Output processor that has is_failure_output depending on comparing the
  output with the expected output.
  """
  def __init__(self, expected_outcomes, expected_filename):
    super(ExpectedOutProc, self).__init__(expected_outcomes)
    # Path of the golden file the actual stdout is compared against.
    self._expected_filename = expected_filename

  def _is_failure_output(self, output):
    with open(self._expected_filename, 'r') as f:
      expected_lines = f.readlines()

    # Each stress-run block of actual output must match the expected lines
    # exactly (after filtering); any mismatch or length difference fails.
    for act_iterator in self._act_block_iterator(output):
      # NOTE(review): itertools.izip_longest is Python 2 only; this would
      # need itertools.zip_longest if the runner is ported to Python 3.
      for expected, actual in itertools.izip_longest(
          self._expected_iterator(expected_lines),
          act_iterator,
          fillvalue=''
      ):
        if expected != actual:
          return True
    return False

  def _act_block_iterator(self, output):
    """Iterates over blocks of actual output lines."""
    lines = output.stdout.splitlines()
    start_index = 0
    found_eqeq = False
    for index, line in enumerate(lines):
      # If a stress test separator is found:
      if line.startswith('=='):
        # Iterate over all lines before a separator except the first.
        if not found_eqeq:
          found_eqeq = True
        else:
          yield self._actual_iterator(lines[start_index:index])
        # The next block of output lines starts after the separator.
        start_index = index + 1
    # Iterate over complete output if no separator was found.
    if not found_eqeq:
      yield self._actual_iterator(lines)

  def _actual_iterator(self, lines):
    return self._iterator(lines, self._ignore_actual_line)

  def _expected_iterator(self, lines):
    return self._iterator(lines, self._ignore_expected_line)

  def _ignore_actual_line(self, line):
    """Ignore empty lines, valgrind output, Android output and trace
    incremental marking output.
    """
    if not line:
      return True
    return (line.startswith('==') or
            line.startswith('**') or
            line.startswith('ANDROID') or
            line.startswith('###') or
            # FIXME(machenbach): The test driver shouldn't try to use slow
            # asserts if they weren't compiled. This fails in optdebug=2.
            line == 'Warning: unknown flag --enable-slow-asserts.' or
            line == 'Try --help for options')

  def _ignore_expected_line(self, line):
    # Only blank lines are filtered from the golden file.
    return not line

  def _iterator(self, lines, ignore_predicate):
    # Yield stripped lines, skipping those the predicate filters out.
    for line in lines:
      line = line.strip()
      if not ignore_predicate(line):
        yield line
| apache-2.0 |
annarev/tensorflow | tensorflow/python/data/experimental/kernel_tests/serialization/sample_from_datasets_serialization_test.py | 1 | 1888 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for checkpointing the SampleFromDatasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.data.experimental.ops import interleave_ops
from tensorflow.python.data.kernel_tests import checkpoint_test_base
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import combinations
from tensorflow.python.platform import test
class SampleFromDatasetsCheckpointTest(checkpoint_test_base.CheckpointTestBase,
                                       parameterized.TestCase):
  """Checkpoint/restore coverage for `sample_from_datasets`."""

  def _build_dataset(self, probs, num_samples):
    # One infinite constant dataset per probability; the fixed seed keeps
    # the sampling sequence deterministic across save/restore.
    dataset = interleave_ops.sample_from_datasets(
        [
            dataset_ops.Dataset.from_tensors(i).repeat(None)
            for i in range(len(probs))
        ],
        probs,
        seed=1813)
    return dataset.take(num_samples)

  @combinations.generate(test_base.default_test_combinations())
  def testCheckpointCore(self):
    self.run_core_tests(lambda: self._build_dataset([0.5, 0.5], 100), 100)
if __name__ == "__main__":
test.main()
| apache-2.0 |
rhololkeolke/apo-website | src/oauth2/clients/smtp.py | 884 | 1680 | """
The MIT License
Copyright (c) 2007-2010 Leah Culver, Joe Stump, Mark Paschal, Vic Fryzel
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import oauth2
import smtplib
import base64
class SMTP(smtplib.SMTP):
    """SMTP wrapper for smtplib.SMTP that implements XOAUTH."""

    def authenticate(self, url, consumer, token):
        """Issue an AUTH XOAUTH command built from the OAuth credentials.

        consumer/token may be None; if given they must be oauth2 types.
        """
        if not (consumer is None or isinstance(consumer, oauth2.Consumer)):
            raise ValueError("Invalid consumer.")
        if not (token is None or isinstance(token, oauth2.Token)):
            raise ValueError("Invalid token.")

        xoauth = oauth2.build_xoauth_string(url, consumer, token)
        self.docmd('AUTH', 'XOAUTH %s' % base64.b64encode(xoauth))
| bsd-3-clause |
ecoPlanos/SputnikRedeSensores | Kicad/libs/agg-kicad/scripts/kicad_mod.py | 2 | 2908 | """
kicad_mod.py
Copyright 2015 Adam Greig
Licensed under the MIT licence, see LICENSE file for details.
Helper functions for generating KiCAD footprint files.
"""
from __future__ import print_function, division
# Courtyard layer defaults (millimetres): pad-to-courtyard gap, snap grid,
# and line width.
CTYD_GAP = 0.25
CTYD_GRID = 0.05
CTYD_WIDTH = 0.01

# Default line widths for silkscreen and fabrication layers (mm).
SILK_WIDTH = 0.15
FAB_WIDTH = 0.01

# Default text appearance for fp_text elements: (width, height) size,
# stroke thickness, and half the font height (used for label placement).
FONT_SIZE = (1.0, 1.0)
FONT_THICKNESS = 0.15
FONT_HALFHEIGHT = 0.7
def fp_line(start, end, layer, width):
    """Return the s-expression list for a KiCAD fp_line graphic element."""
    x1, y1 = start[0], start[1]
    x2, y2 = end[0], end[1]
    return [
        "fp_line",
        ["start", x1, y1],
        ["end", x2, y2],
        ["layer", layer],
        ["width", width],
    ]
def fp_arc(start, end, angle, layer, width):
    """Return the s-expression list for a KiCAD fp_arc graphic element.

    `angle` is the swept angle in degrees.  (In the legacy footprint
    format `start` appears to be the arc centre — confirm against the
    KiCAD format docs before relying on that.)
    """
    sx, sy = start[0], start[1]
    ex, ey = end[0], end[1]
    return [
        "fp_arc",
        ["start", sx, sy],
        ["end", ex, ey],
        ["angle", angle],
        ["layer", layer],
        ["width", width],
    ]
def fp_circle(centre, end, layer, width):
    """Return the s-expression list for a KiCAD fp_circle element.

    `centre` is the circle centre; `end` is a point on the circumference.
    """
    elements = [
        "fp_circle",
        ["center", centre[0], centre[1]],
        ["end", end[0], end[1]],
        ["layer", layer],
        ["width", width],
    ]
    return elements
def fp_text(texttype, text, at, layer, size, thickness):
    """Return the s-expression list for a KiCAD fp_text element.

    `at` may carry two or three values (x, y[, rotation]).
    """
    font = ["font",
            ["size", size[0], size[1]],
            ["thickness", thickness]]
    return [
        "fp_text", texttype, text,
        ["at"] + list(at),
        ["layer", layer],
        ["effects", font],
    ]
def pad(num, padtype, shape, at, size, layers, drill=None, offset=None,
        m_mask=None, m_paste=None):
    """Return the s-expression list for a KiCAD pad.

    drill may be a single diameter or an (x, y) oval specification;
    offset shifts the drill relative to the pad centre.  m_mask/m_paste
    set per-pad solder mask / paste margins.
    """
    result = ["pad", num, padtype, shape,
              ["at", at[0], at[1]],
              ["size"] + list(size),
              ["layers"] + list(layers)]

    if drill is not None or offset is not None:
        drill_sexp = ["drill"]
        if isinstance(drill, (float, int)):
            drill_sexp.append(drill)
        elif drill is not None:
            drill_sexp += drill
        if offset is not None:
            drill_sexp.append(["offset"] + offset)
        result.append(drill_sexp)

    if m_mask is not None:
        result.append(["solder_mask_margin", m_mask])
    if m_paste is not None:
        result.append(["solder_paste_margin", m_paste])
    return result
def draw_square(width, height, centre, layer, thickness):
    """Draw a `width` x `height` rectangle centred on `centre`.

    Returns the four corners (nw, ne, se, sw) followed by the list of
    fp_line elements forming the outline.
    """
    cx, cy = centre[0], centre[1]
    half_w = width/2
    half_h = height/2
    nw = (-half_w + cx, -half_h + cy)
    ne = (half_w + cx, -half_h + cy)
    se = (half_w + cx, half_h + cy)
    sw = (-half_w + cx, half_h + cy)
    out = [fp_line(a, b, layer, thickness)
           for a, b in ((nw, ne), (ne, se), (se, sw), (sw, nw))]
    return nw, ne, se, sw, out
def model(path, offset, scale, rotate):
    """Return the s-expression list for a 3D model reference."""
    def xyz(v):
        return ["xyz", v[0], v[1], v[2]]
    return ["model", path,
            ["at", xyz(offset)],
            ["scale", xyz(scale)],
            ["rotate", xyz(rotate)]]
| gpl-3.0 |
alexei-matveev/ase-local | ase/io/plt.py | 7 | 1598 | import numpy as np
from ase.atoms import Atoms
def write_plt(filename, atoms, data):
    """Write volumetric ``data`` to *filename* in gOpenMol .plt format.

    atoms: an Atoms object (its cell is used) or a cell specification
        (3x3 matrix or length-3 diagonal); the cell must be orthorhombic.
    data: 3D array of grid values; complex data is written as its magnitude.

    Raises ValueError if a 3x3 cell has off-diagonal elements.
    """
    if isinstance(atoms, Atoms):
        cell = atoms.get_cell()
    else:
        cell = np.asarray(atoms, float)
    if cell.ndim == 2:
        c = cell.copy()
        cell = c.diagonal()
        c.flat[::4] = 0.0  # zero the diagonal; any remaining entry is off-diagonal
        if c.any():
            raise ValueError('Unit cell must be orthorhombic!')
    # 'wb' (was 'w'): ndarray.tofile needs a binary-mode file under
    # Python 3, and text mode corrupts binary output on Windows.  The
    # context manager also guarantees the handle is closed on error.
    with open(filename, 'wb') as f:
        # Header: gOpenMol magic numbers (file format 3, data type 4).
        np.array([3, 4], np.int32).tofile(f)
        dims = np.array(data.shape, np.int32)
        dims[::-1].tofile(f)  # grid dimensions, slowest axis first
        for n, L in zip(dims[::-1], cell[::-1]):
            # Axis extent [beg, end]; spacing convention differs for
            # even/odd grid sizes (mirrored by read_plt).
            if n % 2 == 0:
                d = L / n
                np.array([0.0, L - d], np.float32).tofile(f)
            else:
                d = L / (n + 1)
                np.array([d, L - d], np.float32).tofile(f)
        if data.dtype == complex:
            data = np.abs(data)
        # Transpose so the fastest-varying axis in the file is x.
        data.astype(np.float32).T.tofile(f)
def read_plt(fileobj):
    """Read a gOpenMol .plt file.

    fileobj: a filename or an open binary file object.  A file opened
        here is closed before returning (the original leaked it); a
        caller-supplied file object is left open.

    Returns (data, cell): the 3D grid transposed back to (x, y, z) order,
    and a diagonal 3x3 float32 cell matrix.
    """
    opened_here = isinstance(fileobj, str)
    if opened_here:
        fileobj = open(fileobj, 'rb')
    try:
        # Skip the two magic numbers (file format, data type).
        np.fromfile(fileobj, dtype=np.int32, count=2)
        # Grid dimensions, slowest axis first.
        dims = np.fromfile(fileobj, dtype=np.int32, count=3)
        cell = np.zeros((3, 3), np.float32)
        for c in range(3):
            beg, Lmd = np.fromfile(fileobj, dtype=np.float32, count=2)
            n = dims[c]
            # Invert write_plt's even/odd spacing convention to recover
            # the full cell length from the stored extent.
            if n % 2 == 0:
                cell[2 - c, 2 - c] = Lmd / (1 - 1. / n)
            else:
                cell[2 - c, 2 - c] = Lmd / (1 - 1. / (n + 1))
        data = np.fromfile(fileobj, dtype=np.float32)
        return data.reshape(dims).T, cell
    finally:
        if opened_here:
            fileobj.close()
| gpl-2.0 |
OpenMined/PySyft | packages/syft/src/syft/ast/property.py | 1 | 1943 | """This module contains `Property` attribute representing property objects which
implements getter and setter objects."""
# stdlib
from typing import Any
from typing import Callable as CallableT
from typing import Optional
from typing import Tuple
from typing import Union
# syft relative
from .. import ast
from ..logger import traceback_and_raise
class Property(ast.attribute.Attribute):
    """Creates property objects which implements getter and setter objects.

    Each valid action on AST triggers GetSetPropertyAction.
    """

    def __init__(
        self,
        path_and_name: str,
        parent: ast.attribute.Attribute,
        object_ref: Optional[Any] = None,
        return_type_name: Optional[str] = None,
        client: Optional[Any] = None,
    ) -> None:
        """Base constructor for Property Attribute.

        Args:
            client: The client for which all computation is being executed.
            path_and_name: The path for the current node, e.g. `syft.lib.python.List`.
            object_ref: The actual python object for which the computation is being made.
            return_type_name: The given action's return type name, with its full path, in string format.
            parent: The parent node in the AST.
        """
        super().__init__(
            path_and_name=path_and_name,
            parent=parent,
            object_ref=object_ref,
            return_type_name=return_type_name,
            client=client,
        )
        # Properties are always accessed through an instance, never statically.
        self.is_static = False

    def __call__(
        self,
        *args: Tuple[Any, ...],
        **kwargs: Any,
    ) -> Optional[Union[Any, CallableT]]:
        """`Property` attribute is not callable.

        Properties are read/written via getter and setter actions; invoking
        one directly is always a programming error.

        Args:
            *args: List of arguments.
            **kwargs: Keyword arguments.

        Raises:
            ValueError: If the function is called.
        """
        traceback_and_raise(ValueError("Property should never be called."))
| apache-2.0 |
skycucumber/Messaging-Gateway | webapp/venv/lib/python2.7/site-packages/twisted/application/app.py | 28 | 21708 | # -*- test-case-name: twisted.test.test_application,twisted.test.test_twistd -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
import sys, os, pdb, getpass, traceback, signal
from operator import attrgetter
from twisted.python import runtime, log, usage, failure, util, logfile
from twisted.python.versions import Version
from twisted.python.reflect import qual, namedAny
from twisted.python.deprecate import deprecated
from twisted.python.log import ILogObserver
from twisted.persisted import sob
from twisted.application import service, reactors
from twisted.internet import defer
from twisted import copyright, plugin
# Expose the new implementation of installReactor at the old location.
from twisted.application.reactors import installReactor
from twisted.application.reactors import NoSuchReactor
class _BasicProfiler(object):
    """
    Base class for profiler runners, holding the shared output options.

    @ivar saveStats: if C{True}, save the stats information instead of the
        human readable format
    @type saveStats: C{bool}

    @ivar profileOutput: the name of the file use to print profile data.
    @type profileOutput: C{str}
    """

    def __init__(self, profileOutput, saveStats):
        self.profileOutput = profileOutput
        self.saveStats = saveStats

    def _reportImportError(self, module, e):
        """
        Helper method to report an import error with a profile module. This
        has to be explicit because some of these modules are removed by
        distributions due to them being non-free.
        """
        s = "Failed to import module %s: %s" % (module, e)
        s += """
This is most likely caused by your operating system not including
the module due to it being non-free. Either do not use the option
--profile, or install the module; your operating system vendor
may provide it in a separate package.
"""
        # SystemExit rather than a normal exception: a missing profiler is a
        # configuration problem reported directly to the user.
        raise SystemExit(s)
class ProfileRunner(_BasicProfiler):
    """
    Runner for the standard profile module.
    """

    def run(self, reactor):
        """
        Run reactor under the standard profiler.
        """
        try:
            import profile
        except ImportError, e:
            self._reportImportError("profile", e)

        p = profile.Profile()
        p.runcall(reactor.run)
        if self.saveStats:
            p.dump_stats(self.profileOutput)
        else:
            # profile.print_stats only writes to sys.stdout, so swap stdout
            # for the output file while printing, then restore it.
            tmp, sys.stdout = sys.stdout, open(self.profileOutput, 'a')
            try:
                p.print_stats()
            finally:
                sys.stdout, tmp = tmp, sys.stdout
                tmp.close()
class HotshotRunner(_BasicProfiler):
    """
    Runner for the hotshot profile module.
    """

    def run(self, reactor):
        """
        Run reactor under the hotshot profiler.
        """
        try:
            import hotshot.stats
        except (ImportError, SystemExit), e:
            # Certain versions of Debian (and Debian derivatives) raise
            # SystemExit when importing hotshot if the "non-free" profiler
            # module is not installed. Someone eventually recognized this
            # as a bug and changed the Debian packaged Python to raise
            # ImportError instead. Handle both exception types here in
            # order to support the versions of Debian which have this
            # behavior. The bug report which prompted the introduction of
            # this highly undesirable behavior should be available online at
            # <http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=334067>.
            # There seems to be no corresponding bug report which resulted
            # in the behavior being removed. -exarkun
            self._reportImportError("hotshot", e)

        # this writes stats straight out
        p = hotshot.Profile(self.profileOutput)
        p.runcall(reactor.run)
        if self.saveStats:
            # stats are automatically written to file, nothing to do
            return
        else:
            s = hotshot.stats.load(self.profileOutput)
            s.strip_dirs()
            s.sort_stats(-1)
            # Replace the raw profile data on disk with the formatted report.
            s.stream = open(self.profileOutput, 'w')
            s.print_stats()
            s.stream.close()
class CProfileRunner(_BasicProfiler):
    """
    Runner for the cProfile module.
    """

    def run(self, reactor):
        """
        Run reactor under the cProfile profiler.
        """
        try:
            import cProfile, pstats
        except ImportError, e:
            self._reportImportError("cProfile", e)

        p = cProfile.Profile()
        p.runcall(reactor.run)
        if self.saveStats:
            p.dump_stats(self.profileOutput)
        else:
            # Render a human-readable report into the output file.
            stream = open(self.profileOutput, 'w')
            s = pstats.Stats(p, stream=stream)
            s.strip_dirs()
            s.sort_stats(-1)
            s.print_stats()
            stream.close()
class AppProfiler(object):
    """
    Selects and wraps a profiler runner based on the configuration options.

    @ivar profiler: the name of the selected profiler.
    @type profiler: C{str}
    """
    profilers = {"profile": ProfileRunner, "hotshot": HotshotRunner,
                 "cprofile": CProfileRunner}

    def __init__(self, options):
        saveStats = options.get("savestats", False)
        profileOutput = options.get("profile", None)
        self.profiler = options.get("profiler", "hotshot").lower()
        runnerClass = self.profilers.get(self.profiler)
        if runnerClass is None:
            raise SystemExit("Unsupported profiler name: %s" % (self.profiler,))
        # Expose the chosen runner's run method directly as self.run.
        self.run = runnerClass(profileOutput, saveStats).run
class AppLogger(object):
    """
    Class managing logging faciliy of the application.

    @ivar _logfilename: The name of the file to which to log, if other than the
        default.
    @type _logfilename: C{str}

    @ivar _observerFactory: Callable object that will create a log observer, or
        None.

    @ivar _observer: log observer added at C{start} and removed at C{stop}.
    @type _observer: C{callable}
    """
    _observer = None

    def __init__(self, options):
        self._logfilename = options.get("logfile", "")
        self._observerFactory = options.get("logger") or None

    def start(self, application):
        """
        Initialize the logging system.

        If a customer logger was specified on the command line it will be
        used. If not, and an L{ILogObserver} component has been set on
        C{application}, then it will be used as the log observer. Otherwise a
        log observer will be created based on the command-line options for
        built-in loggers (e.g. C{--logfile}).

        @param application: The application on which to check for an
            L{ILogObserver}.
        """
        # Precedence: --logger factory, then the application's ILogObserver
        # component, then the built-in file/stdout observer.
        if self._observerFactory is not None:
            observer = self._observerFactory()
        else:
            observer = application.getComponent(ILogObserver, None)
        if observer is None:
            observer = self._getLogObserver()
        self._observer = observer
        log.startLoggingWithObserver(self._observer)
        self._initialLog()

    def _initialLog(self):
        """
        Print twistd start log message.
        """
        from twisted.internet import reactor
        log.msg("twistd %s (%s %s) starting up." % (copyright.version,
                                                    sys.executable,
                                                    runtime.shortPythonVersion()))
        log.msg('reactor class: %s.' % (qual(reactor.__class__),))

    def _getLogObserver(self):
        """
        Create a log observer to be added to the logging system before running
        this application.
        """
        # "-" (or an empty name) means log to standard output.
        if self._logfilename == '-' or not self._logfilename:
            logFile = sys.stdout
        else:
            logFile = logfile.LogFile.fromFullPath(self._logfilename)
        return log.FileLogObserver(logFile).emit

    def stop(self):
        """
        Print twistd stop log message.
        """
        log.msg("Server Shut Down.")
        if self._observer is not None:
            log.removeObserver(self._observer)
            self._observer = None
def fixPdb():
    """
    Monkey-patch C{pdb.Pdb} with twistd-aware commands: C{stop} resumes
    execution and then cleanly stops the reactor, and C{quit} hard-exits.
    """
    def do_stop(self, arg):
        self.clear_all_breaks()
        self.set_continue()
        from twisted.internet import reactor
        reactor.callLater(0, reactor.stop)
        return 1

    def help_stop(self):
        print """stop - Continue execution, then cleanly shutdown the twisted reactor."""

    def set_quit(self):
        # Exit immediately, skipping reactor shutdown and Python cleanup.
        os._exit(0)

    pdb.Pdb.set_quit = set_quit
    pdb.Pdb.do_stop = do_stop
    pdb.Pdb.help_stop = help_stop
def runReactorWithLogging(config, oldstdout, oldstderr, profiler=None, reactor=None):
    """
    Start the reactor, using profiling if specified by the configuration, and
    log any error happening in the process.

    @param config: configuration of the twistd application.
    @type config: L{ServerOptions}

    @param oldstdout: initial value of C{sys.stdout}.
    @type oldstdout: C{file}

    @param oldstderr: initial value of C{sys.stderr}.
    @type oldstderr: C{file}

    @param profiler: object used to run the reactor with profiling.
    @type profiler: L{AppProfiler}

    @param reactor: The reactor to use. If C{None}, the global reactor will
        be used.
    """
    if reactor is None:
        from twisted.internet import reactor
    try:
        if config['profile']:
            if profiler is not None:
                profiler.run(reactor)
        elif config['debug']:
            # Restore the real stdio so the debugger is usable, install
            # signal hooks to drop into pdb, and run the reactor under it.
            sys.stdout = oldstdout
            sys.stderr = oldstderr
            if runtime.platformType == 'posix':
                signal.signal(signal.SIGUSR2, lambda *args: pdb.set_trace())
                signal.signal(signal.SIGINT, lambda *args: pdb.set_trace())
            fixPdb()
            pdb.runcall(reactor.run)
        else:
            reactor.run()
    except:
        # NOTE(review): bare except deliberately records any crash (to the
        # original stdout when not daemonized, else TWISTD-CRASH.log) before
        # the function returns; the crash-log handle is flushed but never
        # closed.
        if config['nodaemon']:
            file = oldstdout
        else:
            file = open("TWISTD-CRASH.log",'a')
        traceback.print_exc(file=file)
        file.flush()
def getPassphrase(needed):
    """
    Prompt for a decryption passphrase when one is required.

    @param needed: if true, interactively prompt the user; otherwise
        return None without prompting.
    """
    return getpass.getpass('Passphrase: ') if needed else None
def getSavePassphrase(needed):
    """
    Prompt for a passphrase used to encrypt a persisted application.

    @param needed: if true, interactively prompt the user; otherwise
        return None without prompting.

    @return: the entered passphrase, or C{None} when not needed.
    """
    if needed:
        # Bug fix: the prompted passphrase was previously assigned to a
        # local and never returned, so the function always returned None
        # and --encrypted persistence silently saved without a passphrase.
        return util.getPassword("Encryption passphrase: ")
    else:
        return None
class ApplicationRunner(object):
    """
    An object which helps running an application based on a config object.

    Subclass me and implement preApplication and postApplication
    methods. postApplication generally will want to run the reactor
    after starting the application.

    @ivar config: The config object, which provides a dict-like interface.

    @ivar application: Available in postApplication, but not
        preApplication. This is the application object.

    @ivar profilerFactory: Factory for creating a profiler object, able to
        profile the application if options are set accordingly.

    @ivar profiler: Instance provided by C{profilerFactory}.

    @ivar loggerFactory: Factory for creating object responsible for logging.

    @ivar logger: Instance provided by C{loggerFactory}.
    """
    profilerFactory = AppProfiler
    loggerFactory = AppLogger

    def __init__(self, config):
        self.config = config
        self.profiler = self.profilerFactory(config)
        self.logger = self.loggerFactory(config)

    def run(self):
        """
        Run the application.
        """
        self.preApplication()
        self.application = self.createOrGetApplication()
        self.logger.start(self.application)
        self.postApplication()
        self.logger.stop()

    def startReactor(self, reactor, oldstdout, oldstderr):
        """
        Run the reactor with the given configuration. Subclasses should
        probably call this from C{postApplication}.

        @see: L{runReactorWithLogging}
        """
        runReactorWithLogging(
            self.config, oldstdout, oldstderr, self.profiler, reactor)

    def preApplication(self):
        """
        Override in subclass.

        This should set up any state necessary before loading and
        running the Application.
        """
        raise NotImplementedError()

    def postApplication(self):
        """
        Override in subclass.

        This will be called after the application has been loaded (so
        the C{application} attribute will be set). Generally this
        should start the application and run the reactor.
        """
        raise NotImplementedError()

    def createOrGetApplication(self):
        """
        Create or load an Application based on the parameters found in the
        given L{ServerOptions} instance.

        If a subcommand was used, the L{service.IServiceMaker} that it
        represents will be used to construct a service to be added to
        a newly-created Application.

        Otherwise, an application will be loaded based on parameters in
        the config.
        """
        if self.config.subCommand:
            # If a subcommand was given, it's our responsibility to create
            # the application, instead of load it from a file.

            # loadedPlugins is set up by the ServerOptions.subCommands
            # property, which is iterated somewhere in the bowels of
            # usage.Options.
            plg = self.config.loadedPlugins[self.config.subCommand]
            ser = plg.makeService(self.config.subOptions)
            application = service.Application(plg.tapname)
            ser.setServiceParent(application)
        else:
            passphrase = getPassphrase(self.config['encrypted'])
            application = getApplication(self.config, passphrase)
        return application
def getApplication(config, passphrase):
    """
    Load the Application object from the file named by the configuration
    (--python/--source/--file, in that order of precedence), exiting the
    process with an explanatory message on failure.
    """
    # Pick the first style option that was supplied; 'file' maps to the
    # 'pickle' loader.
    s = [(config[t], t)
         for t in ['python', 'source', 'file'] if config[t]][0]
    filename, style = s[0], {'file':'pickle'}.get(s[1],s[1])
    try:
        log.msg("Loading %s..." % filename)
        application = service.loadApplication(filename, style, passphrase)
        log.msg("Loaded.")
    except Exception, e:
        s = "Failed to load application: %s" % e
        if isinstance(e, KeyError) and e.args[0] == "application":
            s += """
Could not find 'application' in the file. To use 'twistd -y', your .tac
file must create a suitable object (e.g., by calling service.Application())
and store it in a variable named 'application'. twistd loads your .tac file
and scans the global variables for one of this name.

Please read the 'Using Application' HOWTO for details.
"""
        traceback.print_exc(file=log.logfile)
        log.msg(s)
        log.deferr()
        sys.exit('\n' + s + '\n')
    return application
def _reactorAction():
    # Shell-completion helper: the short names of all installable reactors.
    return usage.CompleteList([r.shortName for r in reactors.getReactorTypes()])
class ReactorSelectionMixin:
    """
    Provides options for selecting a reactor to install.

    If a reactor is installed, the short name which was used to locate it is
    saved as the value for the C{"reactor"} key.
    """
    compData = usage.Completions(
        optActions={"reactor": _reactorAction})

    messageOutput = sys.stdout
    _getReactorTypes = staticmethod(reactors.getReactorTypes)

    def opt_help_reactors(self):
        """
        Display a list of possibly available reactor names.
        """
        rcts = sorted(self._getReactorTypes(), key=attrgetter('shortName'))
        for r in rcts:
            self.messageOutput.write(' %-4s\t%s\n' %
                                     (r.shortName, r.description))
        raise SystemExit(0)

    def opt_reactor(self, shortName):
        """
        Which reactor to use (see --help-reactors for a list of possibilities)
        """
        # Actually actually actually install the reactor right at this very
        # moment, before any other code (for example, a sub-command plugin)
        # runs and accidentally imports and installs the default reactor.
        #
        # This could probably be improved somehow.
        try:
            installReactor(shortName)
        except NoSuchReactor:
            msg = ("The specified reactor does not exist: '%s'.\n"
                   "See the list of available reactors with "
                   "--help-reactors" % (shortName,))
            raise usage.UsageError(msg)
        except Exception, e:
            msg = ("The specified reactor cannot be used, failed with error: "
                   "%s.\nSee the list of available reactors with "
                   "--help-reactors" % (e,))
            raise usage.UsageError(msg)
        else:
            self["reactor"] = shortName
    opt_r = opt_reactor
class ServerOptions(usage.Options, ReactorSelectionMixin):
    """
    Command-line options shared by twistd on all platforms.
    """

    longdesc = ("twistd reads a twisted.application.service.Application out "
                "of a file and runs it.")

    optFlags = [['savestats', None,
                 "save the Stats object rather than the text output of "
                 "the profiler."],
                ['no_save','o', "do not save state on shutdown"],
                ['encrypted', 'e',
                 "The specified tap/aos file is encrypted."]]

    optParameters = [['logfile','l', None,
                      "log to a specified file, - for stdout"],
                     ['logger', None, None,
                      "A fully-qualified name to a log observer factory to use "
                      "for the initial log observer. Takes precedence over "
                      "--logfile and --syslog (when available)."],
                     ['profile', 'p', None,
                      "Run in profile mode, dumping results to specified file"],
                     ['profiler', None, "hotshot",
                      "Name of the profiler to use (%s)." %
                      ", ".join(AppProfiler.profilers)],
                     ['file','f','twistd.tap',
                      "read the given .tap file"],
                     ['python','y', None,
                      "read an application from within a Python file "
                      "(implies -o)"],
                     ['source', 's', None,
                      "Read an application from a .tas file (AOT format)."],
                     ['rundir','d','.',
                      'Change to a supplied directory before running']]

    compData = usage.Completions(
        mutuallyExclusive=[("file", "python", "source")],
        optActions={"file": usage.CompleteFiles("*.tap"),
                    "python": usage.CompleteFiles("*.(tac|py)"),
                    "source": usage.CompleteFiles("*.tas"),
                    "rundir": usage.CompleteDirs()}
        )

    _getPlugins = staticmethod(plugin.getPlugins)

    def __init__(self, *a, **kw):
        self['debug'] = False
        usage.Options.__init__(self, *a, **kw)

    def opt_debug(self):
        """
        Run the application in the Python Debugger (implies nodaemon),
        sending SIGUSR2 will drop into debugger
        """
        defer.setDebugging(True)
        failure.startDebugMode()
        self['debug'] = True
    opt_b = opt_debug

    def opt_spew(self):
        """
        Print an insanely verbose log of everything that happens.
        Useful when debugging freezes or locks in complex code."""
        sys.settrace(util.spewer)
        try:
            import threading
        except ImportError:
            return
        threading.settrace(util.spewer)

    def parseOptions(self, options=None):
        # Default to the help text when invoked with no arguments at all.
        if options is None:
            options = sys.argv[1:] or ["--help"]
        usage.Options.parseOptions(self, options)

    def postOptions(self):
        if self.subCommand or self['python']:
            self['no_save'] = True
        if self['logger'] is not None:
            try:
                self['logger'] = namedAny(self['logger'])
            except Exception, e:
                raise usage.UsageError("Logger '%s' could not be imported: %s"
                                       % (self['logger'], e))

    def subCommands(self):
        # Lazily discover twistd service-maker plugins; also records them in
        # loadedPlugins for later use by ApplicationRunner.
        plugins = self._getPlugins(service.IServiceMaker)
        self.loadedPlugins = {}
        for plug in sorted(plugins, key=attrgetter('tapname')):
            self.loadedPlugins[plug.tapname] = plug
            yield (plug.tapname,
                   None,
                   # Avoid resolving the options attribute right away, in case
                   # it's a property with a non-trivial getter (eg, one which
                   # imports modules).
                   lambda plug=plug: plug.options(),
                   plug.description)
    subCommands = property(subCommands)
def run(runApp, ServerOptions):
    """
    Parse the command line with C{ServerOptions} and pass the result to
    C{runApp}; usage errors print the help text instead of raising.
    """
    config = ServerOptions()
    try:
        config.parseOptions()
    except usage.error, ue:
        print config
        print "%s: %s" % (sys.argv[0], ue)
    else:
        runApp(config)
def convertStyle(filein, typein, passphrase, fileout, typeout, encrypt):
    """
    Load a persisted application and re-save it in a different
    serialization style, optionally encrypting the output.
    """
    application = service.loadApplication(filein, typein, passphrase)
    persistable = sob.IPersistable(application)
    persistable.setStyle(typeout)
    passphrase = getSavePassphrase(encrypt)
    if passphrase:
        # An encrypted save derives its own filename.
        fileout = None
    persistable.save(filename=fileout, passphrase=passphrase)
def startApplication(application, save):
    """
    Start the application's services and register reactor shutdown hooks:
    optional persistence after shutdown, and service stop before it.
    """
    from twisted.internet import reactor
    svc = service.IService(application)
    svc.startService()
    if save:
        p = sob.IPersistable(application)
        reactor.addSystemEventTrigger('after', 'shutdown', p.save, 'shutdown')
    reactor.addSystemEventTrigger('before', 'shutdown', svc.stopService)
| gpl-2.0 |
wtsi-hgi/common-python | hgicommon/tests/data_source/test_basic.py | 2 | 2390 | """
Legalese
--------
Copyright (c) 2015, 2016 Genome Research Ltd.
Author: Colin Nolan <cn13@sanger.ac.uk>
This file is part of HGI's common Python library
This program is free software: you can redistribute it and/or modify it
under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 3 of the License, or (at
your option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import unittest
from hgicommon.data_source import ListDataSource, MultiDataSource
class TestMultiDataSource(unittest.TestCase):
    """
    Tests for `MultiDataSource`.
    """
    def setUp(self):
        # Ten single-item sources, one per value in self.data.
        self.data = [i for i in range(10)]
        self.sources = [ListDataSource([self.data[i]]) for i in range(len(self.data))]

    def test_init_change_of_source_list_has_no_effect(self):
        # Mutating the list passed to the constructor must not affect the
        # source afterwards (the constructor is expected to take a copy).
        source = MultiDataSource(self.sources)
        self.sources.pop()
        self.assertCountEqual(source.get_all(), self.data)

    def test_get_all_when_no_sources(self):
        source = MultiDataSource()
        self.assertEqual(len(source.get_all()), 0)

    def test_get_all_when_sources(self):
        source = MultiDataSource(self.sources)
        self.assertIsInstance(source.get_all()[0], type(self.data[0]))
        self.assertCountEqual(source.get_all(), self.data)
class TestListDataSource(unittest.TestCase):
    """
    Tests for `ListDataSource`.
    """
    def setUp(self):
        self.data = [i for i in range(10)]

    def test_init_data_optional(self):
        # The backing list is exposed and can be filled after construction.
        source = ListDataSource()
        for datum in self.data:
            source.data.append(datum)
        self.assertCountEqual(source.get_all(), self.data)

    def test_init_data_can_be_changed(self):
        # ListDataSource keeps a live reference to the given list, so later
        # mutations are visible through get_all.
        source = ListDataSource(self.data)
        self.data.append(11)
        self.assertCountEqual(source.get_all(), self.data)

    def test_get_all(self):
        source = ListDataSource(self.data)
        self.assertCountEqual(source.get_all(), self.data)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| agpl-3.0 |
drglove/SickRage | lib/sqlalchemy/dialects/mssql/base.py | 78 | 53331 | # mssql/base.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: mssql
:name: Microsoft SQL Server
Auto Increment Behavior
-----------------------
``IDENTITY`` columns are supported by using SQLAlchemy
``schema.Sequence()`` objects. In other words::
from sqlalchemy import Table, Integer, Sequence, Column
Table('test', metadata,
Column('id', Integer,
Sequence('blah',100,10), primary_key=True),
Column('name', String(20))
).create(some_engine)
would yield::
CREATE TABLE test (
id INTEGER NOT NULL IDENTITY(100,10) PRIMARY KEY,
name VARCHAR(20) NULL,
)
Note that the ``start`` and ``increment`` values for sequences are
optional and will default to 1,1.
Implicit ``autoincrement`` behavior works the same in MSSQL as it
does in other dialects and results in an ``IDENTITY`` column.
* Support for ``SET IDENTITY_INSERT ON`` mode (automagic on / off for
``INSERT`` s)
* Support for auto-fetching of ``@@IDENTITY/@@SCOPE_IDENTITY()`` on
``INSERT``
Collation Support
-----------------
Character collations are supported by the base string types,
specified by the string argument "collation"::
from sqlalchemy import VARCHAR
Column('login', VARCHAR(32, collation='Latin1_General_CI_AS'))
When such a column is associated with a :class:`.Table`, the
CREATE TABLE statement for this column will yield::
login VARCHAR(32) COLLATE Latin1_General_CI_AS NULL
.. versionadded:: 0.8 Character collations are now part of the base string
types.
LIMIT/OFFSET Support
--------------------
MSSQL has no support for the LIMIT or OFFSET keywords. LIMIT is
supported directly through the ``TOP`` Transact SQL keyword::
select.limit
will yield::
SELECT TOP n
If using SQL Server 2005 or above, LIMIT with OFFSET
support is available through the ``ROW_NUMBER OVER`` construct.
For versions below 2005, LIMIT with OFFSET usage will fail.
Nullability
-----------
MSSQL has support for three levels of column nullability. The default
nullability allows nulls and is explicit in the CREATE TABLE
construct::
name VARCHAR(20) NULL
If ``nullable=None`` is specified then no specification is made. In
other words the database's configured default is used. This will
render::
name VARCHAR(20)
If ``nullable`` is ``True`` or ``False`` then the column will be
``NULL`` or ``NOT NULL`` respectively.
Date / Time Handling
--------------------
DATE and TIME are supported. Bind parameters are converted
to datetime.datetime() objects as required by most MSSQL drivers,
and results are processed from strings if needed.
The DATE and TIME types are not available for MSSQL 2005 and
previous - if a server version below 2008 is detected, DDL
for these types will be issued as DATETIME.
.. _mssql_indexes:
Clustered Index Support
-----------------------
The MSSQL dialect supports clustered indexes (and primary keys) via the
``mssql_clustered`` option. This option is available to :class:`.Index`,
:class:`.UniqueConstraint`. and :class:`.PrimaryKeyConstraint`.
To generate a clustered index::
Index("my_index", table.c.x, mssql_clustered=True)
which renders the index as ``CREATE CLUSTERED INDEX my_index ON table (x)``.
.. versionadded:: 0.8
To generate a clustered primary key use::
Table('my_table', metadata,
Column('x', ...),
Column('y', ...),
PrimaryKeyConstraint("x", "y", mssql_clustered=True))
which will render the table, for example, as::
CREATE TABLE my_table (x INTEGER NOT NULL, y INTEGER NOT NULL, PRIMARY KEY CLUSTERED (x, y))
Similarly, we can generate a clustered unique constraint using::
Table('my_table', metadata,
Column('x', ...),
Column('y', ...),
PrimaryKeyConstraint("x"),
UniqueConstraint("y", mssql_clustered=True),
)
.. versionadded:: 0.9.2
MSSQL-Specific Index Options
-----------------------------
In addition to clustering, the MSSQL dialect supports other special options
for :class:`.Index`.
INCLUDE
^^^^^^^
The ``mssql_include`` option renders INCLUDE(colname) for the given string names::
Index("my_index", table.c.x, mssql_include=['y'])
would render the index as ``CREATE INDEX my_index ON table (x) INCLUDE (y)``
.. versionadded:: 0.8
Index ordering
^^^^^^^^^^^^^^
Index ordering is available via functional expressions, such as::
Index("my_index", table.c.x.desc())
would render the index as ``CREATE INDEX my_index ON table (x DESC)``
.. versionadded:: 0.8
.. seealso::
:ref:`schema_indexes_functional`
Compatibility Levels
--------------------
MSSQL supports the notion of setting compatibility levels at the
database level. This allows, for instance, to run a database that
is compatible with SQL2000 while running on a SQL2005 database
server. ``server_version_info`` will always return the database
server version information (in this case SQL2005) and not the
compatibility level information. Because of this, if running under
a backwards compatibility mode SQLAlchemy may attempt to use T-SQL
statements that are unable to be parsed by the database server.
Triggers
--------
SQLAlchemy by default uses OUTPUT INSERTED to get at newly
generated primary key values via IDENTITY columns or other
server side defaults. MS-SQL does not
allow the usage of OUTPUT INSERTED on tables that have triggers.
To disable the usage of OUTPUT INSERTED on a per-table basis,
specify ``implicit_returning=False`` for each :class:`.Table`
which has triggers::
Table('mytable', metadata,
Column('id', Integer, primary_key=True),
# ...,
implicit_returning=False
)
Declarative form::
class MyClass(Base):
# ...
__table_args__ = {'implicit_returning':False}
This option can also be specified engine-wide using the
``implicit_returning=False`` argument on :func:`.create_engine`.
Enabling Snapshot Isolation
---------------------------
Not necessarily specific to SQLAlchemy, SQL Server has a default transaction
isolation mode that locks entire tables, and causes even mildly concurrent
applications to have long held locks and frequent deadlocks.
Enabling snapshot isolation for the database as a whole is recommended
for modern levels of concurrency support. This is accomplished via the
following ALTER DATABASE commands executed at the SQL prompt::
ALTER DATABASE MyDatabase SET ALLOW_SNAPSHOT_ISOLATION ON
ALTER DATABASE MyDatabase SET READ_COMMITTED_SNAPSHOT ON
Background on SQL Server snapshot isolation is available at
http://msdn.microsoft.com/en-us/library/ms175095.aspx.
Known Issues
------------
* No support for more than one ``IDENTITY`` column per table
* reflection of indexes does not work with versions older than
SQL Server 2005
"""
import datetime
import operator
import re
from ... import sql, schema as sa_schema, exc, util
from ...sql import compiler, expression, \
util as sql_util, cast
from ... import engine
from ...engine import reflection, default
from ... import types as sqltypes
from ...types import INTEGER, BIGINT, SMALLINT, DECIMAL, NUMERIC, \
FLOAT, TIMESTAMP, DATETIME, DATE, BINARY,\
VARBINARY, TEXT, VARCHAR, NVARCHAR, CHAR, NCHAR
from ...util import update_wrapper
from . import information_schema as ischema
# Leading element of ``server_version_info`` for each SQL Server release;
# used to gate version-specific features (e.g. DATE/TIME need 2008+).
MS_2008_VERSION = (10,)
MS_2005_VERSION = (9,)
MS_2000_VERSION = (8,)

# T-SQL reserved words; identifiers matching any of these are quoted by
# MSIdentifierPreparer.
RESERVED_WORDS = set(
    ['add', 'all', 'alter', 'and', 'any', 'as', 'asc', 'authorization',
     'backup', 'begin', 'between', 'break', 'browse', 'bulk', 'by', 'cascade',
     'case', 'check', 'checkpoint', 'close', 'clustered', 'coalesce',
     'collate', 'column', 'commit', 'compute', 'constraint', 'contains',
     'containstable', 'continue', 'convert', 'create', 'cross', 'current',
     'current_date', 'current_time', 'current_timestamp', 'current_user',
     'cursor', 'database', 'dbcc', 'deallocate', 'declare', 'default',
     'delete', 'deny', 'desc', 'disk', 'distinct', 'distributed', 'double',
     'drop', 'dump', 'else', 'end', 'errlvl', 'escape', 'except', 'exec',
     'execute', 'exists', 'exit', 'external', 'fetch', 'file', 'fillfactor',
     'for', 'foreign', 'freetext', 'freetexttable', 'from', 'full',
     'function', 'goto', 'grant', 'group', 'having', 'holdlock', 'identity',
     'identity_insert', 'identitycol', 'if', 'in', 'index', 'inner', 'insert',
     'intersect', 'into', 'is', 'join', 'key', 'kill', 'left', 'like',
     'lineno', 'load', 'merge', 'national', 'nocheck', 'nonclustered', 'not',
     'null', 'nullif', 'of', 'off', 'offsets', 'on', 'open', 'opendatasource',
     'openquery', 'openrowset', 'openxml', 'option', 'or', 'order', 'outer',
     'over', 'percent', 'pivot', 'plan', 'precision', 'primary', 'print',
     'proc', 'procedure', 'public', 'raiserror', 'read', 'readtext',
     'reconfigure', 'references', 'replication', 'restore', 'restrict',
     'return', 'revert', 'revoke', 'right', 'rollback', 'rowcount',
     'rowguidcol', 'rule', 'save', 'schema', 'securityaudit', 'select',
     'session_user', 'set', 'setuser', 'shutdown', 'some', 'statistics',
     'system_user', 'table', 'tablesample', 'textsize', 'then', 'to', 'top',
     'tran', 'transaction', 'trigger', 'truncate', 'tsequal', 'union',
     'unique', 'unpivot', 'update', 'updatetext', 'use', 'user', 'values',
     'varying', 'view', 'waitfor', 'when', 'where', 'while', 'with',
     'writetext',
     ])
class REAL(sqltypes.REAL):
    __visit_name__ = 'REAL'

    def __init__(self, **kw):
        """Construct a REAL.

        Any ``precision`` passed by the caller is overridden:
        REAL is a synonym for FLOAT(24) on SQL Server, so the
        precision is always forced to 24.
        """
        kw['precision'] = 24
        super(REAL, self).__init__(**kw)
class TINYINT(sqltypes.Integer):
    """MSSQL TINYINT type."""
    __visit_name__ = 'TINYINT'
# MSSQL DATE/TIME types have varied behavior, sometimes returning
# strings. MSDate/TIME check for everything, and always
# filter bind parameters into datetime objects (required by pyodbc,
# not sure about other dialects).
class _MSDate(sqltypes.Date):
    """MSSQL DATE type: binds plain dates as datetimes and converts
    datetime or 'YYYY-MM-DD' string results back to ``datetime.date``."""

    def bind_processor(self, dialect):
        """Return a bind processor widening ``datetime.date`` values to
        ``datetime.datetime`` (midnight), as most MSSQL drivers require."""
        def process(value):
            # exact type check on purpose: datetime.datetime is a
            # subclass of date and must pass through unmodified
            if type(value) == datetime.date:
                return datetime.datetime(value.year, value.month, value.day)
            else:
                return value
        return process

    _reg = re.compile(r"(\d+)-(\d+)-(\d+)")

    def result_processor(self, dialect, coltype):
        """Return a result processor producing ``datetime.date`` from
        datetime or string results; other values pass through unchanged."""
        def process(value):
            if isinstance(value, datetime.datetime):
                return value.date()
            elif isinstance(value, util.string_types):
                m = self._reg.match(value)
                if m is None:
                    # previously this crashed with an opaque
                    # AttributeError on None.groups(); raise a
                    # diagnosable error naming the offending value
                    raise ValueError(
                        "could not parse %r as a date value" % (value,))
                return datetime.date(*[int(x or 0) for x in m.groups()])
            else:
                return value
        return process
class TIME(sqltypes.TIME):
    """MSSQL TIME type: binds time values as datetimes on a fixed epoch
    date and parses 'HH:MM:SS[.ffffff]' string results back to time."""

    def __init__(self, precision=None, **kwargs):
        # fractional-seconds precision, rendered as TIME(n) by the DDL
        # compiler when set; **kwargs accepted but not forwarded
        self.precision = precision
        super(TIME, self).__init__()

    # epoch date used to widen a bare time into a full datetime, for
    # drivers that cannot bind TIME directly (name-mangled per-class)
    __zero_date = datetime.date(1900, 1, 1)

    def bind_processor(self, dialect):
        def process(value):
            if isinstance(value, datetime.datetime):
                # keep only the time-of-day portion
                value = datetime.datetime.combine(
                    self.__zero_date, value.time())
            elif isinstance(value, datetime.time):
                value = datetime.datetime.combine(self.__zero_date, value)
            return value
        return process

    _reg = re.compile(r"(\d+):(\d+):(\d+)(?:\.(\d{0,6}))?")

    def result_processor(self, dialect, coltype):
        def process(value):
            if isinstance(value, datetime.datetime):
                return value.time()
            elif isinstance(value, util.string_types):
                # missing regex groups (e.g. no fractional part) default
                # to 0; assumes 'HH:MM:SS[.ffffff]' strings -- TODO
                # confirm against all supported drivers
                return datetime.time(*[
                    int(x or 0)
                    for x in self._reg.match(value).groups()])
            else:
                return value
        return process
# legacy internal alias, kept for backwards compatibility
_MSTime = TIME
class _DateTimeBase(object):
    """Mixin providing the MSSQL datetime bind behavior: plain
    ``datetime.date`` values are widened to ``datetime.datetime``
    (midnight); everything else passes through unchanged."""

    def bind_processor(self, dialect):
        def process(value):
            # exact type check on purpose: datetime.datetime is a
            # subclass of date and must pass through unmodified
            if type(value) is not datetime.date:
                return value
            return datetime.datetime(value.year, value.month, value.day)
        return process
class _MSDateTime(_DateTimeBase, sqltypes.DateTime):
    """Default DATETIME implementation with the MSSQL date-widening
    bind behavior."""
    pass
class SMALLDATETIME(_DateTimeBase, sqltypes.DateTime):
    """MSSQL SMALLDATETIME type."""
    __visit_name__ = 'SMALLDATETIME'
class DATETIME2(_DateTimeBase, sqltypes.DateTime):
    """MSSQL DATETIME2 type."""
    __visit_name__ = 'DATETIME2'

    def __init__(self, precision=None, **kw):
        # fractional-seconds precision, rendered as DATETIME2(n) by the
        # DDL compiler when set
        super(DATETIME2, self).__init__(**kw)
        self.precision = precision
# TODO: is this not an Interval ?
class DATETIMEOFFSET(sqltypes.TypeEngine):
    """MSSQL DATETIMEOFFSET type."""
    __visit_name__ = 'DATETIMEOFFSET'

    def __init__(self, precision=None, **kwargs):
        # fractional-seconds precision; **kwargs accepted but ignored
        self.precision = precision
class _StringType(object):
    """Base for MSSQL string types."""

    def __init__(self, collation=None):
        # forward the collation to the cooperating type constructor
        super(_StringType, self).__init__(collation=collation)
class NTEXT(sqltypes.UnicodeText):
    """MSSQL NTEXT type, for variable-length unicode text up to 2^30
    characters."""

    __visit_name__ = 'NTEXT'
class IMAGE(sqltypes.LargeBinary):
    """MSSQL IMAGE type, for large binary data."""
    __visit_name__ = 'IMAGE'
class BIT(sqltypes.TypeEngine):
    """MSSQL BIT type, used to represent booleans."""
    __visit_name__ = 'BIT'
class MONEY(sqltypes.TypeEngine):
    """MSSQL MONEY type."""
    __visit_name__ = 'MONEY'
class SMALLMONEY(sqltypes.TypeEngine):
    """MSSQL SMALLMONEY type."""
    __visit_name__ = 'SMALLMONEY'
class UNIQUEIDENTIFIER(sqltypes.TypeEngine):
    """MSSQL UNIQUEIDENTIFIER type (GUID values)."""
    __visit_name__ = "UNIQUEIDENTIFIER"
class SQL_VARIANT(sqltypes.TypeEngine):
    """MSSQL SQL_VARIANT type."""
    __visit_name__ = 'SQL_VARIANT'
# old names - kept importable for backwards compatibility
MSDateTime = _MSDateTime
MSDate = _MSDate
MSReal = REAL
MSTinyInteger = TINYINT
MSTime = TIME
MSSmallDateTime = SMALLDATETIME
MSDateTime2 = DATETIME2
MSDateTimeOffset = DATETIMEOFFSET
MSText = TEXT
MSNText = NTEXT
MSString = VARCHAR
MSNVarchar = NVARCHAR
MSChar = CHAR
MSNChar = NCHAR
MSBinary = BINARY
MSVarBinary = VARBINARY
MSImage = IMAGE
MSBit = BIT
MSMoney = MONEY
MSSmallMoney = SMALLMONEY
MSUniqueIdentifier = UNIQUEIDENTIFIER
MSVariant = SQL_VARIANT
# Map INFORMATION_SCHEMA type names, as seen during table reflection,
# to the corresponding SQLAlchemy type classes.
ischema_names = {
    'int': INTEGER,
    'bigint': BIGINT,
    'smallint': SMALLINT,
    'tinyint': TINYINT,
    'varchar': VARCHAR,
    'nvarchar': NVARCHAR,
    'char': CHAR,
    'nchar': NCHAR,
    'text': TEXT,
    'ntext': NTEXT,
    'decimal': DECIMAL,
    'numeric': NUMERIC,
    'float': FLOAT,
    'datetime': DATETIME,
    'datetime2': DATETIME2,
    'datetimeoffset': DATETIMEOFFSET,
    'date': DATE,
    'time': TIME,
    'smalldatetime': SMALLDATETIME,
    'binary': BINARY,
    'varbinary': VARBINARY,
    'bit': BIT,
    'real': REAL,
    'image': IMAGE,
    'timestamp': TIMESTAMP,
    'money': MONEY,
    'smallmoney': SMALLMONEY,
    'uniqueidentifier': UNIQUEIDENTIFIER,
    'sql_variant': SQL_VARIANT,
}
class MSTypeCompiler(compiler.GenericTypeCompiler):
    """Render MSSQL DDL type expressions, including COLLATE clauses,
    'max' lengths, and version-dependent DATE/TIME fallbacks."""

    def _extend(self, spec, type_, length=None):
        """Extend a string-type declaration with standard SQL
        COLLATE annotations.

        """
        if getattr(type_, 'collation', None):
            collation = 'COLLATE %s' % type_.collation
        else:
            collation = None

        # explicit length argument overrides the type's own length
        if not length:
            length = type_.length

        if length:
            spec = spec + "(%s)" % length

        return ' '.join([c for c in (spec, collation)
                         if c is not None])

    def visit_FLOAT(self, type_):
        precision = getattr(type_, 'precision', None)
        if precision is None:
            return "FLOAT"
        else:
            return "FLOAT(%(precision)s)" % {'precision': precision}

    def visit_TINYINT(self, type_):
        return "TINYINT"

    def visit_DATETIMEOFFSET(self, type_):
        if type_.precision:
            return "DATETIMEOFFSET(%s)" % type_.precision
        else:
            return "DATETIMEOFFSET"

    def visit_TIME(self, type_):
        precision = getattr(type_, 'precision', None)
        if precision:
            return "TIME(%s)" % precision
        else:
            return "TIME"

    def visit_DATETIME2(self, type_):
        precision = getattr(type_, 'precision', None)
        if precision:
            return "DATETIME2(%s)" % precision
        else:
            return "DATETIME2"

    def visit_SMALLDATETIME(self, type_):
        return "SMALLDATETIME"

    def visit_unicode(self, type_):
        # generic Unicode renders as NVARCHAR
        return self.visit_NVARCHAR(type_)

    def visit_unicode_text(self, type_):
        return self.visit_NTEXT(type_)

    def visit_NTEXT(self, type_):
        return self._extend("NTEXT", type_)

    def visit_TEXT(self, type_):
        return self._extend("TEXT", type_)

    def visit_VARCHAR(self, type_):
        # no length means VARCHAR(max)
        return self._extend("VARCHAR", type_, length=type_.length or 'max')

    def visit_CHAR(self, type_):
        return self._extend("CHAR", type_)

    def visit_NCHAR(self, type_):
        return self._extend("NCHAR", type_)

    def visit_NVARCHAR(self, type_):
        return self._extend("NVARCHAR", type_, length=type_.length or 'max')

    def visit_date(self, type_):
        # DATE requires SQL Server 2008+; fall back to DATETIME below that
        if self.dialect.server_version_info < MS_2008_VERSION:
            return self.visit_DATETIME(type_)
        else:
            return self.visit_DATE(type_)

    def visit_time(self, type_):
        # TIME requires SQL Server 2008+; fall back to DATETIME below that
        if self.dialect.server_version_info < MS_2008_VERSION:
            return self.visit_DATETIME(type_)
        else:
            return self.visit_TIME(type_)

    def visit_large_binary(self, type_):
        return self.visit_IMAGE(type_)

    def visit_IMAGE(self, type_):
        return "IMAGE"

    def visit_VARBINARY(self, type_):
        # no length means VARBINARY(max)
        return self._extend(
            "VARBINARY",
            type_,
            length=type_.length or 'max')

    def visit_boolean(self, type_):
        return self.visit_BIT(type_)

    def visit_BIT(self, type_):
        return "BIT"

    def visit_MONEY(self, type_):
        return "MONEY"

    def visit_SMALLMONEY(self, type_):
        return 'SMALLMONEY'

    def visit_UNIQUEIDENTIFIER(self, type_):
        return "UNIQUEIDENTIFIER"

    def visit_SQL_VARIANT(self, type_):
        return 'SQL_VARIANT'
class MSExecutionContext(default.DefaultExecutionContext):
    """Execution context implementing MSSQL-specific INSERT handling:
    toggling ``SET IDENTITY_INSERT`` around statements that supply an
    explicit IDENTITY value, and fetching the last inserted id via
    ``scope_identity()`` / ``@@identity`` when OUTPUT is not in use."""

    _enable_identity_insert = False
    _select_lastrowid = False
    _result_proxy = None
    _lastrowid = None

    def pre_exec(self):
        """Activate IDENTITY_INSERT if needed."""

        if self.isinsert:
            tbl = self.compiled.statement.table
            seq_column = tbl._autoincrement_column
            insert_has_sequence = seq_column is not None

            if insert_has_sequence:
                # IDENTITY_INSERT must be ON when the statement supplies
                # an explicit value for the IDENTITY column
                self._enable_identity_insert = \
                    seq_column.key in self.compiled_parameters[0]
            else:
                self._enable_identity_insert = False

            # select the identity afterwards only when nothing else
            # provides it: no OUTPUT clause, no explicit value, and not
            # an executemany
            self._select_lastrowid = insert_has_sequence and \
                not self.compiled.returning and \
                not self._enable_identity_insert and \
                not self.executemany

            if self._enable_identity_insert:
                self.root_connection._cursor_execute(
                    self.cursor,
                    "SET IDENTITY_INSERT %s ON" %
                    self.dialect.identifier_preparer.format_table(tbl),
                    (), self)

    def post_exec(self):
        """Disable IDENTITY_INSERT if enabled."""

        conn = self.root_connection
        if self._select_lastrowid:
            if self.dialect.use_scope_identity:
                conn._cursor_execute(
                    self.cursor,
                    "SELECT scope_identity() AS lastrowid", (), self)
            else:
                conn._cursor_execute(
                    self.cursor,
                    "SELECT @@identity AS lastrowid", (), self)
            # fetchall() ensures the cursor is consumed without closing it
            row = self.cursor.fetchall()[0]
            self._lastrowid = int(row[0])

        if (self.isinsert or self.isupdate or self.isdelete) and \
                self.compiled.returning:
            # OUTPUT rows arrive before rowcounts; buffer them fully
            self._result_proxy = engine.FullyBufferedResultProxy(self)

        if self._enable_identity_insert:
            conn._cursor_execute(
                self.cursor,
                "SET IDENTITY_INSERT %s OFF" %
                self.dialect.identifier_preparer.
                format_table(self.compiled.statement.table),
                (), self)

    def get_lastrowid(self):
        return self._lastrowid

    def handle_dbapi_exception(self, e):
        """Best-effort cleanup of IDENTITY_INSERT after a DBAPI error."""
        if self._enable_identity_insert:
            try:
                self.cursor.execute(
                    "SET IDENTITY_INSERT %s OFF" %
                    self.dialect.identifier_preparer.
                    format_table(self.compiled.statement.table))
            except Exception:
                # deliberately swallowed: the original DBAPI error is what
                # the caller must see.  Narrowed from a bare ``except:`` so
                # SystemExit/KeyboardInterrupt are no longer suppressed.
                pass

    def get_result_proxy(self):
        if self._result_proxy:
            return self._result_proxy
        else:
            return engine.ResultProxy(self)
class MSSQLCompiler(compiler.SQLCompiler):
    """Statement compiler for the T-SQL dialect: TOP/ROW_NUMBER limit
    emulation, OUTPUT-based RETURNING, schema-qualified table aliasing,
    and T-SQL function/operator spellings."""

    # OUTPUT must precede the VALUES clause in T-SQL
    returning_precedes_values = True

    extract_map = util.update_copy(
        compiler.SQLCompiler.extract_map,
        {
            'doy': 'dayofyear',
            'dow': 'weekday',
            'milliseconds': 'millisecond',
            'microseconds': 'microsecond'
        })

    def __init__(self, *args, **kwargs):
        # cache of aliases generated for schema-qualified tables,
        # keyed by table (see _schema_aliased_table)
        self.tablealiases = {}
        super(MSSQLCompiler, self).__init__(*args, **kwargs)

    def visit_now_func(self, fn, **kw):
        return "CURRENT_TIMESTAMP"

    def visit_current_date_func(self, fn, **kw):
        return "GETDATE()"

    def visit_length_func(self, fn, **kw):
        # T-SQL spells length() as LEN()
        return "LEN%s" % self.function_argspec(fn, **kw)

    def visit_char_length_func(self, fn, **kw):
        return "LEN%s" % self.function_argspec(fn, **kw)

    def visit_concat_op_binary(self, binary, operator, **kw):
        # string concatenation uses + rather than ||
        return "%s + %s" % \
            (self.process(binary.left, **kw),
             self.process(binary.right, **kw))

    def visit_true(self, expr, **kw):
        # no boolean literals; BIT values 1/0 are used
        return '1'

    def visit_false(self, expr, **kw):
        return '0'

    def visit_match_op_binary(self, binary, operator, **kw):
        # full-text search predicate
        return "CONTAINS (%s, %s)" % (
            self.process(binary.left, **kw),
            self.process(binary.right, **kw))

    def get_select_precolumns(self, select):
        """MS-SQL puts TOP, its version of LIMIT, here"""
        if select._distinct or select._limit is not None:
            s = select._distinct and "DISTINCT " or ""

            # ODBC drivers and possibly others
            # don't support bind params in the SELECT clause on SQL Server.
            # so have to use literal here.
            if select._limit is not None:
                # TOP only applies when there is no OFFSET; offsets are
                # handled by the ROW_NUMBER() rewrite in visit_select
                if not select._offset:
                    s += "TOP %d " % select._limit
            return s
        return compiler.SQLCompiler.get_select_precolumns(self, select)

    def get_from_hint_text(self, table, text):
        return text

    def get_crud_hint_text(self, table, text):
        return text

    def limit_clause(self, select):
        # Limit in mssql is after the select keyword
        return ""

    def visit_select(self, select, **kwargs):
        """Look for ``LIMIT`` and OFFSET in a select statement, and if
        so tries to wrap it in a subquery with ``row_number()`` criterion.

        """
        if select._offset and not getattr(select, '_mssql_visit', None):
            # to use ROW_NUMBER(), an ORDER BY is required.
            if not select._order_by_clause.clauses:
                raise exc.CompileError('MSSQL requires an order_by when '
                                       'using an offset.')

            _offset = select._offset
            _limit = select._limit
            _order_by_clauses = select._order_by_clause.clauses
            select = select._generate()
            # flag the generated select so this rewrite is not re-entered
            select._mssql_visit = True
            select = select.column(
                sql.func.ROW_NUMBER().over(order_by=_order_by_clauses)
                .label("mssql_rn")
            ).order_by(None).alias()

            # outer select filters on the row number, excluding the
            # synthetic mssql_rn column from the result
            mssql_rn = sql.column('mssql_rn')
            limitselect = sql.select([c for c in select.c if
                                      c.key != 'mssql_rn'])
            limitselect.append_whereclause(mssql_rn > _offset)
            if _limit is not None:
                limitselect.append_whereclause(mssql_rn <= (_limit + _offset))
            return self.process(limitselect, iswrapper=True, **kwargs)
        else:
            return compiler.SQLCompiler.visit_select(self, select, **kwargs)

    def _schema_aliased_table(self, table):
        # return (and cache) an alias for a schema-qualified table;
        # None when the table has no schema
        if getattr(table, 'schema', None) is not None:
            if table not in self.tablealiases:
                self.tablealiases[table] = table.alias()
            return self.tablealiases[table]
        else:
            return None

    def visit_table(self, table, mssql_aliased=False, iscrud=False, **kwargs):
        if mssql_aliased is table or iscrud:
            return super(MSSQLCompiler, self).visit_table(table, **kwargs)

        # alias schema-qualified tables
        alias = self._schema_aliased_table(table)
        if alias is not None:
            return self.process(alias, mssql_aliased=table, **kwargs)
        else:
            return super(MSSQLCompiler, self).visit_table(table, **kwargs)

    def visit_alias(self, alias, **kwargs):
        # translate for schema-qualified table aliases
        kwargs['mssql_aliased'] = alias.original
        return super(MSSQLCompiler, self).visit_alias(alias, **kwargs)

    def visit_extract(self, extract, **kw):
        field = self.extract_map.get(extract.field, extract.field)
        return 'DATEPART("%s", %s)' % \
            (field, self.process(extract.expr, **kw))

    def visit_savepoint(self, savepoint_stmt):
        return "SAVE TRANSACTION %s" % self.preparer.format_savepoint(savepoint_stmt)

    def visit_rollback_to_savepoint(self, savepoint_stmt):
        return ("ROLLBACK TRANSACTION %s"
                % self.preparer.format_savepoint(savepoint_stmt))

    def visit_column(self, column, add_to_result_map=None, **kwargs):
        if column.table is not None and \
                (not self.isupdate and not self.isdelete) or self.is_subquery():
            # translate for schema-qualified table aliases
            t = self._schema_aliased_table(column.table)
            if t is not None:
                converted = expression._corresponding_column_or_error(
                    t, column)
                if add_to_result_map is not None:
                    add_to_result_map(
                        column.name,
                        column.name,
                        (column, column.name, column.key),
                        column.type
                    )

                return super(MSSQLCompiler, self).\
                    visit_column(converted, **kwargs)

        return super(MSSQLCompiler, self).visit_column(
            column, add_to_result_map=add_to_result_map, **kwargs)

    def visit_binary(self, binary, **kwargs):
        """Move bind parameters to the right-hand side of an operator, where
        possible.

        """
        if (
            isinstance(binary.left, expression.BindParameter)
            and binary.operator == operator.eq
            and not isinstance(binary.right, expression.BindParameter)
        ):
            return self.process(
                expression.BinaryExpression(binary.right,
                                            binary.left,
                                            binary.operator),
                **kwargs)
        return super(MSSQLCompiler, self).visit_binary(binary, **kwargs)

    def returning_clause(self, stmt, returning_cols):
        # RETURNING is emulated with the OUTPUT clause over the
        # "inserted" / "deleted" pseudo-tables
        if self.isinsert or self.isupdate:
            target = stmt.table.alias("inserted")
        else:
            target = stmt.table.alias("deleted")

        adapter = sql_util.ClauseAdapter(target)

        columns = [
            self._label_select_column(None, adapter.traverse(c),
                                      True, False, {})
            for c in expression._select_iterables(returning_cols)
        ]

        return 'OUTPUT ' + ', '.join(columns)

    def get_cte_preamble(self, recursive):
        # SQL Server finds it too inconvenient to accept
        # an entirely optional, SQL standard specified,
        # "RECURSIVE" word with their "WITH",
        # so here we go
        return "WITH"

    def label_select_column(self, select, column, asfrom):
        # function calls get an anonymous label so they are addressable
        # in the result set
        if isinstance(column, expression.Function):
            return column.label(None)
        else:
            return super(MSSQLCompiler, self).\
                label_select_column(select, column, asfrom)

    def for_update_clause(self, select):
        # "FOR UPDATE" is only allowed on "DECLARE CURSOR" which
        # SQLAlchemy doesn't use
        return ''

    def order_by_clause(self, select, **kw):
        order_by = self.process(select._order_by_clause, **kw)

        # MSSQL only allows ORDER BY in subqueries if there is a LIMIT
        if order_by and (not self.is_subquery() or select._limit):
            return " ORDER BY " + order_by
        else:
            return ""

    def update_from_clause(self, update_stmt,
                           from_table, extra_froms,
                           from_hints,
                           **kw):
        """Render the UPDATE..FROM clause specific to MSSQL.

        In MSSQL, if the UPDATE statement involves an alias of the table to
        be updated, then the table itself must be added to the FROM list as
        well. Otherwise, it is optional. Here, we add it regardless.

        """
        return "FROM " + ', '.join(
            t._compiler_dispatch(self, asfrom=True,
                                 fromhints=from_hints, **kw)
            for t in [from_table] + extra_froms)
class MSSQLStrictCompiler(MSSQLCompiler):
    """A subclass of MSSQLCompiler which disables the usage of bind
    parameters where not allowed natively by MS-SQL.

    A dialect may use this compiler on a platform where native
    binds are used.

    """
    ansi_bind_rules = True

    def visit_in_op_binary(self, binary, operator, **kw):
        # render the IN list as inline literals rather than binds
        kw['literal_binds'] = True
        return "%s IN %s" % (
            self.process(binary.left, **kw),
            self.process(binary.right, **kw)
        )

    def visit_notin_op_binary(self, binary, operator, **kw):
        kw['literal_binds'] = True
        return "%s NOT IN %s" % (
            self.process(binary.left, **kw),
            self.process(binary.right, **kw)
        )

    def render_literal_value(self, value, type_):
        """
        For date and datetime values, convert to a string
        format acceptable to MSSQL. That seems to be the
        so-called ODBC canonical date format which looks
        like this:

            yyyy-mm-dd hh:mi:ss.mmm(24h)

        For other data types, call the base class implementation.

        """
        # datetime and date are both subclasses of datetime.date
        if issubclass(type(value), datetime.date):
            # SQL Server wants single quotes around the date string.
            return "'" + str(value) + "'"
        else:
            return super(MSSQLStrictCompiler, self).\
                render_literal_value(value, type_)
class MSDDLCompiler(compiler.DDLCompiler):
    """DDL compiler adding MSSQL specifics: IDENTITY columns, three-state
    NULL/NOT NULL rendering, and CLUSTERED / INCLUDE options on indexes
    and constraints."""

    def get_column_specification(self, column, **kwargs):
        colspec = (self.preparer.format_column(column) + " "
                   + self.dialect.type_compiler.process(column.type))

        # nullable=None means "no specification": use the server default
        if column.nullable is not None:
            if not column.nullable or column.primary_key or \
                    isinstance(column.default, sa_schema.Sequence):
                colspec += " NOT NULL"
            else:
                colspec += " NULL"

        if column.table is None:
            raise exc.CompileError(
                "mssql requires Table-bound columns "
                "in order to generate DDL")

        # install an IDENTITY Sequence if we have either an explicit
        # sequence or an implicit IDENTITY column
        if isinstance(column.default, sa_schema.Sequence):
            if column.default.start == 0:
                # 0 is falsy, so it must be handled explicitly -
                # ``start or 1`` below would lose it
                start = 0
            else:
                start = column.default.start or 1

            colspec += " IDENTITY(%s,%s)" % (start, column.default.increment or 1)
        elif column is column.table._autoincrement_column:
            colspec += " IDENTITY(1,1)"
        else:
            default = self.get_column_default_string(column)
            if default is not None:
                colspec += " DEFAULT " + default

        return colspec

    def visit_create_index(self, create, include_schema=False):
        index = create.element
        self._verify_index_table(index)
        preparer = self.preparer
        text = "CREATE "
        if index.unique:
            text += "UNIQUE "

        # handle clustering option
        if index.dialect_options['mssql']['clustered']:
            text += "CLUSTERED "

        text += "INDEX %s ON %s (%s)" \
            % (
                self._prepared_index_name(index,
                                          include_schema=include_schema),
                preparer.format_table(index.table),
                ', '.join(
                    self.sql_compiler.process(expr,
                                              include_table=False, literal_binds=True) for
                    expr in index.expressions)
            )

        # handle other included columns (mssql_include=[...]); string
        # entries are resolved against the table's columns
        if index.dialect_options['mssql']['include']:
            inclusions = [index.table.c[col]
                          if isinstance(col, util.string_types) else col
                          for col in index.dialect_options['mssql']['include']]

            text += " INCLUDE (%s)" \
                % ', '.join([preparer.quote(c.name)
                             for c in inclusions])

        return text

    def visit_drop_index(self, drop):
        return "\nDROP INDEX %s ON %s" % (
            self._prepared_index_name(drop.element, include_schema=False),
            self.preparer.format_table(drop.element.table)
        )

    def visit_primary_key_constraint(self, constraint):
        if len(constraint) == 0:
            return ''
        text = ""
        if constraint.name is not None:
            text += "CONSTRAINT %s " % \
                self.preparer.format_constraint(constraint)
        text += "PRIMARY KEY "

        # mssql_clustered=True renders PRIMARY KEY CLUSTERED
        if constraint.dialect_options['mssql']['clustered']:
            text += "CLUSTERED "

        text += "(%s)" % ', '.join(self.preparer.quote(c.name)
                                   for c in constraint)
        text += self.define_constraint_deferrability(constraint)
        return text

    def visit_unique_constraint(self, constraint):
        if len(constraint) == 0:
            return ''
        text = ""
        if constraint.name is not None:
            text += "CONSTRAINT %s " % \
                self.preparer.format_constraint(constraint)
        text += "UNIQUE "

        # mssql_clustered=True renders UNIQUE CLUSTERED
        if constraint.dialect_options['mssql']['clustered']:
            text += "CLUSTERED "

        text += "(%s)" % ', '.join(self.preparer.quote(c.name)
                                   for c in constraint)
        text += self.define_constraint_deferrability(constraint)
        return text
class MSIdentifierPreparer(compiler.IdentifierPreparer):
    """Identifier preparer using MSSQL ``[bracket]`` quoting."""

    reserved_words = RESERVED_WORDS

    def __init__(self, dialect):
        super(MSIdentifierPreparer, self).__init__(dialect, initial_quote='[',
                                                   final_quote=']')

    def _escape_identifier(self, value):
        # identifier text is returned unchanged: no escaping is applied
        return value

    def quote_schema(self, schema, force=None):
        """Prepare a quoted table and schema name."""
        # quote each dotted component separately,
        # e.g. 'db.owner' -> '[db].[owner]'
        result = '.'.join([self.quote(x, force) for x in schema.split('.')])
        return result
def _db_plus_owner_listing(fn):
    """Decorate a listing-style reflection method (one with no table-name
    argument) so that ``schema`` is resolved into ``(dbname, owner)`` and
    the call runs while switched to ``dbname``."""
    def wrapped(dialect, connection, schema=None, **kw):
        dbname, owner = _owner_plus_db(dialect, schema)
        return _switch_db(
            dbname, connection, fn, dialect,
            connection, dbname, owner, schema, **kw)
    return update_wrapper(wrapped, fn)
def _db_plus_owner(fn):
    """Decorate a per-table reflection method so that ``schema`` is
    resolved into ``(dbname, owner)`` and the call runs while switched
    to ``dbname``."""
    def wrapped(dialect, connection, tablename, schema=None, **kw):
        dbname, owner = _owner_plus_db(dialect, schema)
        return _switch_db(
            dbname, connection, fn, dialect,
            connection, tablename, dbname, owner, schema, **kw)
    return update_wrapper(wrapped, fn)
def _switch_db(dbname, connection, fn, *arg, **kw):
    """Invoke ``fn(*arg, **kw)``; when *dbname* is given, issue
    ``USE dbname`` first and restore the previous database afterwards,
    even if *fn* raises."""
    if not dbname:
        return fn(*arg, **kw)
    current_db = connection.scalar("select db_name()")
    connection.execute("use %s" % dbname)
    try:
        return fn(*arg, **kw)
    finally:
        # always switch back, including on error
        connection.execute("use %s" % current_db)
def _owner_plus_db(dialect, schema):
    """Split a ``database.owner`` schema string into ``(dbname, owner)``.

    A falsy *schema* yields the dialect's default schema name with no
    database; a plain name is treated as the owner alone.
    """
    if not schema:
        return None, dialect.default_schema_name
    if "." in schema:
        # split only on the first dot; the remainder is the owner
        return schema.split(".", 1)
    return None, schema
class MSDialect(default.DefaultDialect):
    """SQLAlchemy dialect for Microsoft SQL Server.

    Reflection is driven by the INFORMATION_SCHEMA views (via the
    ``ischema`` table definitions) and, where those are insufficient,
    by the ``sys.*`` catalog views (SQL Server 2005+ only).
    """

    name = 'mssql'
    supports_default_values = True
    supports_empty_insert = False
    execution_ctx_cls = MSExecutionContext
    use_scope_identity = True
    max_identifier_length = 128
    schema_name = "dbo"

    # Python-side type overrides applied on top of the generic types.
    colspecs = {
        sqltypes.DateTime: _MSDateTime,
        sqltypes.Date: _MSDate,
        sqltypes.Time: TIME,
    }
    ischema_names = ischema_names

    supports_native_boolean = False
    supports_unicode_binds = True
    postfetch_lastrowid = True

    server_version_info = ()

    statement_compiler = MSSQLCompiler
    ddl_compiler = MSDDLCompiler
    type_compiler = MSTypeCompiler
    preparer = MSIdentifierPreparer

    # Dialect-specific keyword arguments accepted on schema constructs,
    # e.g. ``Index(..., mssql_clustered=True, mssql_include=[...])``.
    construct_arguments = [
        (sa_schema.PrimaryKeyConstraint, {
            "clustered": False
        }),
        (sa_schema.UniqueConstraint, {
            "clustered": False
        }),
        (sa_schema.Index, {
            "clustered": False,
            "include": None
        })
    ]

    def __init__(self,
                 query_timeout=None,
                 use_scope_identity=True,
                 max_identifier_length=None,
                 schema_name="dbo", **opts):
        """Create the dialect.

        :param query_timeout: per-query timeout in seconds (0 = none).
        :param use_scope_identity: use SCOPE_IDENTITY() for lastrowid.
        :param max_identifier_length: override for the 128-char default;
            falsy values fall back to the class default.
        :param schema_name: default schema, normally ``dbo``.
        """
        self.query_timeout = int(query_timeout or 0)
        self.schema_name = schema_name

        self.use_scope_identity = use_scope_identity
        self.max_identifier_length = int(max_identifier_length or 0) or \
            self.max_identifier_length
        super(MSDialect, self).__init__(**opts)

    def do_savepoint(self, connection, name):
        """Emit SAVE TRANSACTION, ensuring a transaction is open first."""
        # give the DBAPI a push
        connection.execute("IF @@TRANCOUNT = 0 BEGIN TRANSACTION")
        super(MSDialect, self).do_savepoint(connection, name)

    def do_release_savepoint(self, connection, name):
        """No-op: SQL Server has no RELEASE SAVEPOINT statement."""
        # SQL Server does not support RELEASE SAVEPOINT
        pass

    def initialize(self, connection):
        """Post-connect setup: sanity-check the reported server version
        and enable implicit RETURNING (OUTPUT) on 2005+ unless the user
        set it explicitly."""
        super(MSDialect, self).initialize(connection)
        if self.server_version_info[0] not in list(range(8, 17)):
            # FreeTDS with version 4.2 seems to report here
            # a number like "95.10.255". Don't know what
            # that is. So emit warning.
            util.warn(
                "Unrecognized server version info '%s'. Version specific "
                "behaviors may not function properly. If using ODBC "
                "with FreeTDS, ensure server version 7.0 or 8.0, not 4.2, "
                "is configured in the FreeTDS configuration." %
                ".".join(str(x) for x in self.server_version_info))
        if self.server_version_info >= MS_2005_VERSION and \
                'implicit_returning' not in self.__dict__:
            self.implicit_returning = True

    def _get_default_schema_name(self, connection):
        """Look up the connected user's default schema; fall back to
        ``self.schema_name`` if it cannot be determined."""
        user_name = connection.scalar("SELECT user_name()")
        if user_name is not None:
            # now, get the default schema
            query = sql.text("""
            SELECT default_schema_name FROM
            sys.database_principals
            WHERE name = :name
            AND type = 'S'
            """)
            try:
                default_schema_name = connection.scalar(query, name=user_name)
                if default_schema_name is not None:
                    return util.text_type(default_schema_name)
            # NOTE(review): bare except silently swallows every error here
            # (e.g. pre-2005 servers lacking sys.database_principals) and
            # falls back to the configured schema name.
            except:
                pass
        return self.schema_name

    @_db_plus_owner
    def has_table(self, connection, tablename, dbname, owner, schema):
        """Return True if ``tablename`` exists, per INFORMATION_SCHEMA."""
        columns = ischema.columns

        whereclause = columns.c.table_name == tablename

        if owner:
            whereclause = sql.and_(whereclause,
                                   columns.c.table_schema == owner)
        s = sql.select([columns], whereclause)
        c = connection.execute(s)
        return c.first() is not None

    @reflection.cache
    def get_schema_names(self, connection, **kw):
        """Return all schema names, sorted."""
        s = sql.select([ischema.schemata.c.schema_name],
                       order_by=[ischema.schemata.c.schema_name]
                       )
        schema_names = [r[0] for r in connection.execute(s)]
        return schema_names

    @reflection.cache
    @_db_plus_owner_listing
    def get_table_names(self, connection, dbname, owner, schema, **kw):
        """Return names of base tables in ``owner``'s schema, sorted."""
        tables = ischema.tables
        s = sql.select([tables.c.table_name],
                       sql.and_(
                           tables.c.table_schema == owner,
                           tables.c.table_type == 'BASE TABLE'
                       ),
                       order_by=[tables.c.table_name]
                       )
        table_names = [r[0] for r in connection.execute(s)]
        return table_names

    @reflection.cache
    @_db_plus_owner_listing
    def get_view_names(self, connection, dbname, owner, schema, **kw):
        """Return names of views in ``owner``'s schema, sorted."""
        tables = ischema.tables
        s = sql.select([tables.c.table_name],
                       sql.and_(
                           tables.c.table_schema == owner,
                           tables.c.table_type == 'VIEW'
                       ),
                       order_by=[tables.c.table_name]
                       )
        view_names = [r[0] for r in connection.execute(s)]
        return view_names

    @reflection.cache
    @_db_plus_owner
    def get_indexes(self, connection, tablename, dbname, owner, schema, **kw):
        """Reflect non-PK indexes via sys.indexes / sys.index_columns.

        Returns a list of dicts with 'name', 'unique', 'column_names'.
        Empty on servers older than 2005 (no sys catalogs).
        """
        # using system catalogs, don't support index reflection
        # below MS 2005
        if self.server_version_info < MS_2005_VERSION:
            return []

        # First pass: one row per index (id, uniqueness, name).
        rp = connection.execute(
            sql.text("select ind.index_id, ind.is_unique, ind.name "
                     "from sys.indexes as ind join sys.tables as tab on "
                     "ind.object_id=tab.object_id "
                     "join sys.schemas as sch on sch.schema_id=tab.schema_id "
                     "where tab.name = :tabname "
                     "and sch.name=:schname "
                     "and ind.is_primary_key=0",
                     bindparams=[
                         sql.bindparam('tabname', tablename,
                                       sqltypes.String(convert_unicode=True)),
                         sql.bindparam('schname', owner,
                                       sqltypes.String(convert_unicode=True))
                     ],
                     typemap={
                         'name': sqltypes.Unicode()
                     }
                     )
        )
        indexes = {}
        for row in rp:
            indexes[row['index_id']] = {
                'name': row['name'],
                'unique': row['is_unique'] == 1,
                'column_names': []
            }
        # Second pass: fill in member column names per index id.
        rp = connection.execute(
            sql.text(
                "select ind_col.index_id, ind_col.object_id, col.name "
                "from sys.columns as col "
                "join sys.tables as tab on tab.object_id=col.object_id "
                "join sys.index_columns as ind_col on "
                "(ind_col.column_id=col.column_id and "
                "ind_col.object_id=tab.object_id) "
                "join sys.schemas as sch on sch.schema_id=tab.schema_id "
                "where tab.name=:tabname "
                "and sch.name=:schname",
                bindparams=[
                    sql.bindparam('tabname', tablename,
                                  sqltypes.String(convert_unicode=True)),
                    sql.bindparam('schname', owner,
                                  sqltypes.String(convert_unicode=True))
                ],
                typemap={'name': sqltypes.Unicode()}
            ),
        )
        for row in rp:
            if row['index_id'] in indexes:
                indexes[row['index_id']]['column_names'].append(row['name'])

        return list(indexes.values())

    @reflection.cache
    @_db_plus_owner
    def get_view_definition(self, connection, viewname, dbname, owner, schema, **kw):
        """Return the CREATE VIEW source text from sys.sql_modules."""
        rp = connection.execute(
            sql.text(
                "select definition from sys.sql_modules as mod, "
                "sys.views as views, "
                "sys.schemas as sch"
                " where "
                "mod.object_id=views.object_id and "
                "views.schema_id=sch.schema_id and "
                "views.name=:viewname and sch.name=:schname",
                bindparams=[
                    sql.bindparam('viewname', viewname,
                                  sqltypes.String(convert_unicode=True)),
                    sql.bindparam('schname', owner,
                                  sqltypes.String(convert_unicode=True))
                ]
            )
        )

        if rp:
            view_def = rp.scalar()
            return view_def

    @reflection.cache
    @_db_plus_owner
    def get_columns(self, connection, tablename, dbname, owner, schema, **kw):
        """Reflect column definitions for ``tablename``.

        Combines INFORMATION_SCHEMA.COLUMNS data with an sp_columns call
        (to detect the identity column) and, on 2005+, ident_seed/ident_incr
        for the identity's start/increment.
        """
        # Get base columns
        columns = ischema.columns
        if owner:
            whereclause = sql.and_(columns.c.table_name == tablename,
                                   columns.c.table_schema == owner)
        else:
            whereclause = columns.c.table_name == tablename
        s = sql.select([columns], whereclause,
                       order_by=[columns.c.ordinal_position])

        c = connection.execute(s)
        cols = []
        while True:
            row = c.fetchone()
            if row is None:
                break
            (name, type, nullable, charlen,
                numericprec, numericscale, default, collation) = (
                row[columns.c.column_name],
                row[columns.c.data_type],
                row[columns.c.is_nullable] == 'YES',
                row[columns.c.character_maximum_length],
                row[columns.c.numeric_precision],
                row[columns.c.numeric_scale],
                row[columns.c.column_default],
                row[columns.c.collation_name]
            )
            coltype = self.ischema_names.get(type, None)

            kwargs = {}
            if coltype in (MSString, MSChar, MSNVarchar, MSNChar, MSText,
                           MSNText, MSBinary, MSVarBinary,
                           sqltypes.LargeBinary):
                kwargs['length'] = charlen
                if collation:
                    kwargs['collation'] = collation
                # charlen == -1 means VARCHAR(MAX)/NVARCHAR(MAX): unbounded.
                if coltype == MSText or \
                        (coltype in (MSString, MSNVarchar) and charlen == -1):
                    kwargs.pop('length')

            if coltype is None:
                util.warn(
                    "Did not recognize type '%s' of column '%s'" %
                    (type, name))
                coltype = sqltypes.NULLTYPE
            else:
                if issubclass(coltype, sqltypes.Numeric) and \
                        coltype is not MSReal:
                    kwargs['scale'] = numericscale
                    kwargs['precision'] = numericprec

                coltype = coltype(**kwargs)
            cdict = {
                'name': name,
                'type': coltype,
                'nullable': nullable,
                'default': default,
                'autoincrement': False,
            }
            cols.append(cdict)
        # autoincrement and identity
        colmap = {}
        for col in cols:
            colmap[col['name']] = col
        # We also run an sp_columns to check for identity columns:
        # NOTE(review): tablename/owner are %-interpolated into the SQL
        # string rather than bound; these come from reflection metadata,
        # but names containing quotes would break the statement.
        cursor = connection.execute("sp_columns @table_name = '%s', "
                                    "@table_owner = '%s'"
                                    % (tablename, owner))
        ic = None
        while True:
            row = cursor.fetchone()
            if row is None:
                break
            (col_name, type_name) = row[3], row[5]
            if type_name.endswith("identity") and col_name in colmap:
                ic = col_name
                colmap[col_name]['autoincrement'] = True
                colmap[col_name]['sequence'] = dict(
                    name='%s_identity' % col_name)
                break
        cursor.close()

        if ic is not None and self.server_version_info >= MS_2005_VERSION:
            table_fullname = "%s.%s" % (owner, tablename)
            cursor = connection.execute(
                "select ident_seed('%s'), ident_incr('%s')"
                % (table_fullname, table_fullname)
            )

            row = cursor.first()
            if row is not None and row[0] is not None:
                colmap[ic]['sequence'].update({
                    'start': int(row[0]),
                    'increment': int(row[1])
                })
        return cols

    @reflection.cache
    @_db_plus_owner
    def get_pk_constraint(self, connection, tablename, dbname, owner, schema, **kw):
        """Reflect the primary key: constrained column names + constraint name."""
        pkeys = []
        TC = ischema.constraints
        C = ischema.key_constraints.alias('C')

        # Primary key constraints
        s = sql.select([C.c.column_name, TC.c.constraint_type, C.c.constraint_name],
                       sql.and_(TC.c.constraint_name == C.c.constraint_name,
                                TC.c.table_schema == C.c.table_schema,
                                C.c.table_name == tablename,
                                C.c.table_schema == owner)
                       )
        c = connection.execute(s)
        constraint_name = None
        for row in c:
            if 'PRIMARY' in row[TC.c.constraint_type.name]:
                pkeys.append(row[0])
                if constraint_name is None:
                    constraint_name = row[C.c.constraint_name.name]
        return {'constrained_columns': pkeys, 'name': constraint_name}

    @reflection.cache
    @_db_plus_owner
    def get_foreign_keys(self, connection, tablename, dbname, owner, schema, **kw):
        """Reflect foreign keys, grouped by constraint name so multi-column
        FKs come back as single records."""
        RR = ischema.ref_constraints

        C = ischema.key_constraints.alias('C')
        R = ischema.key_constraints.alias('R')

        # Foreign key constraints
        s = sql.select([C.c.column_name,
                        R.c.table_schema, R.c.table_name, R.c.column_name,
                        RR.c.constraint_name, RR.c.match_option,
                        RR.c.update_rule,
                        RR.c.delete_rule],
                       sql.and_(C.c.table_name == tablename,
                                C.c.table_schema == owner,
                                C.c.constraint_name == RR.c.constraint_name,
                                R.c.constraint_name ==
                                RR.c.unique_constraint_name,
                                C.c.ordinal_position == R.c.ordinal_position
                                ),
                       order_by=[RR.c.constraint_name, R.c.ordinal_position]
                       )

        # group rows by constraint ID, to handle multi-column FKs
        # NOTE(review): the next two assignments are dead — fkeys is
        # immediately rebound to a defaultdict and fknm/scols/rcols are
        # never read.
        fkeys = []
        fknm, scols, rcols = (None, [], [])

        def fkey_rec():
            return {
                'name': None,
                'constrained_columns': [],
                'referred_schema': None,
                'referred_table': None,
                'referred_columns': []
            }

        fkeys = util.defaultdict(fkey_rec)

        for r in connection.execute(s).fetchall():
            scol, rschema, rtbl, rcol, rfknm, fkmatch, fkuprule, fkdelrule = r

            rec = fkeys[rfknm]
            rec['name'] = rfknm
            if not rec['referred_table']:
                rec['referred_table'] = rtbl
                # Only qualify the referred schema when it differs from the
                # requesting owner or an explicit schema was asked for.
                if schema is not None or owner != rschema:
                    if dbname:
                        rschema = dbname + "." + rschema
                    rec['referred_schema'] = rschema

            local_cols, remote_cols = \
                rec['constrained_columns'],\
                rec['referred_columns']

            local_cols.append(scol)
            remote_cols.append(rcol)

        return list(fkeys.values())
| gpl-3.0 |
patriciolobos/desa8 | openerp/report/render/rml2pdf/trml2pdf.py | 256 | 46679 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import sys
import copy
import reportlab
import re
from reportlab.pdfgen import canvas
from reportlab import platypus
import utils
import color
import os
import logging
from lxml import etree
import base64
from distutils.version import LooseVersion
from reportlab.platypus.doctemplate import ActionFlowable
from openerp.tools.safe_eval import safe_eval as eval
from reportlab.lib.units import inch,cm,mm
from openerp.tools.misc import file_open
from reportlab.pdfbase import pdfmetrics
from reportlab.lib.pagesizes import A4, letter
try:
from cStringIO import StringIO
_hush_pyflakes = [ StringIO ]
except ImportError:
from StringIO import StringIO
_logger = logging.getLogger(__name__)
encoding = 'utf-8'
def select_fontname(fontname, default_fontname):
    """Return ``fontname`` if reportlab can resolve it, otherwise warn and
    return ``default_fontname``."""
    # NOTE(review): this condition is True unless the font appears in BOTH
    # the registered-font list and the standard-14 list, so nearly every
    # name goes through the pdfmetrics.getFont() probe below — confirm the
    # `or` (rather than `and`) is intended.
    if fontname not in pdfmetrics.getRegisteredFontNames()\
            or fontname not in pdfmetrics.standardFonts:
        # let reportlab attempt to find it
        try:
            pdfmetrics.getFont(fontname)
        except Exception:
            addition = ""
            if " " in fontname:
                addition = ". Your font contains spaces which is not valid in RML."
            _logger.warning('Could not locate font %s, substituting default: %s%s',
                            fontname, default_fontname, addition)
            fontname = default_fontname
    return fontname
def _open_image(filename, path=None):
"""Attempt to open a binary file and return the descriptor
"""
if os.path.isfile(filename):
return open(filename, 'rb')
for p in (path or []):
if p and os.path.isabs(p):
fullpath = os.path.join(p, filename)
if os.path.isfile(fullpath):
return open(fullpath, 'rb')
try:
if p:
fullpath = os.path.join(p, filename)
else:
fullpath = filename
return file_open(fullpath)
except IOError:
pass
raise IOError("File %s cannot be found in image path" % filename)
class NumberedCanvas(canvas.Canvas):
    """Canvas that defers page emission so each page can be stamped with
    "page x of y" once the total page count is known.

    ``showPage`` only snapshots the canvas state; the pages are actually
    written out in ``save()``, after all of them have been collected.
    """

    def __init__(self, *args, **kwargs):
        canvas.Canvas.__init__(self, *args, **kwargs)
        # One state dict per finished page, replayed in save().
        self._saved_page_states = []

    def showPage(self):
        # Snapshot the current page state instead of emitting the page.
        # Without this append, save() has nothing to replay and the
        # document would be produced without any pages.
        self._saved_page_states.append(dict(self.__dict__))
        self._startPage()

    def save(self):
        """add page info to each page (page x of y)"""
        for state in self._saved_page_states:
            self.__dict__.update(state)
            self.draw_page_number()
            canvas.Canvas.showPage(self)
        canvas.Canvas.save(self)

    def draw_page_number(self):
        """Draw "this / total" near the top-right corner of the page."""
        page_count = len(self._saved_page_states)
        self.setFont("Helvetica", 8)
        self.drawRightString((self._pagesize[0]-30), (self._pagesize[1]-40),
            " %(this)i / %(total)i" % {
               'this': self._pageNumber,
               'total': page_count,
            }
        )
class PageCount(platypus.Flowable):
    """Invisible flowable that publishes a story's total page count.

    ``draw`` emits a PDF form object named ``pageCount<story_count>``;
    pages reference that form by name (see the ``pageCount`` handling in
    ``_rml_canvas._textual``), so "page x of y" totals resolve even
    though individual pages are drawn before the total is known.
    """
    def __init__(self, story_count=0):
        platypus.Flowable.__init__(self)
        # Index of the story this counter belongs to; part of the form name.
        self.story_count = story_count

    def draw(self):
        self.canv.beginForm("pageCount%d" % self.story_count)
        self.canv.setFont("Helvetica", utils.unit_get(str(8)))
        self.canv.drawString(0, 0, str(self.canv.getPageNumber()))
        self.canv.endForm()
class PageReset(platypus.Flowable):
    """Marker flowable that restarts page numbering between stories."""
    def draw(self):
        """Flag to close current story page numbering and prepare for the next
        should be executed after the rendering of the full story"""
        # The canvas checks this flag; the flowable itself draws nothing.
        self.canv._doPageReset = True
class _rml_styles(object,):
    """Parses RML ``<stylesheet>`` nodes into reportlab paragraph and
    table styles, keyed by their RML name/id."""

    def __init__(self, nodes, localcontext):
        self.localcontext = localcontext
        self.styles = {}        # raw attribute dicts per paraStyle name
        self.styles_obj = {}    # ParagraphStyle objects for non-sample names
        self.names = {}         # <initialize><name id=.../> bindings
        self.table_styles = {}  # TableStyle per blockTableStyle id
        self.default_style = reportlab.lib.styles.getSampleStyleSheet()

        for node in nodes:
            for style in node.findall('blockTableStyle'):
                self.table_styles[style.get('id')] = self._table_style_get(style)
            for style in node.findall('paraStyle'):
                sname = style.get('name')
                self.styles[sname] = self._para_style_update(style)
                # NOTE(review): has_key() is a Python-2 idiom; this relies on
                # reportlab's stylesheet object providing it.
                if self.default_style.has_key(sname):
                    # Name exists in the sample stylesheet: patch it in place.
                    for key, value in self.styles[sname].items():
                        setattr(self.default_style[sname], key, value)
                else:
                    # New name: derive a fresh style from "Normal".
                    self.styles_obj[sname] = reportlab.lib.styles.ParagraphStyle(sname, self.default_style["Normal"], **self.styles[sname])

            for variable in node.findall('initialize'):
                for name in variable.findall('name'):
                    self.names[ name.get('id')] = name.get('value')

    def _para_style_update(self, node):
        """Translate one <paraStyle> node's attributes into a dict of
        reportlab ParagraphStyle keyword values."""
        data = {}
        # Color-valued attributes.
        for attr in ['textColor', 'backColor', 'bulletColor', 'borderColor']:
            if node.get(attr):
                data[attr] = color.get(node.get(attr))
        # Font names go through select_fontname for substitution.
        for attr in ['bulletFontName', 'fontName']:
            if node.get(attr):
                fontname= select_fontname(node.get(attr), None)
                if fontname is not None:
                    data['fontName'] = fontname
        for attr in ['bulletText']:
            if node.get(attr):
                data[attr] = node.get(attr)
        # Length-valued attributes (accept units like "3mm").
        for attr in ['fontSize', 'leftIndent', 'rightIndent', 'spaceBefore', 'spaceAfter',
                     'firstLineIndent', 'bulletIndent', 'bulletFontSize', 'leading',
                     'borderWidth','borderPadding','borderRadius']:
            if node.get(attr):
                data[attr] = utils.unit_get(node.get(attr))
        if node.get('alignment'):
            align = {
                'right':reportlab.lib.enums.TA_RIGHT,
                'center':reportlab.lib.enums.TA_CENTER,
                'justify':reportlab.lib.enums.TA_JUSTIFY
            }
            # Unknown values fall back to left alignment.
            data['alignment'] = align.get(node.get('alignment').lower(), reportlab.lib.enums.TA_LEFT)
        data['splitLongWords'] = 0
        return data

    def _table_style_get(self, style_node):
        """Build a platypus TableStyle from a <blockTableStyle> node's
        children; each child maps to one style command over a
        (start, stop) cell range."""
        styles = []
        for node in style_node:
            start = utils.tuple_int_get(node, 'start', (0,0) )
            stop = utils.tuple_int_get(node, 'stop', (-1,-1) )
            if node.tag=='blockValign':
                styles.append(('VALIGN', start, stop, str(node.get('value'))))
            elif node.tag=='blockFont':
                styles.append(('FONT', start, stop, str(node.get('name'))))
            elif node.tag=='blockTextColor':
                styles.append(('TEXTCOLOR', start, stop, color.get(str(node.get('colorName')))))
            elif node.tag=='blockLeading':
                styles.append(('LEADING', start, stop, utils.unit_get(node.get('length'))))
            elif node.tag=='blockAlignment':
                styles.append(('ALIGNMENT', start, stop, str(node.get('value'))))
            elif node.tag=='blockSpan':
                styles.append(('SPAN', start, stop))
            elif node.tag=='blockLeftPadding':
                styles.append(('LEFTPADDING', start, stop, utils.unit_get(node.get('length'))))
            elif node.tag=='blockRightPadding':
                styles.append(('RIGHTPADDING', start, stop, utils.unit_get(node.get('length'))))
            elif node.tag=='blockTopPadding':
                styles.append(('TOPPADDING', start, stop, utils.unit_get(node.get('length'))))
            elif node.tag=='blockBottomPadding':
                styles.append(('BOTTOMPADDING', start, stop, utils.unit_get(node.get('length'))))
            elif node.tag=='blockBackground':
                styles.append(('BACKGROUND', start, stop, color.get(node.get('colorName'))))
            if node.get('size'):
                styles.append(('FONTSIZE', start, stop, utils.unit_get(node.get('size'))))
            elif node.tag=='lineStyle':
                kind = node.get('kind')
                kind_list = [ 'GRID', 'BOX', 'OUTLINE', 'INNERGRID', 'LINEBELOW', 'LINEABOVE','LINEBEFORE', 'LINEAFTER' ]
                assert kind in kind_list
                thick = 1
                if node.get('thickness'):
                    thick = float(node.get('thickness'))
                styles.append((kind, start, stop, thick, color.get(node.get('colorName'))))
        return platypus.tables.TableStyle(styles)

    def para_style_get(self, node):
        """Resolve the ParagraphStyle for a node: its named style (or the
        'Normal' default), with any inline attributes applied to a copy."""
        style = False
        sname = node.get('style')
        if sname:
            if sname in self.styles_obj:
                style = self.styles_obj[sname]
            else:
                _logger.debug('Warning: style not found, %s - setting default!', node.get('style'))
        if not style:
            style = self.default_style['Normal']
        para_update = self._para_style_update(node)
        if para_update:
            # update style only is necessary
            style = copy.deepcopy(style)
            style.__dict__.update(para_update)
        return style
class _rml_doc(object):
    """Top-level RML document renderer: holds the parsed etree, styles,
    embedded images and evaluation context, and drives PDF generation."""

    def __init__(self, node, localcontext=None, images=None, path='.', title=None):
        if images is None:
            images = {}
        if localcontext is None:
            localcontext = {}
        self.localcontext = localcontext
        self.etree = node
        self.filename = self.etree.get('filename')
        self.images = images
        self.path = path
        self.title = title

    def docinit(self, els):
        """Register TTF fonts and font families declared in <docinit>."""
        from reportlab.lib.fonts import addMapping
        from reportlab.pdfbase import pdfmetrics
        from reportlab.pdfbase.ttfonts import TTFont

        for node in els:

            for font in node.findall('registerFont'):
                name = font.get('fontName').encode('ascii')
                fname = font.get('fontFile').encode('ascii')
                if name not in pdfmetrics._fonts:
                    pdfmetrics.registerFont(TTFont(name, fname))
                #by default, we map the fontName to each style (bold, italic, bold and italic), so that
                #if there isn't any font defined for one of these style (via a font family), the system
                #will fallback on the normal font.
                addMapping(name, 0, 0, name)    #normal
                addMapping(name, 0, 1, name)    #italic
                addMapping(name, 1, 0, name)    #bold
                addMapping(name, 1, 1, name)    #italic and bold

            #if registerFontFamily is defined, we register the mapping of the fontName to use for each style.
            for font_family in node.findall('registerFontFamily'):
                family_name = font_family.get('normal').encode('ascii')
                if font_family.get('italic'):
                    addMapping(family_name, 0, 1, font_family.get('italic').encode('ascii'))
                if font_family.get('bold'):
                    addMapping(family_name, 1, 0, font_family.get('bold').encode('ascii'))
                if font_family.get('boldItalic'):
                    addMapping(family_name, 1, 1, font_family.get('boldItalic').encode('ascii'))

    def setTTFontMapping(self,face, fontname, filename, mode='all'):
        """Register a TTF file and map it onto ``face`` for the requested
        style variant(s) ('all', 'italic', 'bold', 'bolditalic', ...)."""
        from reportlab.lib.fonts import addMapping
        from reportlab.pdfbase import pdfmetrics
        from reportlab.pdfbase.ttfonts import TTFont
        if mode:
            mode = mode.lower()

        if fontname not in pdfmetrics._fonts:
            pdfmetrics.registerFont(TTFont(fontname, filename))
        if mode == 'all':
            addMapping(face, 0, 0, fontname)    #normal
            addMapping(face, 0, 1, fontname)    #italic
            addMapping(face, 1, 0, fontname)    #bold
            addMapping(face, 1, 1, fontname)    #italic and bold
        elif mode in ['italic', 'oblique']:
            addMapping(face, 0, 1, fontname)    #italic
        elif mode == 'bold':
            addMapping(face, 1, 0, fontname)    #bold
        elif mode in ('bolditalic', 'bold italic','boldoblique', 'bold oblique'):
            addMapping(face, 1, 1, fontname)    #italic and bold
        else:
            addMapping(face, 0, 0, fontname)    #normal

    def _textual_image(self, node):
        """Decode an inline base64 image node.

        NOTE(review): ``rc`` is built but never used, and lxml elements
        have no ``tostring()`` method, so the final line would raise
        AttributeError if ever reached — this looks like dead/broken code;
        confirm whether any RML input still exercises it.
        """
        rc = ''
        for n in node:
            rc +=( etree.tostring(n) or '') + n.tail
        return base64.decodestring(node.tostring())

    def _images(self, el):
        """Collect base64-decoded images from an <images> node, by name."""
        result = {}
        for node in el.findall('.//image'):
            rc =( node.text or '')
            result[node.get('name')] = base64.decodestring(rc)
        return result

    def render(self, out):
        """Render the document to the ``out`` file-like object.

        Uses the platypus template path when a <template> is present,
        otherwise falls back to direct canvas drawing of <pageDrawing>.
        """
        el = self.etree.findall('.//docinit')
        if el:
            self.docinit(el)

        el = self.etree.findall('.//stylesheet')
        self.styles = _rml_styles(el,self.localcontext)

        el = self.etree.findall('.//images')
        if el:
            self.images.update( self._images(el[0]) )

        el = self.etree.findall('.//template')
        if len(el):
            pt_obj = _rml_template(self.localcontext, out, el[0], self, images=self.images, path=self.path, title=self.title)
            el = utils._child_get(self.etree, self, 'story')
            pt_obj.render(el)
        else:
            self.canvas = canvas.Canvas(out)
            pd = self.etree.find('pageDrawing')[0]
            pd_obj = _rml_canvas(self.canvas, self.localcontext, None, self, self.images, path=self.path, title=self.title)
            pd_obj.render(pd)
            self.canvas.showPage()
            self.canvas.save()
class _rml_canvas(object):
    """Renders RML drawing tags (drawString, rect, image, path, ...) onto
    a reportlab canvas; one handler method per supported tag."""

    def __init__(self, canvas, localcontext, doc_tmpl=None, doc=None, images=None, path='.', title=None):
        if images is None:
            images = {}
        self.localcontext = localcontext
        self.canvas = canvas
        self.styles = doc.styles
        self.doc_tmpl = doc_tmpl
        self.doc = doc
        self.images = images
        self.path = path
        self.title = title
        if self.title:
            self.canvas.setTitle(self.title)

    def _textual(self, node, x=0, y=0):
        """Flatten a node's text, resolving <seq>, <pageCount> and
        <pageNumber> children; newlines are stripped from the result."""
        text = node.text and node.text.encode('utf-8') or ''
        rc = utils._process_text(self, text)
        for n in node:
            if n.tag == 'seq':
                from reportlab.lib.sequencer import getSequencer
                seq = getSequencer()
                rc += str(seq.next(n.get('id')))
            if n.tag == 'pageCount':
                # Reference the per-story pageCount form (see PageCount);
                # translate so the form is stamped at (x, y).
                if x or y:
                    self.canvas.translate(x,y)
                self.canvas.doForm('pageCount%s' % (self.canvas._storyCount,))
                if x or y:
                    self.canvas.translate(-x,-y)
            if n.tag == 'pageNumber':
                rc += str(self.canvas.getPageNumber())
            rc += utils._process_text(self, n.tail)
        return rc.replace('\n','')

    def _drawString(self, node):
        v = utils.attr_get(node, ['x','y'])
        text=self._textual(node, **v)
        text = utils.xml2str(text)
        try:
            self.canvas.drawString(text=text, **v)
        except TypeError:
            _logger.error("Bad RML: <drawString> tag requires attributes 'x' and 'y'!")
            raise

    def _drawCenteredString(self, node):
        v = utils.attr_get(node, ['x','y'])
        text=self._textual(node, **v)
        text = utils.xml2str(text)
        self.canvas.drawCentredString(text=text, **v)

    def _drawRightString(self, node):
        v = utils.attr_get(node, ['x','y'])
        text=self._textual(node, **v)
        text = utils.xml2str(text)
        self.canvas.drawRightString(text=text, **v)

    def _rect(self, node):
        # 'round' switches to a rounded-corner rectangle.
        if node.get('round'):
            self.canvas.roundRect(radius=utils.unit_get(node.get('round')), **utils.attr_get(node, ['x','y','width','height'], {'fill':'bool','stroke':'bool'}))
        else:
            self.canvas.rect(**utils.attr_get(node, ['x','y','width','height'], {'fill':'bool','stroke':'bool'}))

    def _ellipse(self, node):
        x1 = utils.unit_get(node.get('x'))
        x2 = utils.unit_get(node.get('width'))
        y1 = utils.unit_get(node.get('y'))
        y2 = utils.unit_get(node.get('height'))

        self.canvas.ellipse(x1,y1,x2,y2, **utils.attr_get(node, [], {'fill':'bool','stroke':'bool'}))

    def _curves(self, node):
        # Consume groups of 8 coordinates; each group is one bezier curve.
        # NOTE(review): `lines` is never used here.
        line_str = node.text.split()
        lines = []
        while len(line_str)>7:
            self.canvas.bezier(*[utils.unit_get(l) for l in line_str[0:8]])
            line_str = line_str[8:]

    def _lines(self, node):
        # Consume groups of 4 coordinates; each group is one line segment.
        line_str = node.text.split()
        lines = []
        while len(line_str)>3:
            lines.append([utils.unit_get(l) for l in line_str[0:4]])
            line_str = line_str[4:]
        self.canvas.lines(lines)

    def _grid(self, node):
        xlist = [utils.unit_get(s) for s in node.get('xs').split(',')]
        ylist = [utils.unit_get(s) for s in node.get('ys').split(',')]

        self.canvas.grid(xlist, ylist)

    def _translate(self, node):
        dx = utils.unit_get(node.get('dx')) or 0
        dy = utils.unit_get(node.get('dy')) or 0
        self.canvas.translate(dx,dy)

    def _circle(self, node):
        self.canvas.circle(x_cen=utils.unit_get(node.get('x')), y_cen=utils.unit_get(node.get('y')), r=utils.unit_get(node.get('radius')), **utils.attr_get(node, [], {'fill':'bool','stroke':'bool'}))

    def _place(self, node):
        """Render flowables at a fixed position, stacking them downward
        inside the given width/height box."""
        flows = _rml_flowable(self.doc, self.localcontext, images=self.images, path=self.path, title=self.title, canvas=self.canvas).render(node)
        infos = utils.attr_get(node, ['x','y','width','height'])

        infos['y']+=infos['height']
        for flow in flows:
            w,h = flow.wrap(infos['width'], infos['height'])
            if w<=infos['width'] and h<=infos['height']:
                infos['y']-=h
                flow.drawOn(self.canvas,infos['x'],infos['y'])
                infos['height']-=h
            else:
                raise ValueError("Not enough space")

    def _line_mode(self, node):
        """Apply stroke attributes (width, join, cap, dash) to the canvas."""
        ljoin = {'round':1, 'mitered':0, 'bevelled':2}
        lcap = {'default':0, 'round':1, 'square':2}

        if node.get('width'):
            self.canvas.setLineWidth(utils.unit_get(node.get('width')))
        if node.get('join'):
            self.canvas.setLineJoin(ljoin[node.get('join')])
        if node.get('cap'):
            self.canvas.setLineCap(lcap[node.get('cap')])
        # NOTE(review): 'miterLimit' calls setDash — looks like it should
        # be setMiterLimit; confirm before relying on this attribute.
        if node.get('miterLimit'):
            self.canvas.setDash(utils.unit_get(node.get('miterLimit')))
        if node.get('dash'):
            # NOTE(review): the converted `dashes` list is discarded; the
            # raw comma-split strings are passed to setDash instead.
            dashes = node.get('dash').split(',')
            for x in range(len(dashes)):
                dashes[x]=utils.unit_get(dashes[x])
            self.canvas.setDash(node.get('dash').split(','))

    def _image(self, node):
        """Draw an image sourced from (in order): the named images dict,
        inline base64 text, a cached/external file or URL."""
        import urllib
        import urlparse
        from reportlab.lib.utils import ImageReader
        nfile = node.get('file')
        if not nfile:
            if node.get('name'):
                image_data = self.images[node.get('name')]
                _logger.debug("Image %s used", node.get('name'))
                s = StringIO(image_data)
            else:
                newtext = node.text
                if self.localcontext:
                    res = utils._regex.findall(newtext)
                    for key in res:
                        newtext = eval(key, {}, self.localcontext) or ''
                image_data = None
                if newtext:
                    image_data = base64.decodestring(newtext)
                if image_data:
                    s = StringIO(image_data)
                else:
                    _logger.debug("No image data!")
                    return False
        else:
            if nfile in self.images:
                s = StringIO(self.images[nfile])
            else:
                try:
                    up = urlparse.urlparse(str(nfile))
                except ValueError:
                    up = False
                if up and up.scheme:
                    # RFC: do we really want to open external URLs?
                    # Are we safe from cross-site scripting or attacks?
                    _logger.debug("Retrieve image from %s", nfile)
                    u = urllib.urlopen(str(nfile))
                    s = StringIO(u.read())
                else:
                    _logger.debug("Open image file %s ", nfile)
                    s = _open_image(nfile, path=self.path)
        try:
            img = ImageReader(s)
            (sx,sy) = img.getSize()
            _logger.debug("Image is %dx%d", sx, sy)
            args = { 'x': 0.0, 'y': 0.0, 'mask': 'auto'}
            for tag in ('width','height','x','y'):
                if node.get(tag):
                    args[tag] = utils.unit_get(node.get(tag))
            # Keep aspect ratio when only one dimension is supplied.
            if ('width' in args) and (not 'height' in args):
                args['height'] = sy * args['width'] / sx
            elif ('height' in args) and (not 'width' in args):
                args['width'] = sx * args['height'] / sy
            elif ('width' in args) and ('height' in args):
                # NOTE(review): `(float(sx)>sy)` is a boolean (0/1) — this
                # looks like it was meant to be the aspect ratio
                # `float(sx)/sy`; confirm before changing, as output
                # geometry depends on it.
                if (float(args['width'])/args['height'])>(float(sx)>sy):
                    args['width'] = sx * args['height'] / sy
                else:
                    args['height'] = sy * args['width'] / sx
            self.canvas.drawImage(img, **args)
        finally:
            s.close()
#        self.canvas._doc.SaveToFile(self.canvas._filename, self.canvas)

    def _path(self, node):
        """Build and draw a path from <moveto>/<curvesto> children and
        bare coordinate text; closed by default unless close="0"."""
        self.path = self.canvas.beginPath()
        self.path.moveTo(**utils.attr_get(node, ['x','y']))
        for n in utils._child_get(node, self):
            if not n.text :
                if n.tag=='moveto':
                    vals = utils.text_get(n).split()
                    self.path.moveTo(utils.unit_get(vals[0]), utils.unit_get(vals[1]))
                elif n.tag=='curvesto':
                    vals = utils.text_get(n).split()
                    while len(vals)>5:
                        pos=[]
                        while len(pos)<6:
                            pos.append(utils.unit_get(vals.pop(0)))
                        self.path.curveTo(*pos)
            elif n.text:
                data = n.text.split()               # Not sure if I must merge all TEXT_NODE ?
                while len(data)>1:
                    x = utils.unit_get(data.pop(0))
                    y = utils.unit_get(data.pop(0))
                    self.path.lineTo(x,y)
        # Closed unless a 'close' attribute is present and falsy.
        if (not node.get('close')) or utils.bool_get(node.get('close')):
            self.path.close()
        self.canvas.drawPath(self.path, **utils.attr_get(node, [], {'fill':'bool','stroke':'bool'}))

    def setFont(self, node):
        """Set the canvas font, falling back to the current font name when
        the requested one cannot be located."""
        fontname = select_fontname(node.get('name'), self.canvas._fontname)
        return self.canvas.setFont(fontname, utils.unit_get(node.get('size')))

    def render(self, node):
        """Dispatch each child tag of ``node`` to its handler method."""
        tags = {
            'drawCentredString': self._drawCenteredString,
            'drawRightString': self._drawRightString,
            'drawString': self._drawString,
            'rect': self._rect,
            'ellipse': self._ellipse,
            'lines': self._lines,
            'grid': self._grid,
            'curves': self._curves,
            'fill': lambda node: self.canvas.setFillColor(color.get(node.get('color'))),
            'stroke': lambda node: self.canvas.setStrokeColor(color.get(node.get('color'))),
            'setFont': self.setFont ,
            'place': self._place,
            'circle': self._circle,
            'lineMode': self._line_mode,
            'path': self._path,
            'rotate': lambda node: self.canvas.rotate(float(node.get('degrees'))),
            'translate': self._translate,
            'image': self._image
        }
        for n in utils._child_get(node, self):
            if n.tag in tags:
                tags[n.tag](n)
class _rml_draw(object):
    """Renders one RML drawing node onto an existing canvas via
    ``_rml_canvas``, isolating its graphics-state changes."""

    def __init__(self, localcontext, node, styles, images=None, path='.', title=None):
        self.localcontext = localcontext
        self.node = node
        self.styles = styles
        self.canvas = None
        self.images = {} if images is None else images
        self.path = path
        self.canvas_title = title

    def render(self, canvas, doc):
        # Bracket the drawing in save/restore so font, color and transform
        # changes do not leak into subsequent page content.
        canvas.saveState()
        renderer = _rml_canvas(canvas, self.localcontext, doc, self.styles,
                               images=self.images, path=self.path,
                               title=self.canvas_title)
        renderer.render(self.node)
        canvas.restoreState()
class _rml_Illustration(platypus.flowables.Flowable):
    """Fixed-size flowable wrapping an RML <illustration> node; drawing is
    delegated to ``_rml_draw`` when the story is rendered."""

    def __init__(self, node, localcontext, styles, self2):
        # Copy the context so later mutations by the caller don't affect us.
        self.localcontext = (localcontext or {}).copy()
        self.node = node
        self.styles = styles
        # Declared width/height define the space reserved in the story.
        self.width = utils.unit_get(node.get('width'))
        self.height = utils.unit_get(node.get('height'))
        self.self2 = self2

    def wrap(self, *args):
        # Fixed footprint: ignore the space offered by the frame.
        return self.width, self.height

    def draw(self):
        owner = self.self2
        drawing = _rml_draw(self.localcontext, self.node, self.styles,
                            images=owner.images, path=owner.path,
                            title=owner.title)
        drawing.render(self.canv, None)
# Workaround for issue #15: https://bitbucket.org/rptlab/reportlab/issue/15/infinite-pages-produced-when-splitting
original_pto_split = platypus.flowables.PTOContainer.split
def split(self, availWidth, availHeight):
    # Delegate to the stock implementation first.
    res = original_pto_split(self, availWidth, availHeight)
    # If the split yields only the PTO header/trailer plus a UseUpSpace
    # filler (i.e. no real content fits), report "nothing fits" instead
    # of letting platypus loop forever producing empty pages.
    if len(res) > 2 and len(self._content) > 0:
        header = self._content[0]._ptoinfo.header
        trailer = self._content[0]._ptoinfo.trailer
        if isinstance(res[-2], platypus.flowables.UseUpSpace) and len(header + trailer) == len(res[:-2]):
            return []
    return res
# Monkey-patch the fixed split onto PTOContainer process-wide.
platypus.flowables.PTOContainer.split = split
class _rml_flowable(object):
def __init__(self, doc, localcontext, images=None, path='.', title=None, canvas=None):
if images is None:
images = {}
self.localcontext = localcontext
self.doc = doc
self.styles = doc.styles
self.images = images
self.path = path
self.title = title
self.canvas = canvas
def _textual(self, node):
rc1 = utils._process_text(self, node.text or '')
for n in utils._child_get(node,self):
txt_n = copy.deepcopy(n)
for key in txt_n.attrib.keys():
if key in ('rml_except', 'rml_loop', 'rml_tag'):
del txt_n.attrib[key]
if not n.tag == 'bullet':
if n.tag == 'pageNumber':
txt_n.text = self.canvas and str(self.canvas.getPageNumber()) or ''
else:
txt_n.text = utils.xml2str(self._textual(n))
txt_n.tail = n.tail and utils.xml2str(utils._process_text(self, n.tail.replace('\n',''))) or ''
rc1 += etree.tostring(txt_n)
return rc1
def _table(self, node):
children = utils._child_get(node,self,'tr')
if not children:
return None
length = 0
colwidths = None
rowheights = None
data = []
styles = []
posy = 0
for tr in children:
paraStyle = None
if tr.get('style'):
st = copy.deepcopy(self.styles.table_styles[tr.get('style')])
for si in range(len(st._cmds)):
s = list(st._cmds[si])
s[1] = (s[1][0],posy)
s[2] = (s[2][0],posy)
st._cmds[si] = tuple(s)
styles.append(st)
if tr.get('paraStyle'):
paraStyle = self.styles.styles[tr.get('paraStyle')]
data2 = []
posx = 0
for td in utils._child_get(tr, self,'td'):
if td.get('style'):
st = copy.deepcopy(self.styles.table_styles[td.get('style')])
for s in st._cmds:
s[1][1] = posy
s[2][1] = posy
s[1][0] = posx
s[2][0] = posx
styles.append(st)
if td.get('paraStyle'):
# TODO: merge styles
paraStyle = self.styles.styles[td.get('paraStyle')]
posx += 1
flow = []
for n in utils._child_get(td, self):
if n.tag == etree.Comment:
n.text = ''
continue
fl = self._flowable(n, extra_style=paraStyle)
if isinstance(fl,list):
flow += fl
else:
flow.append( fl )
if not len(flow):
flow = self._textual(td)
data2.append( flow )
if len(data2)>length:
length=len(data2)
for ab in data:
while len(ab)<length:
ab.append('')
while len(data2)<length:
data2.append('')
data.append( data2 )
posy += 1
if node.get('colWidths'):
assert length == len(node.get('colWidths').split(','))
colwidths = [utils.unit_get(f.strip()) for f in node.get('colWidths').split(',')]
if node.get('rowHeights'):
rowheights = [utils.unit_get(f.strip()) for f in node.get('rowHeights').split(',')]
if len(rowheights) == 1:
rowheights = rowheights[0]
table = platypus.LongTable(data = data, colWidths=colwidths, rowHeights=rowheights, **(utils.attr_get(node, ['splitByRow'] ,{'repeatRows':'int','repeatCols':'int'})))
if node.get('style'):
table.setStyle(self.styles.table_styles[node.get('style')])
for s in styles:
table.setStyle(s)
return table
    def _illustration(self, node):
        # Delegate <illustration> nodes to the drawing renderer wrapper.
        return _rml_Illustration(node, self.localcontext, self.styles, self)
    def _textual_image(self, node):
        # Decode the node's inline base64 image payload to raw bytes.
        # NOTE(review): base64.decodestring is the Python 2 spelling
        # (removed in Python 3.9); fine for this Python 2 codebase.
        return base64.decodestring(node.text)
def _pto(self, node):
sub_story = []
pto_header = None
pto_trailer = None
for node in utils._child_get(node, self):
if node.tag == etree.Comment:
node.text = ''
continue
elif node.tag=='pto_header':
pto_header = self.render(node)
elif node.tag=='pto_trailer':
pto_trailer = self.render(node)
else:
flow = self._flowable(node)
if flow:
if isinstance(flow,list):
sub_story = sub_story + flow
else:
sub_story.append(flow)
return platypus.flowables.PTOContainer(sub_story, trailer=pto_trailer, header=pto_header)
    def _flowable(self, node, extra_style=None):
        """Map one RML node to a platypus flowable (or a list of them).

        Returns None for no-op tags (e.g. <name>), False for unusable
        images, and writes a warning to stderr for unknown tags.
        """
        if node.tag=='pto':
            return self._pto(node)
        if node.tag=='para':
            style = self.styles.para_style_get(node)
            if extra_style:
                style.__dict__.update(extra_style)
            text_node = self._textual(node).strip().replace('\n\n', '\n').replace('\n', '<br/>')
            instance = platypus.Paragraph(text_node, style, **(utils.attr_get(node, [], {'bulletText':'str'})))
            result = [instance]
            # reportlab > 3.0 collapses paragraphs whose plain text is
            # empty even though markup remains; add a filler line so the
            # paragraph still takes vertical space.
            if LooseVersion(reportlab.Version) > LooseVersion('3.0') and not instance.getPlainText().strip() and instance.text.strip():
                result.append(platypus.Paragraph(' <br/>', style, **(utils.attr_get(node, [], {'bulletText': 'str'}))))
            return result
        elif node.tag=='barCode':
            try:
                from reportlab.graphics.barcode import code128
                from reportlab.graphics.barcode import code39
                from reportlab.graphics.barcode import code93
                from reportlab.graphics.barcode import common
                from reportlab.graphics.barcode import fourstate
                from reportlab.graphics.barcode import usps
                from reportlab.graphics.barcode import createBarcodeDrawing
            except ImportError:
                _logger.warning("Cannot use barcode renderers:", exc_info=True)
                return None
            args = utils.attr_get(node, [], {'ratio':'float','xdim':'unit','height':'unit','checksum':'int','quiet':'int','width':'unit','stop':'bool','bearers':'int','barWidth':'float','barHeight':'float'})
            # Dispatch table: RML code name -> barcode constructor.
            codes = {
                'codabar': lambda x: common.Codabar(x, **args),
                'code11': lambda x: common.Code11(x, **args),
                'code128': lambda x: code128.Code128(str(x), **args),
                'standard39': lambda x: code39.Standard39(str(x), **args),
                'standard93': lambda x: code93.Standard93(str(x), **args),
                'i2of5': lambda x: common.I2of5(x, **args),
                'extended39': lambda x: code39.Extended39(str(x), **args),
                'extended93': lambda x: code93.Extended93(str(x), **args),
                'msi': lambda x: common.MSI(x, **args),
                'fim': lambda x: usps.FIM(x, **args),
                'postnet': lambda x: usps.POSTNET(x, **args),
                'ean13': lambda x: createBarcodeDrawing('EAN13', value=str(x), **args),
                'qrcode': lambda x: createBarcodeDrawing('QR', value=x, **args),
            }
            code = 'code128'
            if node.get('code'):
                # NOTE(review): an unrecognised code name raises KeyError
                # on the dispatch below.
                code = node.get('code').lower()
            return codes[code](self._textual(node))
        elif node.tag=='name':
            # <name> only records a value in the stylesheet's name table.
            self.styles.names[ node.get('id')] = node.get('value')
            return None
        elif node.tag=='xpre':
            style = self.styles.para_style_get(node)
            return platypus.XPreformatted(self._textual(node), style, **(utils.attr_get(node, [], {'bulletText':'str','dedent':'int','frags':'int'})))
        elif node.tag=='pre':
            style = self.styles.para_style_get(node)
            return platypus.Preformatted(self._textual(node), style, **(utils.attr_get(node, [], {'bulletText':'str','dedent':'int'})))
        elif node.tag=='illustration':
            return self._illustration(node)
        elif node.tag=='blockTable':
            return self._table(node)
        elif node.tag=='title':
            styles = reportlab.lib.styles.getSampleStyleSheet()
            style = styles['Title']
            return platypus.Paragraph(self._textual(node), style, **(utils.attr_get(node, [], {'bulletText':'str'})))
        elif re.match('^h([1-9]+[0-9]*)$', (node.tag or '')):
            # <h1>..<hN> map onto reportlab's sample 'HeadingN' styles.
            styles = reportlab.lib.styles.getSampleStyleSheet()
            style = styles['Heading'+str(node.tag[1:])]
            return platypus.Paragraph(self._textual(node), style, **(utils.attr_get(node, [], {'bulletText':'str'})))
        elif node.tag=='image':
            image_data = False
            if not node.get('file'):
                if node.get('name'):
                    # Named image previously registered on the document.
                    if node.get('name') in self.doc.images:
                        _logger.debug("Image %s read ", node.get('name'))
                        image_data = self.doc.images[node.get('name')].read()
                    else:
                        _logger.warning("Image %s not defined", node.get('name'))
                        return False
                else:
                    # Inline base64 payload carried in the node text.
                    import base64
                    newtext = node.text
                    if self.localcontext:
                        newtext = utils._process_text(self, node.text or '')
                    image_data = base64.decodestring(newtext)
                if not image_data:
                    _logger.debug("No inline image data")
                    return False
                image = StringIO(image_data)
            else:
                _logger.debug("Image get from file %s", node.get('file'))
                image = _open_image(node.get('file'), path=self.doc.path)
            return platypus.Image(image, mask=(250,255,250,255,250,255), **(utils.attr_get(node, ['width','height'])))
        elif node.tag=='spacer':
            if node.get('width'):
                width = utils.unit_get(node.get('width'))
            else:
                width = utils.unit_get('1cm')
            length = utils.unit_get(node.get('length'))
            return platypus.Spacer(width=width, height=length)
        elif node.tag=='section':
            return self.render(node)
        elif node.tag == 'pageNumberReset':
            return PageReset()
        elif node.tag in ('pageBreak', 'nextPage'):
            return platypus.PageBreak()
        elif node.tag=='condPageBreak':
            return platypus.CondPageBreak(**(utils.attr_get(node, ['height'])))
        elif node.tag=='setNextTemplate':
            return platypus.NextPageTemplate(str(node.get('name')))
        elif node.tag=='nextFrame':
            return platypus.CondPageBreak(1000) # TODO: change the 1000 !
        elif node.tag == 'setNextFrame':
            from reportlab.platypus.doctemplate import NextFrameFlowable
            return NextFrameFlowable(str(node.get('name')))
        elif node.tag == 'currentFrame':
            from reportlab.platypus.doctemplate import CurrentFrameFlowable
            return CurrentFrameFlowable(str(node.get('name')))
        elif node.tag == 'frameEnd':
            return EndFrameFlowable()
        elif node.tag == 'hr':
            width_hr=node.get('width') or '100%'
            color_hr=node.get('color') or 'black'
            thickness_hr=node.get('thickness') or 1
            lineCap_hr=node.get('lineCap') or 'round'
            return platypus.flowables.HRFlowable(width=width_hr,color=color.get(color_hr),thickness=float(thickness_hr),lineCap=str(lineCap_hr))
        else:
            sys.stderr.write('Warning: flowable not yet implemented: %s !\n' % (node.tag,))
            return None
def render(self, node_story):
def process_story(node_story):
sub_story = []
for node in utils._child_get(node_story, self):
if node.tag == etree.Comment:
node.text = ''
continue
flow = self._flowable(node)
if flow:
if isinstance(flow,list):
sub_story = sub_story + flow
else:
sub_story.append(flow)
return sub_story
return process_story(node_story)
class EndFrameFlowable(ActionFlowable):
    # Action flowable emitted by the <frameEnd> tag: asks the doc
    # template to close the current frame (optionally resuming).
    def __init__(self,resume=0):
        ActionFlowable.__init__(self,('frameEnd',resume))
class TinyDocTemplate(platypus.BaseDocTemplate):
    """BaseDocTemplate variant supporting <pageReset/> page renumbering
    across multiple stories."""
    def beforeDocument(self):
        # Store some useful value directly inside canvas, so it's available
        # on flowable drawing (needed for proper PageCount handling)
        self.canv._doPageReset = False
        self.canv._storyCount = 0
    def ___handle_pageBegin(self):
        # Page-begin hook: after the usual template bookkeeping, force the
        # frame named 'first' to be the active frame for the new page.
        # NOTE(review): triple-underscore prefix — confirm this is the
        # reportlab handle_* override convention wiring it in.
        self.page += 1
        self.pageTemplate.beforeDrawPage(self.canv,self)
        self.pageTemplate.checkPageSize(self.canv,self)
        self.pageTemplate.onPage(self.canv,self)
        for f in self.pageTemplate.frames: f._reset()
        self.beforePage()
        self._curPageFlowableCount = 0
        if hasattr(self,'_nextFrameIndex'):
            del self._nextFrameIndex
        for f in self.pageTemplate.frames:
            if f.id == 'first':
                self.frame = f
                break
        self.handle_frameBegin()
    def afterPage(self):
        if isinstance(self.canv, NumberedCanvas):
            # save current page states before eventual reset
            self.canv._saved_page_states.append(dict(self.canv.__dict__))
        if self.canv._doPageReset:
            # Following a <pageReset/> tag:
            # - we reset the page number to 0
            # - we add a new PageCount flowable (relative to the current
            #   story number), but not for NumberedCanvas as it handles
            #   page counting itself
            # NOTE: _rml_template render() method add a PageReset flowable at end
            #   of each story, so we're sure to pass here at least once per story.
            if not isinstance(self.canv, NumberedCanvas):
                self.handle_flowable([ PageCount(story_count=self.canv._storyCount) ])
            self.canv._pageCount = self.page
            self.page = 0
            self.canv._flag = True
            self.canv._pageNumber = 0
            self.canv._doPageReset = False
            self.canv._storyCount += 1
class _rml_template(object):
    """Builds the TinyDocTemplate, its page templates/frames, and renders
    the RML stories into the final PDF written to *out*."""
    def __init__(self, localcontext, out, node, doc, images=None, path='.', title=None):
        if images is None:
            images = {}
        if not localcontext:
            localcontext={'internal_header':True}
        self.localcontext = localcontext
        self.images= images
        self.path = path
        self.title = title
        # The company preference (if any) selects the default paper size;
        # an explicit pageSize attribute below wins over it.
        pagesize_map = {'a4': A4,
                    'us_letter': letter
                    }
        pageSize = A4
        if self.localcontext.get('company'):
            pageSize = pagesize_map.get(self.localcontext.get('company').rml_paper_format, A4)
        if node.get('pageSize'):
            # Parse "(21cm,29.7cm)"-style attribute into a (w, h) pair.
            ps = map(lambda x:x.strip(), node.get('pageSize').replace(')', '').replace('(', '').split(','))
            pageSize = ( utils.unit_get(ps[0]),utils.unit_get(ps[1]) )
        self.doc_tmpl = TinyDocTemplate(out, pagesize=pageSize, **utils.attr_get(node, ['leftMargin','rightMargin','topMargin','bottomMargin'], {'allowSplitting':'int','showBoundary':'bool','rotation':'int','title':'str','author':'str'}))
        self.page_templates = []
        self.styles = doc.styles
        self.doc = doc
        self.image=[]
        pts = node.findall('pageTemplate')
        for pt in pts:
            frames = []
            for frame_el in pt.findall('frame'):
                frame = platypus.Frame( **(utils.attr_get(frame_el, ['x1','y1', 'width','height', 'leftPadding', 'rightPadding', 'bottomPadding', 'topPadding'], {'id':'str', 'showBoundary':'bool'})) )
                if utils.attr_get(frame_el, ['last']):
                    frame.lastFrame = True
                frames.append( frame )
            try :
                gr = pt.findall('pageGraphics')\
                    or pt[1].findall('pageGraphics')
            except Exception: # FIXME: be even more specific, perhaps?
                gr=''
            if len(gr):
#                self.image=[ n for n in utils._child_get(gr[0], self) if n.tag=='image' or not self.localcontext]
                drw = _rml_draw(self.localcontext,gr[0], self.doc, images=images, path=self.path, title=self.title)
                self.page_templates.append( platypus.PageTemplate(frames=frames, onPage=drw.render, **utils.attr_get(pt, [], {'id':'str'}) ))
            else:
                # No page graphics: render from the template node itself.
                drw = _rml_draw(self.localcontext,node,self.doc,title=self.title)
                self.page_templates.append( platypus.PageTemplate(frames=frames,onPage=drw.render, **utils.attr_get(pt, [], {'id':'str'}) ))
        self.doc_tmpl.addPageTemplates(self.page_templates)
    def render(self, node_stories):
        """Render each story, append PageReset markers, build the PDF."""
        if self.localcontext and not self.localcontext.get('internal_header',False):
            # NOTE(review): this del raises KeyError when a non-empty
            # localcontext lacks 'internal_header' — confirm callers
            # always provide the key.
            del self.localcontext['internal_header']
        fis = []
        r = _rml_flowable(self.doc,self.localcontext, images=self.images, path=self.path, title=self.title, canvas=None)
        story_cnt = 0
        for node_story in node_stories:
            if story_cnt > 0:
                fis.append(platypus.PageBreak())
            fis += r.render(node_story)
            # end of story numbering computation
            fis.append(PageReset())
            story_cnt += 1
        try:
            if self.localcontext and self.localcontext.get('internal_header',False):
                self.doc_tmpl.afterFlowable(fis)
                self.doc_tmpl.build(fis,canvasmaker=NumberedCanvas)
            else:
                self.doc_tmpl.build(fis)
        except platypus.doctemplate.LayoutError, e:
            # Python 2 except syntax; rewrap layout failures with a
            # user-oriented message before re-raising.
            e.name = 'Print Error'
            e.value = 'The document you are trying to print contains a table row that does not fit on one page. Please try to split it in smaller rows or contact your administrator.'
            raise
def parseNode(rml, localcontext=None, fout=None, images=None, path='.', title=None):
    """Parse an RML string and return the rendered PDF as a byte string."""
    root = etree.XML(rml)
    doc = _rml_doc(root, localcontext, images, path, title=title)
    # Optionally override font mappings with a site-specific module.
    try:
        from customfonts import SetCustomFonts
        SetCustomFonts(doc)
    except ImportError:
        # No custom fonts mapping installed on this system.
        pass
    except Exception:
        _logger.warning('Cannot set font mapping', exc_info=True)
    buf = StringIO()
    doc.render(buf)
    return buf.getvalue()
def parseString(rml, localcontext=None, fout=None, images=None, path='.', title=None):
    """Render an RML string to PDF.

    When *fout* is given, the PDF is written to that path and the path
    is returned; otherwise the PDF bytes are returned directly.
    """
    node = etree.XML(rml)
    r = _rml_doc(node, localcontext, images, path, title=title)
    #try to override some font mappings
    try:
        from customfonts import SetCustomFonts
        SetCustomFonts(r)
    except Exception:
        pass
    if fout:
        # 'file' is the Python 2 built-in alias for open().
        fp = file(fout,'wb')
        r.render(fp)
        fp.close()
        return fout
    else:
        fp = StringIO()
        r.render(fp)
        return fp.getvalue()
def trml2pdf_help():
    # Print CLI usage and exit (Python 2 print statements).
    print 'Usage: trml2pdf input.rml >output.pdf'
    print 'Render the standard input (RML) and output a PDF file'
    sys.exit(0)
if __name__=="__main__":
    # CLI entry point: render the RML file named on the command line to
    # stdout (the trailing comma suppresses Python 2's extra newline).
    if len(sys.argv)>1:
        if sys.argv[1]=='--help':
            trml2pdf_help()
        print parseString(file(sys.argv[1], 'r').read()),
    else:
        print 'Usage: trml2pdf input.rml >output.pdf'
        print 'Try \'trml2pdf --help\' for more information.'
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
grahamking/goodenergy | core/gravatar.py | 1 | 1567 | """Makes gravatar url for given email address"""
# Copyright 2010,2011 Good Energy Research Inc. <graham@goodenergy.ca>, <jeremy@goodenergy.ca>
#
# This file is part of Good Energy.
#
# Good Energy is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Good Energy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Good Energy. If not, see <http://www.gnu.org/licenses/>.
#
import urllib
from django.conf import settings
from django.utils.hashcompat import md5_constructor
from django.utils.html import escape
# Gravatar endpoint and defaults, overridable via Django settings.
GRAVATAR_URL_PREFIX = getattr(settings, "GRAVATAR_URL_PREFIX", "http://www.gravatar.com/")
GRAVATAR_DEFAULT_IMAGE = getattr(settings, "GRAVATAR_DEFAULT_IMAGE", "")
# NOTE(review): unused — for_email() defaults its own size to 80.
GRAVATAR_DEFAULT_SIZE = 40
def for_email(email, size=80):
    """Return an escaped Gravatar URL for *email* at *size* pixels.

    Uses default=404 so Gravatar answers 404 for addresses without an
    avatar instead of serving a placeholder image.
    Based on http://code.google.com/p/django-gravatar
    """
    email_hash = md5_constructor(email).hexdigest()
    query = urllib.urlencode({"s": str(size), "default": "404"})
    return escape("%savatar/%s/?%s" % (GRAVATAR_URL_PREFIX, email_hash, query))
| agpl-3.0 |
anthill-services/anthill-common | anthill/common/social/__init__.py | 1 | 2284 |
import tornado.httpclient
import ujson
import logging
from .. import cached
from .. internal import Internal
class APIError(Exception):
    """Raised when a social-network API call fails.

    Carries the status *code* and the raw response *body*.
    """

    def __init__(self, code, body):
        self.code = code
        self.body = body

    def __str__(self):
        return ": ".join((str(self.code), self.body))
class AuthResponse(object):
    """Dict-backed response object for social auth results.

    Keyword arguments whose value is None are dropped; reading any
    missing attribute yields None instead of raising.
    """
    def __getattr__(self, item):
        # Fetch through __dict__ instead of self.data: if 'data' itself is
        # not set yet (e.g. while unpickling, before __init__ has run),
        # reading self.data would re-enter __getattr__ and recurse forever.
        return self.__dict__.get("data", {}).get(item, None)

    def __init__(self, *args, **kwargs):
        # Keep only the keyword arguments that actually carry a value.
        self.data = {key: value for key, value in kwargs.items() if value is not None}

    def __str__(self):
        return ujson.dumps(self.data)

    def data(self):
        # NOTE(review): dead code — the 'data' instance attribute set in
        # __init__ shadows this method on every instance; kept only so
        # class-level access stays backward compatible.
        return self.data

    @staticmethod
    def parse(data):
        """Build an AuthResponse from a JSON string."""
        content = ujson.loads(data)
        return AuthResponse(**content)
class SocialNetworkAPI(object):
    """Base class for social-network integrations (auth key handling)."""
    def __init__(self, credential_type, cache):
        # HTTP client for outgoing calls to the social network.
        self.client = tornado.httpclient.AsyncHTTPClient()
        # Internal RPC gateway to other anthill services.
        self.internal = Internal()
        self.cache = cache
        self.credential_type = credential_type
    async def get_private_key(self, gamespace, data=None):
        """
        Looks for a key from login service.
        """
        if not data:
            key_name = self.credential_type
            # Cache the login-service lookup for 5 minutes per
            # gamespace/key-name pair.
            @cached(kv=self.cache,
                    h=lambda: "auth_key:" + str(gamespace) + ":" + key_name,
                    ttl=300,
                    json=True)
            async def get():
                logging.info("Looking for key '{0}' in gamespace @{1}".format(key_name, gamespace))
                key_data = await self.internal.request(
                    "login", "get_key", gamespace=gamespace, key_name=key_name)
                return key_data
            data = await get()
        return self.new_private_key(data)
    def has_private_key(self):
        # Subclasses that require a configured private key return True.
        return False
    def new_private_key(self, data):
        # Subclasses wrap raw key data in their SocialPrivateKey subclass.
        raise NotImplementedError()
class SocialPrivateKey(object):
    """Base wrapper around raw private-key data for a social network."""

    def __init__(self, data):
        self.data = data

    def get_app_id(self):
        """Application id associated with this key; None by default."""
        return None

    def dump(self):
        """Return the raw key data exactly as stored."""
        return self.data

    def has_ui(self):
        """Whether this key type offers an admin UI; False by default."""
        return False

    def get(self):
        raise NotImplementedError()

    def render(self):
        raise NotImplementedError()

    def update(self, **kwargs):
        raise NotImplementedError()
| mit |
lijoantony/django-oscar | tests/integration/shipping/scales_tests.py | 26 | 2242 | from decimal import Decimal as D
from django.test import TestCase
from oscar.apps.shipping.scales import Scale
from oscar.apps.basket.models import Basket
from oscar.test import factories
class TestScales(TestCase):
    """Tests for oscar.apps.shipping.scales.Scale product/basket weighing."""
    def test_weighs_uses_specified_attribute(self):
        # The weight comes from the product attribute named by attribute_code.
        scale = Scale(attribute_code='weight')
        p = factories.create_product(attributes={'weight': '1'})
        self.assertEqual(1, scale.weigh_product(p))
    def test_uses_default_weight_when_attribute_is_missing(self):
        scale = Scale(attribute_code='weight', default_weight=0.5)
        p = factories.create_product()
        self.assertEqual(0.5, scale.weigh_product(p))
    def test_raises_exception_when_attribute_is_missing(self):
        # No attribute and no default_weight: weighing must fail loudly.
        scale = Scale(attribute_code='weight')
        p = factories.create_product()
        with self.assertRaises(ValueError):
            scale.weigh_product(p)
    def test_returns_zero_for_empty_basket(self):
        basket = Basket()
        scale = Scale(attribute_code='weight')
        self.assertEqual(0, scale.weigh_basket(basket))
    def test_returns_correct_weight_for_nonempty_basket(self):
        basket = factories.create_basket(empty=True)
        products = [
            factories.create_product(attributes={'weight': '1'},
                                     price=D('5.00')),
            factories.create_product(attributes={'weight': '2'},
                                     price=D('5.00'))]
        for product in products:
            basket.add(product)
        scale = Scale(attribute_code='weight')
        self.assertEqual(1 + 2, scale.weigh_basket(basket))
    def test_returns_correct_weight_for_nonempty_basket_with_line_quantities(self):
        # Line quantities multiply the per-product weight.
        basket = factories.create_basket(empty=True)
        products = [
            (factories.create_product(attributes={'weight': '1'},
                                      price=D('5.00')), 3),
            (factories.create_product(attributes={'weight': '2'},
                                      price=D('5.00')), 4)]
        for product, quantity in products:
            basket.add(product, quantity=quantity)
        scale = Scale(attribute_code='weight')
        self.assertEqual(1*3 + 2*4, scale.weigh_basket(basket))
| bsd-3-clause |
Anonymouslemming/ansible | lib/ansible/modules/network/illumos/dladm_vnic.py | 70 | 7064 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Adam Števko <adam.stevko@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: dladm_vnic
short_description: Manage VNICs on Solaris/illumos systems.
description:
- Create or delete VNICs on Solaris/illumos systems.
version_added: "2.2"
author: Adam Števko (@xen0l)
options:
name:
description:
- VNIC name.
required: true
link:
description:
- VNIC underlying link name.
required: true
temporary:
description:
- Specifies that the VNIC is temporary. Temporary VNICs
do not persist across reboots.
required: false
default: false
choices: [ "true", "false" ]
mac:
description:
- Sets the VNIC's MAC address. Must be valid unicast MAC address.
required: false
default: false
aliases: [ "macaddr" ]
vlan:
description:
- Enable VLAN tagging for this VNIC. The VLAN tag will have id
I(vlan).
required: false
default: false
aliases: [ "vlan_id" ]
state:
description:
- Create or delete Solaris/illumos VNIC.
required: false
default: "present"
choices: [ "present", "absent" ]
'''
EXAMPLES = '''
# Create 'vnic0' VNIC over 'bnx0' link
- dladm_vnic:
name: vnic0
link: bnx0
state: present
# Create VNIC with specified MAC and VLAN tag over 'aggr0'
- dladm_vnic:
name: vnic1
link: aggr0
mac: '00:00:5E:00:53:23'
vlan: 4
# Remove 'vnic0' VNIC
- dladm_vnic:
name: vnic0
link: bnx0
state: absent
'''
RETURN = '''
name:
description: VNIC name
returned: always
type: string
sample: "vnic0"
link:
description: VNIC underlying link name
returned: always
type: string
sample: "igb0"
state:
description: state of the target
returned: always
type: string
sample: "present"
temporary:
description: VNIC's persistence
returned: always
type: boolean
sample: "True"
mac:
description: MAC address to use for VNIC
returned: if mac is specified
type: string
sample: "00:00:5E:00:53:42"
vlan:
description: VLAN to use for VNIC
returned: success
type: int
sample: 42
'''
import re
class VNIC(object):
    # Pattern intended to accept only unicast MAC addresses.
    # NOTE(review): the second-character class [2-9a-f0] excludes '1' yet
    # admits odd hex digits (3, 5, b, f) whose low bit marks a multicast
    # address — confirm the intended unicast rule before tightening.
    UNICAST_MAC_REGEX = r'^[a-f0-9][2-9a-f0]:([a-f0-9]{2}:){4}[a-f0-9]{2}$'
    def __init__(self, module):
        # Cache the module parameters as attributes for convenience.
        self.module = module
        self.name = module.params['name']
        self.link = module.params['link']
        self.mac = module.params['mac']
        self.vlan = module.params['vlan']
        self.temporary = module.params['temporary']
        self.state = module.params['state']
    def vnic_exists(self):
        # 'dladm show-vnic NAME' exits 0 iff the VNIC exists.
        cmd = [self.module.get_bin_path('dladm', True)]
        cmd.append('show-vnic')
        cmd.append(self.name)
        (rc, _, _) = self.module.run_command(cmd)
        if rc == 0:
            return True
        else:
            return False
    def create_vnic(self):
        cmd = [self.module.get_bin_path('dladm', True)]
        cmd.append('create-vnic')
        if self.temporary:
            cmd.append('-t')
        if self.mac:
            cmd.append('-m')
            cmd.append(self.mac)
        if self.vlan:
            cmd.append('-v')
            # NOTE(review): vlan is appended unconverted — presumably
            # run_command stringifies argv items; confirm for int ids.
            cmd.append(self.vlan)
        cmd.append('-l')
        cmd.append(self.link)
        cmd.append(self.name)
        return self.module.run_command(cmd)
    def delete_vnic(self):
        cmd = [self.module.get_bin_path('dladm', True)]
        cmd.append('delete-vnic')
        if self.temporary:
            cmd.append('-t')
        cmd.append(self.name)
        return self.module.run_command(cmd)
    def is_valid_unicast_mac(self):
        # NOTE(review): despite the name, this returns True when the MAC
        # does NOT match the pattern (i.e. "is invalid"); main() relies
        # on that inverted sense — keep callers in sync if renaming.
        mac_re = re.match(self.UNICAST_MAC_REGEX, self.mac)
        return mac_re is None
    def is_valid_vlan_id(self):
        # True when the VLAN id lies inside the 802.1Q range 0..4095.
        return 0 <= self.vlan <= 4095
def main():
    """Ansible entry point: converge the VNIC to the requested state."""
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(required=True),
            link=dict(required=True),
            mac=dict(default=None, aliases=['macaddr']),
            vlan=dict(default=None, aliases=['vlan_id']),
            temporary=dict(default=False, type='bool'),
            state=dict(default='present', choices=['absent', 'present']),
        ),
        supports_check_mode=True
    )
    vnic = VNIC(module)
    rc = None
    out = ''
    err = ''
    result = {}
    result['name'] = vnic.name
    result['link'] = vnic.link
    result['state'] = vnic.state
    result['temporary'] = vnic.temporary
    if vnic.mac is not None:
        # is_valid_unicast_mac() returns True when the MAC does NOT match
        # the unicast pattern, so a True result means validation failed.
        if vnic.is_valid_unicast_mac():
            module.fail_json(msg='Invalid unicast MAC address',
                             mac=vnic.mac,
                             name=vnic.name,
                             state=vnic.state,
                             link=vnic.link,
                             vlan=vnic.vlan)
        result['mac'] = vnic.mac
    if vnic.vlan is not None:
        # BUG FIX: is_valid_vlan_id() is True for ids in 0..4095, but the
        # original condition failed the module exactly when the id WAS
        # valid. Reject only out-of-range VLAN ids.
        if not vnic.is_valid_vlan_id():
            module.fail_json(msg='Invalid VLAN tag',
                             mac=vnic.mac,
                             name=vnic.name,
                             state=vnic.state,
                             link=vnic.link,
                             vlan=vnic.vlan)
        result['vlan'] = vnic.vlan
    if vnic.state == 'absent':
        if vnic.vnic_exists():
            if module.check_mode:
                module.exit_json(changed=True)
            (rc, out, err) = vnic.delete_vnic()
            if rc != 0:
                module.fail_json(name=vnic.name, msg=err, rc=rc)
    elif vnic.state == 'present':
        if not vnic.vnic_exists():
            if module.check_mode:
                module.exit_json(changed=True)
            (rc, out, err) = vnic.create_vnic()
        if rc is not None and rc != 0:
            module.fail_json(name=vnic.name, msg=err, rc=rc)
    # rc stays None when no command ran, i.e. nothing changed.
    if rc is None:
        result['changed'] = False
    else:
        result['changed'] = True
    if out:
        result['stdout'] = out
    if err:
        result['stderr'] = err
    module.exit_json(**result)
from ansible.module_utils.basic import *
# Ansible convention: module_utils star-import plus direct-run guard.
if __name__ == '__main__':
    main()
| gpl-3.0 |
mrquim/repository.mrquim | repo/script.module.unidecode/lib/unidecode/x05c.py | 252 | 4612 | data = (
'Po ', # 0x00
'Feng ', # 0x01
'Zhuan ', # 0x02
'Fu ', # 0x03
'She ', # 0x04
'Ke ', # 0x05
'Jiang ', # 0x06
'Jiang ', # 0x07
'Zhuan ', # 0x08
'Wei ', # 0x09
'Zun ', # 0x0a
'Xun ', # 0x0b
'Shu ', # 0x0c
'Dui ', # 0x0d
'Dao ', # 0x0e
'Xiao ', # 0x0f
'Ji ', # 0x10
'Shao ', # 0x11
'Er ', # 0x12
'Er ', # 0x13
'Er ', # 0x14
'Ga ', # 0x15
'Jian ', # 0x16
'Shu ', # 0x17
'Chen ', # 0x18
'Shang ', # 0x19
'Shang ', # 0x1a
'Mo ', # 0x1b
'Ga ', # 0x1c
'Chang ', # 0x1d
'Liao ', # 0x1e
'Xian ', # 0x1f
'Xian ', # 0x20
'[?] ', # 0x21
'Wang ', # 0x22
'Wang ', # 0x23
'You ', # 0x24
'Liao ', # 0x25
'Liao ', # 0x26
'Yao ', # 0x27
'Mang ', # 0x28
'Wang ', # 0x29
'Wang ', # 0x2a
'Wang ', # 0x2b
'Ga ', # 0x2c
'Yao ', # 0x2d
'Duo ', # 0x2e
'Kui ', # 0x2f
'Zhong ', # 0x30
'Jiu ', # 0x31
'Gan ', # 0x32
'Gu ', # 0x33
'Gan ', # 0x34
'Tui ', # 0x35
'Gan ', # 0x36
'Gan ', # 0x37
'Shi ', # 0x38
'Yin ', # 0x39
'Chi ', # 0x3a
'Kao ', # 0x3b
'Ni ', # 0x3c
'Jin ', # 0x3d
'Wei ', # 0x3e
'Niao ', # 0x3f
'Ju ', # 0x40
'Pi ', # 0x41
'Ceng ', # 0x42
'Xi ', # 0x43
'Bi ', # 0x44
'Ju ', # 0x45
'Jie ', # 0x46
'Tian ', # 0x47
'Qu ', # 0x48
'Ti ', # 0x49
'Jie ', # 0x4a
'Wu ', # 0x4b
'Diao ', # 0x4c
'Shi ', # 0x4d
'Shi ', # 0x4e
'Ping ', # 0x4f
'Ji ', # 0x50
'Xie ', # 0x51
'Chen ', # 0x52
'Xi ', # 0x53
'Ni ', # 0x54
'Zhan ', # 0x55
'Xi ', # 0x56
'[?] ', # 0x57
'Man ', # 0x58
'E ', # 0x59
'Lou ', # 0x5a
'Ping ', # 0x5b
'Ti ', # 0x5c
'Fei ', # 0x5d
'Shu ', # 0x5e
'Xie ', # 0x5f
'Tu ', # 0x60
'Lu ', # 0x61
'Lu ', # 0x62
'Xi ', # 0x63
'Ceng ', # 0x64
'Lu ', # 0x65
'Ju ', # 0x66
'Xie ', # 0x67
'Ju ', # 0x68
'Jue ', # 0x69
'Liao ', # 0x6a
'Jue ', # 0x6b
'Shu ', # 0x6c
'Xi ', # 0x6d
'Che ', # 0x6e
'Tun ', # 0x6f
'Ni ', # 0x70
'Shan ', # 0x71
'[?] ', # 0x72
'Xian ', # 0x73
'Li ', # 0x74
'Xue ', # 0x75
'Nata ', # 0x76
'[?] ', # 0x77
'Long ', # 0x78
'Yi ', # 0x79
'Qi ', # 0x7a
'Ren ', # 0x7b
'Wu ', # 0x7c
'Han ', # 0x7d
'Shen ', # 0x7e
'Yu ', # 0x7f
'Chu ', # 0x80
'Sui ', # 0x81
'Qi ', # 0x82
'[?] ', # 0x83
'Yue ', # 0x84
'Ban ', # 0x85
'Yao ', # 0x86
'Ang ', # 0x87
'Ya ', # 0x88
'Wu ', # 0x89
'Jie ', # 0x8a
'E ', # 0x8b
'Ji ', # 0x8c
'Qian ', # 0x8d
'Fen ', # 0x8e
'Yuan ', # 0x8f
'Qi ', # 0x90
'Cen ', # 0x91
'Qian ', # 0x92
'Qi ', # 0x93
'Cha ', # 0x94
'Jie ', # 0x95
'Qu ', # 0x96
'Gang ', # 0x97
'Xian ', # 0x98
'Ao ', # 0x99
'Lan ', # 0x9a
'Dao ', # 0x9b
'Ba ', # 0x9c
'Zuo ', # 0x9d
'Zuo ', # 0x9e
'Yang ', # 0x9f
'Ju ', # 0xa0
'Gang ', # 0xa1
'Ke ', # 0xa2
'Gou ', # 0xa3
'Xue ', # 0xa4
'Bei ', # 0xa5
'Li ', # 0xa6
'Tiao ', # 0xa7
'Ju ', # 0xa8
'Yan ', # 0xa9
'Fu ', # 0xaa
'Xiu ', # 0xab
'Jia ', # 0xac
'Ling ', # 0xad
'Tuo ', # 0xae
'Pei ', # 0xaf
'You ', # 0xb0
'Dai ', # 0xb1
'Kuang ', # 0xb2
'Yue ', # 0xb3
'Qu ', # 0xb4
'Hu ', # 0xb5
'Po ', # 0xb6
'Min ', # 0xb7
'An ', # 0xb8
'Tiao ', # 0xb9
'Ling ', # 0xba
'Chi ', # 0xbb
'Yuri ', # 0xbc
'Dong ', # 0xbd
'Cem ', # 0xbe
'Kui ', # 0xbf
'Xiu ', # 0xc0
'Mao ', # 0xc1
'Tong ', # 0xc2
'Xue ', # 0xc3
'Yi ', # 0xc4
'Kura ', # 0xc5
'He ', # 0xc6
'Ke ', # 0xc7
'Luo ', # 0xc8
'E ', # 0xc9
'Fu ', # 0xca
'Xun ', # 0xcb
'Die ', # 0xcc
'Lu ', # 0xcd
'An ', # 0xce
'Er ', # 0xcf
'Gai ', # 0xd0
'Quan ', # 0xd1
'Tong ', # 0xd2
'Yi ', # 0xd3
'Mu ', # 0xd4
'Shi ', # 0xd5
'An ', # 0xd6
'Wei ', # 0xd7
'Hu ', # 0xd8
'Zhi ', # 0xd9
'Mi ', # 0xda
'Li ', # 0xdb
'Ji ', # 0xdc
'Tong ', # 0xdd
'Wei ', # 0xde
'You ', # 0xdf
'Sang ', # 0xe0
'Xia ', # 0xe1
'Li ', # 0xe2
'Yao ', # 0xe3
'Jiao ', # 0xe4
'Zheng ', # 0xe5
'Luan ', # 0xe6
'Jiao ', # 0xe7
'E ', # 0xe8
'E ', # 0xe9
'Yu ', # 0xea
'Ye ', # 0xeb
'Bu ', # 0xec
'Qiao ', # 0xed
'Qun ', # 0xee
'Feng ', # 0xef
'Feng ', # 0xf0
'Nao ', # 0xf1
'Li ', # 0xf2
'You ', # 0xf3
'Xian ', # 0xf4
'Hong ', # 0xf5
'Dao ', # 0xf6
'Shen ', # 0xf7
'Cheng ', # 0xf8
'Tu ', # 0xf9
'Geng ', # 0xfa
'Jun ', # 0xfb
'Hao ', # 0xfc
'Xia ', # 0xfd
'Yin ', # 0xfe
'Yu ', # 0xff
)
| gpl-2.0 |
daviddeneroff/daviddeneroff.github.io | node_modules/grunt-sass/node_modules/node-sass/node_modules/pangyp/gyp/pylib/gyp/easy_xml_test.py | 2698 | 3270 | #!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Unit tests for the easy_xml.py file. """
import gyp.easy_xml as easy_xml
import unittest
import StringIO
class TestSequenceFunctions(unittest.TestCase):
  """Unit tests for gyp.easy_xml's XmlToString serialisation."""
  def setUp(self):
    self.stderr = StringIO.StringIO()
  def test_EasyXml_simple(self):
    # Bare element -> XML prolog plus self-closing tag; the encoding in
    # the prolog is overridable.
    self.assertEqual(
      easy_xml.XmlToString(['test']),
      '<?xml version="1.0" encoding="utf-8"?><test/>')
    self.assertEqual(
      easy_xml.XmlToString(['test'], encoding='Windows-1252'),
      '<?xml version="1.0" encoding="Windows-1252"?><test/>')
  def test_EasyXml_simple_with_attributes(self):
    # A dict right after the tag name becomes XML attributes.
    self.assertEqual(
      easy_xml.XmlToString(['test2', {'a': 'value1', 'b': 'value2'}]),
      '<?xml version="1.0" encoding="utf-8"?><test2 a="value1" b="value2"/>')
  def test_EasyXml_escaping(self):
    original = '<test>\'"\r&\nfoo'
    # NOTE(review): upstream gyp expects XML character references
    # (&#xD; &amp; &#xA; &apos;) in the strings below; they appear to
    # have been decoded to literal characters here, which also breaks
    # the replace() line's syntax — restore from upstream before use.
    converted = '<test>\'"
&
foo'
    converted_apos = converted.replace("'", ''')
    self.assertEqual(
      easy_xml.XmlToString(['test3', {'a': original}, original]),
      '<?xml version="1.0" encoding="utf-8"?><test3 a="%s">%s</test3>' %
      (converted, converted_apos))
  def test_EasyXml_pretty(self):
    # pretty=True emits newlines and two-space indentation.
    self.assertEqual(
      easy_xml.XmlToString(
        ['test3',
         ['GrandParent',
           ['Parent1',
             ['Child']
           ],
           ['Parent2']
         ]
        ],
        pretty=True),
      '<?xml version="1.0" encoding="utf-8"?>\n'
      '<test3>\n'
      '  <GrandParent>\n'
      '    <Parent1>\n'
      '      <Child/>\n'
      '    </Parent1>\n'
      '    <Parent2/>\n'
      '  </GrandParent>\n'
      '</test3>\n')
  def test_EasyXml_complex(self):
    # We want to create:
    target = (
      '<?xml version="1.0" encoding="utf-8"?>'
      '<Project>'
      '<PropertyGroup Label="Globals">'
      '<ProjectGuid>{D2250C20-3A94-4FB9-AF73-11BC5B73884B}</ProjectGuid>'
      '<Keyword>Win32Proj</Keyword>'
      '<RootNamespace>automated_ui_tests</RootNamespace>'
      '</PropertyGroup>'
      '<Import Project="$(VCTargetsPath)\\Microsoft.Cpp.props"/>'
      '<PropertyGroup '
      'Condition="\'$(Configuration)|$(Platform)\'=='
      '\'Debug|Win32\'" Label="Configuration">'
      '<ConfigurationType>Application</ConfigurationType>'
      '<CharacterSet>Unicode</CharacterSet>'
      '</PropertyGroup>'
      '</Project>')
    xml = easy_xml.XmlToString(
      ['Project',
       ['PropertyGroup', {'Label': 'Globals'},
        ['ProjectGuid', '{D2250C20-3A94-4FB9-AF73-11BC5B73884B}'],
        ['Keyword', 'Win32Proj'],
        ['RootNamespace', 'automated_ui_tests']
       ],
       ['Import', {'Project': '$(VCTargetsPath)\\Microsoft.Cpp.props'}],
       ['PropertyGroup',
        {'Condition': "'$(Configuration)|$(Platform)'=='Debug|Win32'",
         'Label': 'Configuration'},
        ['ConfigurationType', 'Application'],
        ['CharacterSet', 'Unicode']
       ]
      ])
    self.assertEqual(xml, target)
if __name__ == '__main__':
  # Allow running this test module directly.
  unittest.main()
| mit |
xHeliotrope/injustice_dropper | env/lib/python3.4/site-packages/django/contrib/gis/db/models/lookups.py | 84 | 10596 | from __future__ import unicode_literals
import re
from django.core.exceptions import FieldDoesNotExist
from django.db.models.constants import LOOKUP_SEP
from django.db.models.expressions import Col, Expression
from django.db.models.lookups import Lookup
from django.utils import six
# Registry mapping lookup_name -> lookup class, filled by the definitions below.
gis_lookups = {}
class GISLookup(Lookup):
    """Base class for GeoDjango spatial lookups (overlaps, contains, ...)."""
    sql_template = None
    transform_func = None
    distance = False
    @classmethod
    def _check_geo_field(cls, opts, lookup):
        """
        Utility for checking the given lookup with the given model options.
        The lookup is a string either specifying the geographic field, e.g.
        'point, 'the_geom', or a related lookup on a geographic field like
        'address__point'.
        If a GeometryField exists according to the given lookup on the model
        options, it will be returned. Otherwise returns None.
        """
        from django.contrib.gis.db.models.fields import GeometryField
        # This takes into account the situation where the lookup is a
        # lookup to a related geographic field, e.g., 'address__point'.
        field_list = lookup.split(LOOKUP_SEP)
        # Reversing so list operates like a queue of related lookups,
        # and popping the top lookup.
        field_list.reverse()
        fld_name = field_list.pop()
        try:
            geo_fld = opts.get_field(fld_name)
            # If the field list is still around, then it means that the
            # lookup was for a geometry field across a relationship --
            # thus we keep on getting the related model options and the
            # model field associated with the next field in the list
            # until there's no more left.
            while len(field_list):
                opts = geo_fld.rel.to._meta
                geo_fld = opts.get_field(field_list.pop())
        except (FieldDoesNotExist, AttributeError):
            return False
        # Finally, make sure we got a Geographic field and return.
        if isinstance(geo_fld, GeometryField):
            return geo_fld
        else:
            return False
    def get_db_prep_lookup(self, value, connection):
        # get_db_prep_lookup is called by process_rhs from super class
        if isinstance(value, (tuple, list)):
            # First param is assumed to be the geometric object
            params = [connection.ops.Adapter(value[0])] + list(value)[1:]
        else:
            params = [connection.ops.Adapter(value)]
        return ('%s', params)
    def process_rhs(self, compiler, connection):
        rhs, rhs_params = super(GISLookup, self).process_rhs(compiler, connection)
        if hasattr(self.rhs, '_as_sql'):
            # If rhs is some QuerySet, don't touch it
            return rhs, rhs_params
        geom = self.rhs
        if isinstance(self.rhs, Col):
            # Make sure the F Expression destination field exists, and
            # set an `srid` attribute with the same as that of the
            # destination.
            geo_fld = self.rhs.output_field
            if not hasattr(geo_fld, 'srid'):
                raise ValueError('No geographic field found in expression.')
            self.rhs.srid = geo_fld.srid
        elif isinstance(self.rhs, Expression):
            raise ValueError('Complex expressions not supported for GeometryField')
        elif isinstance(self.rhs, (list, tuple)):
            # (geom, extra...) tuple: the geometry itself comes first.
            geom = self.rhs[0]
        rhs = connection.ops.get_geom_placeholder(self.lhs.output_field, geom, compiler)
        return rhs, rhs_params
    def as_sql(self, compiler, connection):
        # Delegate final SQL construction to the backend's spatial operator.
        lhs_sql, sql_params = self.process_lhs(compiler, connection)
        rhs_sql, rhs_params = self.process_rhs(compiler, connection)
        sql_params.extend(rhs_params)
        template_params = {'lhs': lhs_sql, 'rhs': rhs_sql}
        backend_op = connection.ops.gis_operators[self.lookup_name]
        return backend_op.as_sql(connection, self, template_params, sql_params)
# ------------------
# Geometry operators
# ------------------
class OverlapsLeftLookup(GISLookup):
    """True if A's bounding box overlaps or is to the left of B's."""
    lookup_name = 'overlaps_left'
gis_lookups[OverlapsLeftLookup.lookup_name] = OverlapsLeftLookup


class OverlapsRightLookup(GISLookup):
    """True if A's bounding box overlaps or is to the right of B's."""
    lookup_name = 'overlaps_right'
gis_lookups[OverlapsRightLookup.lookup_name] = OverlapsRightLookup


class OverlapsBelowLookup(GISLookup):
    """True if A's bounding box overlaps or is below B's."""
    lookup_name = 'overlaps_below'
gis_lookups[OverlapsBelowLookup.lookup_name] = OverlapsBelowLookup


class OverlapsAboveLookup(GISLookup):
    """True if A's bounding box overlaps or is above B's."""
    lookup_name = 'overlaps_above'
gis_lookups[OverlapsAboveLookup.lookup_name] = OverlapsAboveLookup


class LeftLookup(GISLookup):
    """True if A's bounding box is strictly to the left of B's."""
    lookup_name = 'left'
gis_lookups[LeftLookup.lookup_name] = LeftLookup


class RightLookup(GISLookup):
    """True if A's bounding box is strictly to the right of B's."""
    lookup_name = 'right'
gis_lookups[RightLookup.lookup_name] = RightLookup


class StrictlyBelowLookup(GISLookup):
    """True if A's bounding box is strictly below B's."""
    lookup_name = 'strictly_below'
gis_lookups[StrictlyBelowLookup.lookup_name] = StrictlyBelowLookup


class StrictlyAboveLookup(GISLookup):
    """True if A's bounding box is strictly above B's."""
    lookup_name = 'strictly_above'
gis_lookups[StrictlyAboveLookup.lookup_name] = StrictlyAboveLookup
class SameAsLookup(GISLookup):
    """The "~=" ("same as") operator: true when A and B are geometrically
    identical, vertex by vertex."""
    lookup_name = 'same_as'
gis_lookups[SameAsLookup.lookup_name] = SameAsLookup


class ExactLookup(SameAsLookup):
    """Alias of the 'same_as' lookup."""
    lookup_name = 'exact'
gis_lookups[ExactLookup.lookup_name] = ExactLookup


class BBContainsLookup(GISLookup):
    """True if A's bounding box completely contains B's bounding box."""
    lookup_name = 'bbcontains'
gis_lookups[BBContainsLookup.lookup_name] = BBContainsLookup


class BBOverlapsLookup(GISLookup):
    """True if A's bounding box overlaps B's bounding box."""
    lookup_name = 'bboverlaps'
gis_lookups[BBOverlapsLookup.lookup_name] = BBOverlapsLookup


class ContainedLookup(GISLookup):
    """True if A's bounding box is completely contained by B's bounding box."""
    lookup_name = 'contained'
gis_lookups[ContainedLookup.lookup_name] = ContainedLookup
# ------------------
# Geometry functions
# ------------------
class ContainsLookup(GISLookup):
    """Geometric 'contains' predicate."""
    lookup_name = 'contains'
gis_lookups[ContainsLookup.lookup_name] = ContainsLookup


class ContainsProperlyLookup(GISLookup):
    """Geometric 'contains properly' predicate."""
    lookup_name = 'contains_properly'
gis_lookups[ContainsProperlyLookup.lookup_name] = ContainsProperlyLookup


class CoveredByLookup(GISLookup):
    """Geometric 'covered by' predicate."""
    lookup_name = 'coveredby'
gis_lookups[CoveredByLookup.lookup_name] = CoveredByLookup


class CoversLookup(GISLookup):
    """Geometric 'covers' predicate."""
    lookup_name = 'covers'
gis_lookups[CoversLookup.lookup_name] = CoversLookup


class CrossesLookup(GISLookup):
    """Geometric 'crosses' predicate."""
    lookup_name = 'crosses'
gis_lookups[CrossesLookup.lookup_name] = CrossesLookup


class DisjointLookup(GISLookup):
    """Geometric 'disjoint' predicate."""
    lookup_name = 'disjoint'
gis_lookups[DisjointLookup.lookup_name] = DisjointLookup


class EqualsLookup(GISLookup):
    """Geometric 'equals' predicate."""
    lookup_name = 'equals'
gis_lookups[EqualsLookup.lookup_name] = EqualsLookup


class IntersectsLookup(GISLookup):
    """Geometric 'intersects' predicate."""
    lookup_name = 'intersects'
gis_lookups[IntersectsLookup.lookup_name] = IntersectsLookup


class OverlapsLookup(GISLookup):
    """Geometric 'overlaps' predicate."""
    lookup_name = 'overlaps'
gis_lookups[OverlapsLookup.lookup_name] = OverlapsLookup
class RelateLookup(GISLookup):
    """The 'relate' lookup: compares geometries via a DE-9IM
    intersection-matrix pattern passed as the second rhs element."""
    lookup_name = 'relate'
    sql_template = '%(func)s(%(lhs)s, %(rhs)s, %%s)'
    pattern_regex = re.compile(r'^[012TF\*]{9}$')

    def get_db_prep_lookup(self, value, connection):
        # The rhs must be a (geometry, pattern) pair.
        if len(value) != 2:
            raise ValueError('relate must be passed a two-tuple')
        # Let the backend validate the pattern if it knows how; otherwise
        # fall back to the generic intersection-matrix regex check.
        backend_op = connection.ops.gis_operators[self.lookup_name]
        checker = getattr(backend_op, 'check_relate_argument', None)
        if checker is not None:
            checker(value[1])
        else:
            pattern = value[1]
            if not isinstance(pattern, six.string_types) or not self.pattern_regex.match(pattern):
                raise ValueError('Invalid intersection matrix pattern "%s".' % pattern)
        return super(RelateLookup, self).get_db_prep_lookup(value, connection)
gis_lookups[RelateLookup.lookup_name] = RelateLookup
class TouchesLookup(GISLookup):
    """Geometric 'touches' predicate."""
    lookup_name = 'touches'
gis_lookups[TouchesLookup.lookup_name] = TouchesLookup


class WithinLookup(GISLookup):
    """Geometric 'within' predicate."""
    lookup_name = 'within'
gis_lookups[WithinLookup.lookup_name] = WithinLookup
class DistanceLookupBase(GISLookup):
    """Shared machinery for distance lookups (dwithin, distance_gt, ...)."""
    distance = True
    sql_template = '%(func)s(%(lhs)s, %(rhs)s) %(op)s %%s'

    def get_db_prep_lookup(self, value, connection):
        if not isinstance(value, (tuple, list)):
            # A bare geometry: fall back to the default adaptation.
            return super(DistanceLookupBase, self).get_db_prep_lookup(value, connection)
        if not 2 <= len(value) <= 3:
            raise ValueError("2 or 3-element tuple required for '%s' lookup." % self.lookup_name)
        # First element is the geometry; the remaining element(s) form the
        # distance spec, converted into the units of the geometry field.
        params = [connection.ops.Adapter(value[0])]
        params += connection.ops.get_distance(self.lhs.output_field, value[1:], self.lookup_name)
        return ('%s', params)
class DWithinLookup(DistanceLookupBase):
    """True when the geometries lie within the given distance of each other."""
    lookup_name = 'dwithin'
    sql_template = '%(func)s(%(lhs)s, %(rhs)s, %%s)'
gis_lookups[DWithinLookup.lookup_name] = DWithinLookup


class DistanceGTLookup(DistanceLookupBase):
    """Distance strictly greater than the given value."""
    lookup_name = 'distance_gt'
gis_lookups[DistanceGTLookup.lookup_name] = DistanceGTLookup


class DistanceGTELookup(DistanceLookupBase):
    """Distance greater than or equal to the given value."""
    lookup_name = 'distance_gte'
gis_lookups[DistanceGTELookup.lookup_name] = DistanceGTELookup


class DistanceLTLookup(DistanceLookupBase):
    """Distance strictly less than the given value."""
    lookup_name = 'distance_lt'
gis_lookups[DistanceLTLookup.lookup_name] = DistanceLTLookup


class DistanceLTELookup(DistanceLookupBase):
    """Distance less than or equal to the given value."""
    lookup_name = 'distance_lte'
gis_lookups[DistanceLTELookup.lookup_name] = DistanceLTELookup
| mit |
tseaver/gcloud-python | runtimeconfig/tests/unit/test_client.py | 3 | 1581 | # Copyright 2016 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import mock
def _make_credentials():
    """Return a mock credentials object spec'd against google-auth."""
    # Imported lazily so merely importing this test module does not
    # require google-auth at collection time.
    from google.auth.credentials import Credentials
    return mock.Mock(spec=Credentials)
class TestClient(unittest.TestCase):
    @staticmethod
    def _get_target_class():
        # Lazy import keeps collection independent of the package import.
        from google.cloud.runtimeconfig.client import Client
        return Client

    def _make_one(self, *args, **kw):
        klass = self._get_target_class()
        return klass(*args, **kw)

    def test_config(self):
        project = 'PROJECT'
        config_name = 'config_name'
        creds = _make_credentials()
        client_obj = self._make_one(project=project, credentials=creds)
        new_config = client_obj.config(config_name)
        self.assertEqual(new_config.name, config_name)
        self.assertIs(new_config._client, client_obj)
        self.assertEqual(new_config.project, project)
        expected_full_name = 'projects/%s/configs/%s' % (project, config_name)
        self.assertEqual(new_config.full_name, expected_full_name)
        self.assertFalse(new_config.description)
| apache-2.0 |
eliasdesousa/indico | indico/modules/events/registration/stats.py | 2 | 19714 | # This file is part of Indico.
# Copyright (C) 2002 - 2017 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import division, unicode_literals
from collections import defaultdict, namedtuple
from itertools import chain, groupby
from indico.modules.events.registration.models.registrations import RegistrationData
from indico.util.date_time import now_utc
from indico.util.i18n import _
class StatsBase(object):
    """Base class for registration form statistics.

    :param title: str -- the title for the stats box
    :param subtitle: str -- the subtitle for the stats box
    :param type: str -- the type used in Jinja to display the stats
    """

    def __init__(self, title, subtitle, type, **kwargs):
        # Cooperate with sibling bases in multiple-inheritance chains.
        super(StatsBase, self).__init__(**kwargs)
        self.title = title
        self.subtitle = subtitle
        self.type = type

    @property
    def is_currency_shown(self):
        # Subclasses that aggregate billing data override this.
        return False
class Cell(namedtuple('Cell', ['type', 'data', 'colspan', 'classes', 'qtip'])):
    """Holds the data and rendering type for one cell of a stats table.

    Valid types and the data they expect:
      - 'str':              str -- a plain string
      - 'progress':         (ratio, label) with 0 <= ratio <= 1
      - 'progress-stacked': ([ratios], label), ratios summing up to 1
      - 'currency':         float -- a numeric amount
      - 'icon':             str -- an icon name from `_icons.scss`
      - 'default':          no data; renders a placeholder dash
        (use Cell(type='str') for an empty cell)
    """
    # Fallback payload per cell type, used when no data is given.
    _type_defaults = {
        'str': '',
        'progress-stacked': [[0], '0'],
        'progress': (0, '0'),
        'currency': 0,
        'icon': 'warning'
    }

    def __new__(cls, type='default', data=None, colspan=1, classes=None, qtip=None):
        """Create a cell.

        :param type: str -- the type of data in the cell (see class docstring)
        :param data: the data for the cell (defaulted per type when None)
        :param colspan: int -- HTML colspan value for the cell
        :param classes: [str] -- HTML classes to apply to the cell
        :param qtip: str -- content for qtip
        """
        effective_classes = [] if classes is None else classes
        effective_data = cls._type_defaults.get(type) if data is None else data
        return super(Cell, cls).__new__(cls, type, effective_data, colspan, effective_classes, qtip)
class DataItem(namedtuple('DataItem', ['regs', 'attendance', 'capacity', 'billable', 'cancelled', 'price',
                                       'fixed_price', 'paid', 'paid_amount', 'unpaid', 'unpaid_amount'])):
    """Aggregation of registration numbers for one stats-table row.

    Fields: ``regs`` (number of registrants), ``attendance`` (people
    attending), ``capacity`` (maximum attendance, 0 meaning unlimited),
    ``billable``, ``cancelled``, ``price``, ``fixed_price`` (True when the
    price is per registrant only, False when accompanying guests pay too),
    ``paid``/``unpaid`` (registrant counts) and
    ``paid_amount``/``unpaid_amount`` (money totals).
    """

    def __new__(cls, regs=0, attendance=0, capacity=0, billable=False, cancelled=False, price=0, fixed_price=False,
                paid=0, paid_amount=0, unpaid=0, unpaid_amount=0):
        # Every field defaults to zero/False so partial aggregations can be
        # built with keyword arguments only.
        return super(DataItem, cls).__new__(cls, regs, attendance, capacity, billable, cancelled, price,
                                            fixed_price, paid, paid_amount, unpaid, unpaid_amount)
class FieldStats(object):
    """Holds stats for a registration form field."""

    def __init__(self, field, **kwargs):
        # Field stats render as a table unless a subclass says otherwise.
        kwargs.setdefault('type', 'table')
        super(FieldStats, self).__init__(**kwargs)
        self._field = field
        self._regitems = self._get_registration_data(field)
        self._choices = self._get_choices(field)
        self._data, self._show_billing_info = self._build_data()

    @property
    def is_currency_shown(self):
        return self._show_billing_info

    def _get_choices(self, field):
        """Map choice id -> choice dict for the field's current version."""
        return {choice['id']: choice for choice in field.current_data.versioned_data['choices']}

    def _get_registration_data(self, field):
        """Return non-empty registration data of active registrations,
        across every stored version of the field."""
        registration_ids = [r.id for r in field.registration_form.active_registrations]
        field_data_ids = [data.id for data in field.data_versions]
        return RegistrationData.find_all(RegistrationData.registration_id.in_(registration_ids),
                                         RegistrationData.field_data_id.in_(field_data_ids),
                                         RegistrationData.data != {})

    def _build_data(self):
        """Build data from registration data and field choices

        :returns: (dict, bool) -- the data and a boolean to indicate
                  whether the data contains billing information or not.
        """
        choices = defaultdict(dict)
        data = defaultdict(list)
        sorted_regitems = sorted(self._regitems, key=self._build_key)
        # Aggregate billed and unbilled registration items separately.
        # Bugfix: the groupby loops previously rebound the name `regitems`
        # as their loop variable, so whenever any billed item existed the
        # second pass iterated an exhausted group instead of the sorted
        # list and unbilled choices lost their registration counts.
        # Iterate the full sorted list in both passes.
        for k, group in groupby((regitem for regitem in sorted_regitems if regitem.price),
                                key=self._build_key):
            choices['billed'][k] = self._build_regitems_data(k, list(group))
        for k, group in groupby((regitem for regitem in sorted_regitems if not regitem.price),
                                key=self._build_key):
            choices['not_billed'][k] = self._build_regitems_data(k, list(group))
        # Make sure choices nobody picked still show up (with empty counts).
        for item in self._choices.itervalues():
            key = 'billed' if item['price'] else 'not_billed'
            choices[key].setdefault(self._build_key(item), self._build_choice_data(item))
        # Group by (caption, id): billing variants of one choice share a row.
        for key, choice in chain(choices['billed'].iteritems(), choices['not_billed'].iteritems()):
            data[key[:2]].append(choice)
        return data, bool(choices['billed'])

    def get_table(self):
        """Returns a table containing the stats for each item.

        :return: dict -- A table with a list of head cells
                 (key: `'head'`) and a list of rows (key: `'rows'`)
                 where each row is a list of cells.
        """
        table = defaultdict(list)
        table['head'] = self._get_table_head()
        for (name, id), data_items in sorted(self._data.iteritems()):
            total_regs = sum(detail.regs for detail in data_items)
            # One main row per choice; items with several billing variants
            # get a header row followed by one sub-row per variant.
            table['rows'].append(('single-row' if len(data_items) == 1 else 'header-row',
                                  self._get_main_row_cells(data_items, name, total_regs) +
                                  self._get_billing_cells(data_items)))
            if len(data_items) == 1:
                continue
            table['rows'].extend(('sub-row',
                                  self._get_sub_row_cells(data_item, total_regs) +
                                  self._get_billing_details_cells(data_item))
                                 for data_item in data_items)
        return table

    def _get_billing_cells(self, data_items):
        """Return cells with billing information from data items

        :params data_items: [DataItem] -- Data items containing billing info
        :returns: [Cell] -- Cells containing billing information.
        """
        if not self._show_billing_info:
            return []
        if len(data_items) == 1:
            return self._get_billing_details_cells(data_items[0])
        # Aggregate the billable variants of this choice.
        paid = sum(detail.paid for detail in data_items if detail.billable)
        paid_amount = sum(detail.paid_amount for detail in data_items if detail.billable)
        unpaid = sum(detail.unpaid for detail in data_items if detail.billable)
        unpaid_amount = sum(detail.unpaid_amount for detail in data_items if detail.billable)
        total = paid + unpaid
        total_amount = paid_amount + unpaid_amount
        progress = [[paid / total, unpaid / total], '{} / {}'.format(paid, total)] if total else None
        return [Cell(),
                Cell(type='progress-stacked', data=progress, classes=['paid-unpaid-progress']),
                Cell(type='currency', data=paid_amount, classes=['paid-amount', 'stick-left']),
                Cell(type='currency', data=unpaid_amount, classes=['unpaid-amount', 'stick-right']),
                Cell(type='currency', data=total_amount)]

    def _get_billing_details_cells(self, detail):
        """Return cells with detailed billing information

        :params detail: DataItem -- Data item containing billing info
        :returns: [Cell] -- Cells containing billing information.
        """
        if not self._show_billing_info:
            return []
        if not detail.billable:
            # Free item: show explicit zeros so columns stay aligned.
            return [Cell(type='currency', data=0),
                    Cell(),
                    Cell(type='currency', data=0, classes=['paid-amount', 'stick-left']),
                    Cell(type='currency', data=0, classes=['unpaid-amount', 'stick-right']),
                    Cell(type='currency', data=0)]
        progress = [[detail.paid / detail.regs, detail.unpaid / detail.regs],
                    '{0.paid} / {0.regs}'.format(detail)] if detail.regs else None
        return [Cell(type='currency', data=float(detail.price)),
                Cell(type='progress-stacked', data=progress, classes=['paid-unpaid-progress']),
                Cell(type='currency', data=detail.paid_amount, classes=['paid-amount', 'stick-left']),
                Cell(type='currency', data=detail.unpaid_amount, classes=['unpaid-amount', 'stick-right']),
                Cell(type='currency', data=detail.paid_amount + detail.unpaid_amount)]

    def _build_key(self, item):
        """Return the key to sort and group field choices

        It must include the caption and the id of the item as well as other
        billing information by which to aggregate.

        :param item: the item from which to derive a key.
        :returns: tuple -- tuple defining the key.
        """
        raise NotImplementedError

    def _build_regitems_data(self, key, regitems):
        """Return a `DataItem` aggregating data from registration items

        :param regitems: list -- list of registrations items to be aggregated
        :returns: DataItem -- the data aggregation
        """
        raise NotImplementedError

    def _build_choice_data(self, item):
        """Return a `DataItem` for a choice with no registration items.

        :param item: dict -- choice to be aggregated
        :returns: DataItem -- the aggregation of the `item`
        """
        raise NotImplementedError

    def _get_table_head(self):
        """Return the list of `Cell` forming the headers of the table.

        :returns: [Cell] -- the headers of the table.
        """
        raise NotImplementedError

    def _get_main_row_cells(self, item_details, choice_caption, total_regs):
        """Return the cells of the main (header or single) row of the table.

        Each `item` has a main row. The row is a list of `Cell` which matches
        the table head.

        :param item_details: [DataItem] -- list of aggregations for the item
        :param choice_caption: str -- the item's name
        :param total_regs: int -- the number of registrations for the item
        :returns: [Cell] -- the list of cells constituting the row.
        """
        raise NotImplementedError

    def _get_sub_row_cells(self, details, total_regs):
        """Return the cells of a sub row of the table.

        An `item` can have a sub row. The row is a list of `Cell` which
        matches the table head.

        :param details: DataItem -- aggregation for the item
        :param total_regs: int -- the number of registrations for the item
        :returns: [Cell] -- the list of cells constituting the row.
        """
        raise NotImplementedError
class OverviewStats(StatsBase):
    """Generic stats for a registration form"""

    def __init__(self, regform):
        super(OverviewStats, self).__init__(title=_("Overview"), subtitle="", type='overview')
        self.regform = regform
        self.registrations = regform.active_registrations
        self.countries, self.num_countries = self._get_countries()
        self.availability = self._get_availibility()
        # Days until registration closes; 0 when already over or open-ended.
        self.days_left = max((self.regform.end_dt - now_utc()).days, 0) if self.regform.end_dt else 0

    def _get_countries(self):
        """Count registrants per country.

        :returns: ([(count, country)], int) -- up to 15 entries with the
                  highest counts, plus the number of distinct countries.
        """
        countries = defaultdict(int)
        # The registrations are not pre-sorted by country, so groupby may
        # yield the same country several times; the `+=` accumulation below
        # keeps the totals correct regardless.
        for country, regs in groupby(self.registrations, lambda x: x.get_personal_data().get('country')):
            if country is None:
                continue
            countries[country] += sum(1 for x in regs)
        if not countries:
            return [], 0
        # Sort by highest number of people per country then alphabetically per countries' name
        countries = sorted(((val, name) for name, val in countries.iteritems()),
                           key=lambda x: (-x[0], x[1]), reverse=True)
        # The reverse sort over (-count, name) leaves the list in ascending
        # count order, so the last 15 entries are the most populous ones.
        return countries[-15:], len(countries)

    def _get_availibility(self):
        # (sic) method name misspelling kept: it is part of the class's
        # external interface (consumed via self.availability elsewhere).
        # Returns (registered, limit, fraction); all zeros means "nothing to
        # show" (no limit configured, or the limit was already reached).
        limit = self.regform.registration_limit
        if not limit or self.regform.limit_reached:
            return (0, 0, 0)
        # True division (module imports division from __future__).
        return (len(self.registrations), limit, len(self.registrations) / limit)
class AccommodationStats(FieldStats, StatsBase):
    """Stats table for an accommodation field of a registration form."""

    def __init__(self, field):
        super(AccommodationStats, self).__init__(title=_("Accommodation"), subtitle=field.title, field=field)
        # Show the occupancy column only if at least one aggregate has a
        # places (capacity) limit.
        self.has_capacity = any(detail.capacity for acco_details in self._data.itervalues()
                                for detail in acco_details if detail.capacity)

    def _get_occupancy(self, acco_details):
        # Occupancy cell for a whole accommodation row (all billing variants).
        if not self.has_capacity:
            return []
        capacity = max(d.capacity for d in acco_details)
        if not capacity:
            # This particular accommodation is unlimited: placeholder cell.
            return [Cell()]
        regs = sum(d.regs for d in acco_details)
        return [Cell(type='progress', data=(regs / capacity, '{} / {}'.format(regs, capacity)))]

    def _get_occupancy_details(self, details):
        # Occupancy cell for a single aggregate (sub-row).
        if not self.has_capacity:
            return []
        if not details.capacity:
            return [Cell()]
        return [Cell(type='progress',
                     data=(details.regs / details.capacity, '{0.regs} / {0.capacity}'.format(details)))]

    def _build_key(self, obj):
        # Accepts both RegistrationData rows and raw choice dicts; the key
        # is (caption, id, price) so billing variants group separately.
        choice_id = obj.data['choice'] if isinstance(obj, RegistrationData) else obj['id']
        choice_price = obj.price if isinstance(obj, RegistrationData) else obj['price']
        choice_caption = self._field.data['captions'][choice_id]
        return choice_caption, choice_id, choice_price

    def _build_regitems_data(self, key, regitems):
        name, id, price = key
        # Choice metadata comes from the exact field version each
        # registration item was made against.
        choices = lambda r: {choice['id']: choice for choice in r.field_data.versioned_data['choices']}
        data = {'regs': len(regitems),
                'capacity': next((choices(regitem)[regitem.data['choice']]['places_limit'] for regitem in regitems), 0),
                'cancelled': any(not choices(regitem)[regitem.data['choice']]['is_enabled'] for regitem in regitems),
                'billable': bool(price)}
        if data['billable']:
            data['price'] = price
            data['paid'] = sum(1 for regitem in regitems if regitem.registration.is_paid)
            data['paid_amount'] = sum(float(price) for regitem in regitems if regitem.registration.is_paid)
            data['unpaid'] = sum(1 for regitem in regitems if not regitem.registration.is_paid)
            data['unpaid_amount'] = sum(float(price) for regitem in regitems if not regitem.registration.is_paid)
        return DataItem(**data)

    def _build_choice_data(self, choice):
        # Aggregate for a choice nobody picked (zero registrations).
        data = {'capacity': choice['places_limit'],
                'cancelled': not choice['is_enabled'],
                'billable': bool(choice['price'])}
        if choice['price']:
            data['price'] = choice['price']
        return DataItem(**data)

    def _get_table_head(self):
        # NOTE(review): "Accomodation" is misspelled in this UI string; it is
        # runtime-visible text, so it is left untouched here.
        head = [Cell(type='str', data=_("Accomodation")), Cell(type='str', data=_("Registrants"))]
        if self.has_capacity:
            head.append(Cell(type='str', data=_("Occupancy")))
        if self._show_billing_info:
            head.extend([Cell(type='str', data=_("Price")),
                         Cell(type='str', data=_("Accommodations paid")),
                         Cell(type='str', data=_("Total paid (unpaid)"), colspan=2),
                         Cell(type='str', data=_("Total"))])
        return head

    def _get_main_row_cells(self, data_items, choice_caption, total_regs):
        active_registrations = self._field.registration_form.active_registrations
        # Strike the caption out if any variant of the choice was disabled.
        cancelled = any(d.cancelled for d in data_items)
        return [
            Cell(type='str', data=' ' + choice_caption, classes=['cancelled-item'] if cancelled else []),
            Cell(type='progress', data=((total_regs / len(active_registrations),
                                        '{} / {}'.format(total_regs, len(active_registrations)))
                                        if active_registrations else None))
        ] + self._get_occupancy(data_items)

    def _get_sub_row_cells(self, data_item, total_regs):
        return [
            Cell(type='str'),
            Cell(type='progress', data=((data_item.regs / total_regs,
                                        '{} / {}'.format(data_item.regs, total_regs))
                                        if total_regs else None)),
        ] + self._get_occupancy_details(data_item)
| gpl-3.0 |
yestech/gae-django-template | django/template/context.py | 97 | 6298 | from copy import copy
from django.core.exceptions import ImproperlyConfigured
from django.utils.importlib import import_module
from django.http import HttpRequest
# Cache of actual callables; filled lazily by get_standard_processors().
_standard_context_processors = None
# We need the CSRF processor no matter what the user has in their settings,
# because otherwise it is a security vulnerability, and we can't afford to leave
# this to human error or failure to read migration instructions.
_builtin_context_processors = ('django.core.context_processors.csrf',)
class ContextPopException(Exception):
    """Raised when pop() has been called more times than push()."""
    pass
class EmptyClass(object):
    """No-op class whose constructor takes no arguments; used by
    BaseContext.__copy__ to build an instance without running __init__."""
    pass
class BaseContext(object):
    """A stack of dicts; lookups search from the most recent layer down."""

    def __init__(self, dict_=None):
        self.dicts = [dict_ or {}]

    def __copy__(self):
        # Build the copy via EmptyClass so subclass __init__ methods
        # (which may take required arguments) are never invoked.
        duplicate = EmptyClass()
        duplicate.__class__ = self.__class__
        duplicate.__dict__ = self.__dict__.copy()
        duplicate.dicts = duplicate.dicts[:]
        return duplicate

    def __repr__(self):
        return repr(self.dicts)

    def __iter__(self):
        # Most recently pushed layer first.
        return iter(reversed(self.dicts))

    def push(self):
        layer = {}
        self.dicts.append(layer)
        return layer

    def pop(self):
        # The initial layer can never be popped.
        if len(self.dicts) == 1:
            raise ContextPopException
        return self.dicts.pop()

    def __setitem__(self, key, value):
        """Set a variable in the current (topmost) layer."""
        self.dicts[-1][key] = value

    def __getitem__(self, key):
        """Get a variable's value, starting at the current layer and going upward."""
        for layer in reversed(self.dicts):
            if key in layer:
                return layer[key]
        raise KeyError(key)

    def __delitem__(self, key):
        """Delete a variable from the current (topmost) layer."""
        del self.dicts[-1][key]

    def has_key(self, key):
        for layer in self.dicts:
            if key in layer:
                return True
        return False

    def __contains__(self, key):
        # Delegate through has_key so subclasses overriding it (e.g.
        # RenderContext) also change `in` behavior.
        return self.has_key(key)

    def get(self, key, otherwise=None):
        for layer in reversed(self.dicts):
            if key in layer:
                return layer[key]
        return otherwise
class Context(BaseContext):
    """A stack container for variable context."""

    def __init__(self, dict_=None, autoescape=True, current_app=None, use_l10n=None):
        self.autoescape = autoescape
        self.use_l10n = use_l10n
        self.current_app = current_app
        self.render_context = RenderContext()
        super(Context, self).__init__(dict_)

    def __copy__(self):
        clone = super(Context, self).__copy__()
        # The render context holds per-render state and must not be shared.
        clone.render_context = copy(self.render_context)
        return clone

    def update(self, other_dict):
        """Push other_dict onto the stack of dictionaries in the Context."""
        if not hasattr(other_dict, '__getitem__'):
            raise TypeError('other_dict must be a mapping (dictionary-like) object.')
        self.dicts.append(other_dict)
        return other_dict

    def new(self, values=None):
        """
        Return a new Context with the same 'autoescape' value etc, but with
        only the values given in 'values' stored.
        """
        cls = self.__class__
        return cls(dict_=values, autoescape=self.autoescape,
                   current_app=self.current_app, use_l10n=self.use_l10n)
class RenderContext(BaseContext):
    """
    A stack container for storing Template state.

    It gives template Nodes a safe place to keep state between invocations
    of their `render` method, with stricter scoping than Context: the stack
    is pushed before each template renders, and name resolution only looks
    at the topmost layer. Variables are therefore local to one template and
    cannot leak into the rendering of other templates.
    """

    def __iter__(self):
        # Unlike BaseContext, only the topmost layer is visible.
        return iter(self.dicts[-1])

    def has_key(self, key):
        return key in self.dicts[-1]

    def get(self, key, otherwise=None):
        top = self.dicts[-1]
        if key in top:
            return top[key]
        return otherwise
# This is a function rather than module-level procedural code because we only
# want it to execute if somebody uses RequestContext.
def get_standard_processors():
    """Import, cache and return the tuple of context-processor callables
    (the built-in CSRF processor plus TEMPLATE_CONTEXT_PROCESSORS)."""
    from django.conf import settings
    global _standard_context_processors
    if _standard_context_processors is None:
        processors = []
        collect = []
        # The CSRF processor always comes first, regardless of settings.
        collect.extend(_builtin_context_processors)
        collect.extend(settings.TEMPLATE_CONTEXT_PROCESSORS)
        for path in collect:
            # Split 'package.module.attr' into module path and attribute name.
            i = path.rfind('.')
            module, attr = path[:i], path[i+1:]
            try:
                mod = import_module(module)
            except ImportError, e:
                raise ImproperlyConfigured('Error importing request processor module %s: "%s"' % (module, e))
            try:
                func = getattr(mod, attr)
            except AttributeError:
                raise ImproperlyConfigured('Module "%s" does not define a "%s" callable request processor' % (module, attr))
            processors.append(func)
        _standard_context_processors = tuple(processors)
    return _standard_context_processors
class RequestContext(Context):
    """
    A Context that automatically populates itself from the processors
    defined in TEMPLATE_CONTEXT_PROCESSORS, plus any extra callables given
    via the "processors" keyword argument, each applied to the request.
    """
    def __init__(self, request, dict=None, processors=None, current_app=None, use_l10n=None):
        Context.__init__(self, dict, current_app=current_app, use_l10n=use_l10n)
        extra = () if processors is None else tuple(processors)
        for processor in get_standard_processors() + extra:
            self.update(processor(request))
| bsd-3-clause |
you21979/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/steps/postdiffforrevert.py | 130 | 2470 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from webkitpy.common.net.bugzilla import Attachment
from webkitpy.tool.steps.abstractstep import AbstractStep
class PostDiffForRevert(AbstractStep):
    """Step that posts a rollout (revert) patch to Bugzilla.

    The diff is attached to the original bug and flagged commit-queue+ so the
    commit queue can build, test and land the rollout automatically.
    """

    def run(self, state):
        # User-facing instructions attached as the patch comment.  The
        # backslash line continuations keep this one string; the only real
        # newlines are the explicit \n escapes.
        comment_text = "Any committer can land this patch automatically by \
marking it commit-queue+. The commit-queue will build and test \
the patch before landing to ensure that the rollout will be \
successful. This process takes approximately 15 minutes.\n\n\
If you would like to land the rollout faster, you can use the \
following command:\n\n\
webkit-patch land-attachment ATTACHMENT_ID\n\n\
where ATTACHMENT_ID is the ID of this attachment."
        # Attach the cached diff; the attachment name is e.g.
        # "<rollout preamble>r12345".
        self._tool.bugs.add_patch_to_bug(
            state["bug_id"],
            self.cached_lookup(state, "diff"),
            "%s%s" % (Attachment.rollout_preamble, state["revision"]),
            comment_text=comment_text,
            mark_for_review=False,
            mark_for_commit_queue=True)
| bsd-3-clause |
ptemplier/ansible | lib/ansible/modules/system/osx_defaults.py | 26 | 14037 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, GeekChimp - Franck Nijhof <franck@geekchimp.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: osx_defaults
author: Franck Nijhof (@frenck)
short_description: osx_defaults allows users to read, write, and delete Mac OS X user defaults from Ansible
description:
- osx_defaults allows users to read, write, and delete Mac OS X user defaults from Ansible scripts.
Mac OS X applications and other programs use the defaults system to record user preferences and other
information that must be maintained when the applications aren't running (such as default font for new
documents, or the position of an Info panel).
version_added: "2.0"
options:
domain:
description:
- The domain is a domain name of the form com.companyname.appname.
required: false
default: NSGlobalDomain
host:
description:
- The host on which the preference should apply. The special value "currentHost" corresponds to the
"-currentHost" switch of the defaults commandline tool.
required: false
default: null
version_added: "2.1"
key:
description:
- The key of the user preference
required: true
type:
description:
- The type of value to write.
required: false
default: string
choices: [ "array", "bool", "boolean", "date", "float", "int", "integer", "string" ]
array_add:
description:
- Add new elements to the array for a key which has an array as its value.
required: false
default: false
choices: [ "true", "false" ]
value:
description:
- The value to write. Only required when state = present.
required: false
default: null
state:
description:
- The state of the user defaults
required: false
default: present
choices: [ "present", "absent" ]
notes:
- Apple Mac caches defaults. You may need to logout and login to apply the changes.
'''
EXAMPLES = '''
- osx_defaults:
domain: com.apple.Safari
key: IncludeInternalDebugMenu
type: bool
value: true
state: present
- osx_defaults:
domain: NSGlobalDomain
key: AppleMeasurementUnits
type: string
value: Centimeters
state: present
- osx_defaults:
domain: com.apple.screensaver
host: currentHost
key: showClock
type: int
value: 1
- osx_defaults:
key: AppleMeasurementUnits
type: string
value: Centimeters
- osx_defaults:
key: AppleLanguages
type: array
value:
- en
- nl
- osx_defaults:
domain: com.geekchimp.macable
key: ExampleKeyToRemove
state: absent
'''
import datetime
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import binary_type, text_type
# exceptions --------------------------------------------------------------- {{{
class OSXDefaultsException(Exception):
    """Raised for any failure while reading, writing or deleting defaults."""
# /exceptions -------------------------------------------------------------- }}}
# class MacDefaults -------------------------------------------------------- {{{
class OSXDefaults(object):
    """Manage Mac OS X user defaults through the ``defaults`` CLI tool."""

    def __init__(self, **kwargs):
        """Store parameters, locate the ``defaults`` binary, normalize value.

        Expected keyword arguments: module (AnsibleModule), domain, host,
        key, type, array_add, value, state and path.

        Raises:
            OSXDefaultsException: when the executable cannot be found, when
                state=present without a value, or when the value cannot be
                converted to the requested type.
        """
        # Current defaults value; populated later by read().
        self.current_value = None

        # Expose every given parameter as an instance attribute.
        for key, val in kwargs.items():
            setattr(self, key, val)

        # Try to find the defaults executable (also searching self.path).
        self.executable = self.module.get_bin_path(
            'defaults',
            required=False,
            opt_dirs=self.path.split(':'),
        )
        if not self.executable:
            raise OSXDefaultsException("Unable to locate defaults executable.")

        # When state is present, we require a value parameter
        if self.state == "present" and self.value is None:
            raise OSXDefaultsException("Missing value parameter")

        # Coerce the given value into the requested type up front.
        self.value = self._convert_type(self.type, self.value)

    # tools --------------------------------------------------------------- {{{
    def _convert_type(self, type, value):
        """Convert *value* to the given defaults *type*, or raise."""
        if type == "string":
            return str(value)
        elif type in ["bool", "boolean"]:
            if isinstance(value, (binary_type, text_type)):
                value = value.lower()
            if value in [True, 1, "true", "1", "yes"]:
                return True
            elif value in [False, 0, "false", "0", "no"]:
                return False
            raise OSXDefaultsException("Invalid boolean value: {0}".format(repr(value)))
        elif type == "date":
            try:
                return datetime.datetime.strptime(value.split("+")[0].strip(), "%Y-%m-%d %H:%M:%S")
            except ValueError:
                raise OSXDefaultsException(
                    "Invalid date value: {0}. Required format yyy-mm-dd hh:mm:ss.".format(repr(value))
                )
        elif type in ["int", "integer"]:
            # BUGFIX: str(value).isdigit() rejected negative integers; allow
            # an optional sign while still rejecting floats and booleans.
            if not re.match(r'[-+]?\d+$', str(value)):
                raise OSXDefaultsException("Invalid integer value: {0}".format(repr(value)))
            return int(value)
        elif type == "float":
            try:
                value = float(value)
            except ValueError:
                raise OSXDefaultsException("Invalid float value: {0}".format(repr(value)))
            return value
        elif type == "array":
            if not isinstance(value, list):
                raise OSXDefaultsException("Invalid value. Expected value to be an array")
            return value
        raise OSXDefaultsException('Type is not supported: {0}'.format(type))

    def _host_args(self):
        """Return the commandline arguments derived from the host attribute."""
        if self.host is None:
            return []
        elif self.host == 'currentHost':
            return ['-currentHost']
        else:
            return ['-host', self.host]

    def _base_command(self):
        """Return the "defaults" executable plus any common base arguments."""
        return [self.executable] + self._host_args()

    @staticmethod
    def _convert_defaults_str_to_list(value):
        """Convert "defaults read" array output to a python list."""
        # Split output of defaults. Every line contains a value
        value = value.splitlines()

        # Remove first and last item; those are the "(" / ")" delimiters,
        # not actual values.
        value.pop(0)
        value.pop(-1)

        # Remove extra spaces and the trailing comma (,) from each value.
        value = [re.sub(',$', '', x.strip(' ')) for x in value]

        return value
    # /tools -------------------------------------------------------------- }}}

    # commands ------------------------------------------------------------ {{{
    def read(self):
        """Read the value of this domain & key into self.current_value."""
        # First try to find out the type of the stored value.
        rc, out, err = self.module.run_command(self._base_command() + ["read-type", self.domain, self.key])

        # If RC is 1, the key does not exist.
        if rc == 1:
            return None

        # Any other non-zero RC is a real failure.
        if rc != 0:
            raise OSXDefaultsException("An error occurred while reading key type from defaults: " + out)

        # Parse the type from output such as "Type is string".
        type = out.strip().replace('Type is ', '')

        # Now get the current value.
        rc, out, err = self.module.run_command(self._base_command() + ["read", self.domain, self.key])
        out = out.strip()

        # A non-zero RC at this point is kinda strange...
        if rc != 0:
            raise OSXDefaultsException("An error occurred while reading key value from defaults: " + out)

        # Convert string output to a list when the stored type is array.
        if type == "array":
            out = self._convert_defaults_str_to_list(out)

        # Store the current value, coerced to the detected type.
        self.current_value = self._convert_type(type, out)

    def write(self):
        """Write self.value to this domain & key via defaults."""
        # Convert python values to what the defaults commandline understands.
        if isinstance(self.value, bool):
            if self.value:
                value = "TRUE"
            else:
                value = "FALSE"
        elif isinstance(self.value, (int, float)):
            value = str(self.value)
        elif self.array_add and self.current_value is not None:
            # Only pass the elements that are not present yet.
            value = list(set(self.value) - set(self.current_value))
        elif isinstance(self.value, datetime.datetime):
            value = self.value.strftime('%Y-%m-%d %H:%M:%S')
        else:
            value = self.value

        # When the type is array and array_add is enabled, morph the type :)
        if self.type == "array" and self.array_add:
            self.type = "array-add"

        # All values should be a list, for easy passing it to the command.
        if not isinstance(value, list):
            value = [value]

        rc, out, err = self.module.run_command(
            self._base_command() + ['write', self.domain, self.key, '-' + self.type] + value)

        if rc != 0:
            raise OSXDefaultsException('An error occurred while writing value to defaults: ' + out)

    def delete(self):
        """Delete this defaults key from the domain."""
        rc, out, err = self.module.run_command(self._base_command() + ['delete', self.domain, self.key])
        if rc != 0:
            raise OSXDefaultsException("An error occurred while deleting key from defaults: " + out)
    # /commands ----------------------------------------------------------- }}}

    # run ----------------------------------------------------------------- {{{
    def run(self):
        """Apply the requested state; return True when a change was made."""
        # Get the current value from defaults.
        self.read()

        # Handle absent state.
        if self.state == "absent":
            if self.current_value is None:
                return False
            if self.module.check_mode:
                return True
            self.delete()
            return True

        # There is a type mismatch! Given type does not match the type in defaults.
        value_type = type(self.value)
        if self.current_value is not None and not isinstance(self.current_value, value_type):
            raise OSXDefaultsException("Type mismatch. Type in defaults: " + type(self.current_value).__name__)

        # Current value matches the given value: nothing to do.
        # Arrays need extra care (set comparison / array_add difference).
        if self.type == "array" and self.current_value is not None and not self.array_add and \
                set(self.current_value) == set(self.value):
            return False
        elif self.type == "array" and self.current_value is not None and self.array_add and \
                len(list(set(self.value) - set(self.current_value))) == 0:
            return False
        elif self.current_value == self.value:
            return False

        if self.module.check_mode:
            return True

        # Change/Create/Set given key/value for domain in defaults.
        self.write()
        return True
    # /run ---------------------------------------------------------------- }}}
# /class MacDefaults ------------------------------------------------------ }}}
# main -------------------------------------------------------------------- {{{
def main():
    """Ansible entry point: declare the module spec, run OSXDefaults, report."""
    module = AnsibleModule(
        argument_spec=dict(
            domain=dict(default="NSGlobalDomain", required=False),
            host=dict(default=None, required=False),
            key=dict(default=None),
            type=dict(
                default="string",
                required=False,
                choices=["array", "bool", "boolean", "date", "float", "int", "integer", "string"],
            ),
            array_add=dict(default=False, required=False, type='bool'),
            value=dict(default=None, required=False, type='raw'),
            state=dict(default="present", required=False, choices=["absent", "present"]),
            path=dict(default="/usr/bin:/usr/local/bin", required=False),
        ),
        supports_check_mode=True,
    )
    params = module.params

    try:
        defaults = OSXDefaults(module=module, domain=params['domain'], host=params['host'],
                               key=params['key'], type=params['type'],
                               array_add=params['array_add'], value=params['value'],
                               state=params['state'], path=params['path'])
        module.exit_json(changed=defaults.run())
    except OSXDefaultsException as e:
        module.fail_json(msg=e.message)
| gpl-3.0 |
Rona111/sale_commission | __unported__/picking_invoice_pricelist/stock.py | 3 | 2688 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2011 Pexego (<www.pexego.es>). All Rights Reserved
# $Santiago Argüeso Armesto$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from osv import osv, fields
class stock_picking(osv.osv):
    """Extend stock.picking so invoices built from pickings whose moves have
    no linked sale/purchase line are priced from the partner's pricelist."""
    _inherit = 'stock.picking'

    def _get_price_unit_invoice(self, cursor, user, move_line, type):
        # Only override when the move is not tied to a sale or purchase line;
        # otherwise the standard price computation applies.
        if not move_line.sale_line_id and not move_line.purchase_line_id:
            # Without a partner address there is no pricelist to use.
            if not move_line.picking_id.address_id:
                return super(stock_picking, self)._get_price_unit_invoice(cursor, user, move_line, type)
            # Pick the sale or purchase pricelist depending on invoice type.
            if type == 'out_invoice':
                pricelist = move_line.picking_id.address_id.partner_id.property_product_pricelist and move_line.picking_id.address_id.partner_id.property_product_pricelist.id or False
            else:
                pricelist = move_line.picking_id.address_id.partner_id.property_product_pricelist_purchase and move_line.picking_id.address_id.partner_id.property_product_pricelist_purchase.id or False
            # Pricelist price for this product/qty/partner at the move date.
            price = self.pool.get('product.pricelist').price_get(cursor, user, [pricelist],
                move_line.product_id.id, move_line.product_qty or 1.0, move_line.picking_id.address_id.partner_id.id, {
                    'uom': move_line.product_uom.id,
                    'date': move_line.date,
                })[pricelist]
            uom_id = move_line.product_id.uom_id.id
            uos_id = move_line.product_id.uos_id and move_line.product_id.uos_id.id or False
            coeff = move_line.product_id.uos_coeff
            # Convert the UoM price to a UoS price when the units differ.
            if uom_id != uos_id and coeff != 0:
                price_unit = price / coeff
                return price_unit
            return price
        return super(stock_picking, self)._get_price_unit_invoice(cursor, user, move_line, type)
stock_picking()
| agpl-3.0 |
brianlsharp/MissionPlanner | Lib/site-packages/numpy/core/code_generators/generate_numpy_api.py | 53 | 7203 | import os
import genapi
from genapi import TypeApi, GlobalVarApi, FunctionApi, BoolValuesApi
import numpy_api
h_template = r"""
#ifdef _MULTIARRAYMODULE
typedef struct {
PyObject_HEAD
npy_bool obval;
} PyBoolScalarObject;
extern NPY_NO_EXPORT PyTypeObject PyArrayNeighborhoodIter_Type;
extern NPY_NO_EXPORT PyBoolScalarObject _PyArrayScalar_BoolValues[2];
%s
#else
#if defined(PY_ARRAY_UNIQUE_SYMBOL)
#define PyArray_API PY_ARRAY_UNIQUE_SYMBOL
#endif
#if defined(NO_IMPORT) || defined(NO_IMPORT_ARRAY)
extern void **PyArray_API;
#else
#if defined(PY_ARRAY_UNIQUE_SYMBOL)
void **PyArray_API;
#else
static void **PyArray_API=NULL;
#endif
#endif
%s
#if !defined(NO_IMPORT_ARRAY) && !defined(NO_IMPORT)
static int
_import_array(void)
{
int st;
PyObject *numpy = PyImport_ImportModule("numpy.core.multiarray");
PyObject *c_api = NULL;
if (numpy == NULL) {
PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import");
return -1;
}
c_api = PyObject_GetAttrString(numpy, "_ARRAY_API");
Py_DECREF(numpy);
if (c_api == NULL) {
PyErr_SetString(PyExc_AttributeError, "_ARRAY_API not found");
return -1;
}
#if PY_VERSION_HEX >= 0x03000000
if (!PyCapsule_CheckExact(c_api)) {
PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is not PyCapsule object");
Py_DECREF(c_api);
return -1;
}
PyArray_API = (void **)PyCapsule_GetPointer(c_api, NULL);
#else
if (!PyCObject_Check(c_api)) {
PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is not PyCObject object");
Py_DECREF(c_api);
return -1;
}
PyArray_API = (void **)PyCObject_AsVoidPtr(c_api);
#endif
Py_DECREF(c_api);
if (PyArray_API == NULL) {
PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is NULL pointer");
return -1;
}
/* Perform runtime check of C API version */
if (NPY_VERSION != PyArray_GetNDArrayCVersion()) {
PyErr_Format(PyExc_RuntimeError, "module compiled against "\
"ABI version %%x but this version of numpy is %%x", \
(int) NPY_VERSION, (int) PyArray_GetNDArrayCVersion());
return -1;
}
if (NPY_FEATURE_VERSION > PyArray_GetNDArrayCFeatureVersion()) {
PyErr_Format(PyExc_RuntimeError, "module compiled against "\
"API version %%x but this version of numpy is %%x", \
(int) NPY_FEATURE_VERSION, (int) PyArray_GetNDArrayCFeatureVersion());
return -1;
}
/*
* Perform runtime check of endianness and check it matches the one set by
* the headers (npy_endian.h) as a safeguard
*/
st = PyArray_GetEndianness();
if (st == NPY_CPU_UNKNOWN_ENDIAN) {
PyErr_Format(PyExc_RuntimeError, "FATAL: module compiled as unknown endian");
return -1;
}
#if NPY_BYTE_ORDER == NPY_BIG_ENDIAN
if (st != NPY_CPU_BIG) {
PyErr_Format(PyExc_RuntimeError, "FATAL: module compiled as "\
"big endian, but detected different endianness at runtime");
return -1;
}
#elif NPY_BYTE_ORDER == NPY_LITTLE_ENDIAN
if (st != NPY_CPU_LITTLE) {
PyErr_Format(PyExc_RuntimeError, "FATAL: module compiled as "\
"little endian, but detected different endianness at runtime");
return -1;
}
#endif
return 0;
}
#if PY_VERSION_HEX >= 0x03000000
#define NUMPY_IMPORT_ARRAY_RETVAL NULL
#else
#define NUMPY_IMPORT_ARRAY_RETVAL
#endif
#define import_array() {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); return NUMPY_IMPORT_ARRAY_RETVAL; } }
#define import_array1(ret) {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); return ret; } }
#define import_array2(msg, ret) {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, msg); return ret; } }
#endif
#endif
"""
c_template = r"""
/* These pointers will be stored in the C-object for use in other
extension modules
*/
void *PyArray_API[] = {
%s
};
"""
c_api_header = """
===========
Numpy C-API
===========
"""
def generate_api(output_dir, force=False):
    """Generate the multiarray API header, C source and doc files.

    The files are rebuilt only when *force* is set or the API sources have
    changed since the last build.  Returns the tuple of target paths.
    """
    basename = 'multiarray_api'

    targets = (
        os.path.join(output_dir, '__%s.h' % basename),
        os.path.join(output_dir, '__%s.c' % basename),
        os.path.join(output_dir, '%s.txt' % basename),
    )
    sources = numpy_api.multiarray_api

    if force or genapi.should_rebuild(targets, [numpy_api.__file__, __file__]):
        do_generate_api(targets, sources)
    return targets
def do_generate_api(targets, sources):
    """Write the multiarray API header, C source and ReST documentation.

    Parameters
    ----------
    targets : tuple
        (header_file, c_file, doc_file) output paths.
    sources : tuple
        The numpy_api.multiarray_api tuple of API dictionaries.
    """
    header_file = targets[0]
    c_file = targets[1]
    doc_file = targets[2]

    global_vars = sources[0]
    global_vars_types = sources[1]
    scalar_bool_values = sources[2]
    types_api = sources[3]
    multiarray_funcs = sources[4]

    # Remove global_vars_types: it is not an api dict.
    multiarray_api = sources[:1] + sources[2:]

    module_list = []
    extension_list = []
    init_list = []

    # Check multiarray api indexes for duplicates/holes.
    multiarray_api_index = genapi.merge_api_dicts(multiarray_api)
    genapi.check_api_dict(multiarray_api_index)

    numpyapi_list = genapi.get_api_functions('NUMPY_API',
                                             multiarray_funcs)

    # Create dict name -> *Api instance.
    api_name = 'PyArray_API'
    multiarray_api_dict = {}
    for f in numpyapi_list:
        name = f.name
        index = multiarray_funcs[name]
        multiarray_api_dict[f.name] = FunctionApi(f.name, index, f.return_type,
                                                  f.args, api_name)

    for name, index in global_vars.items():
        type = global_vars_types[name]
        multiarray_api_dict[name] = GlobalVarApi(name, index, type, api_name)

    for name, index in scalar_bool_values.items():
        multiarray_api_dict[name] = BoolValuesApi(name, index, api_name)

    for name, index in types_api.items():
        multiarray_api_dict[name] = TypeApi(name, index, 'PyTypeObject', api_name)

    if len(multiarray_api_dict) != len(multiarray_api_index):
        a = set(multiarray_api_dict.keys())
        b = set(multiarray_api_index)
        # print() call form so the script also runs under Python 3 (the
        # original used a Python 2 print statement, a py3 syntax error).
        print("Set difference %s : %s" % ((a - b), (b - a)))
    assert len(multiarray_api_dict) == len(multiarray_api_index)

    extension_list = []
    for name, index in genapi.order_dict(multiarray_api_index):
        api_item = multiarray_api_dict[name]
        extension_list.append(api_item.define_from_array_api_string())
        init_list.append(api_item.array_api_define())
        module_list.append(api_item.internal_define())

    # Header: internal declarations plus the #define-based external API.
    # Context managers guarantee the files are closed even on error.
    with open(header_file, 'w') as fid:
        fid.write(h_template % ('\n'.join(module_list), '\n'.join(extension_list)))

    # C source: the PyArray_API pointer table.
    with open(c_file, 'w') as fid:
        fid.write(c_template % ',\n'.join(init_list))

    # Documentation: one ReST entry per exported function.
    with open(doc_file, 'w') as fid:
        fid.write(c_api_header)
        for func in numpyapi_list:
            fid.write(func.to_ReST())
            fid.write('\n\n')

    return targets
| gpl-3.0 |
matllubos/django-is-core | is_core/utils/__init__.py | 1 | 13554 | import re
import json
import inspect
import types
from django.core.exceptions import ImproperlyConfigured, ObjectDoesNotExist
from django.contrib.admin.utils import display_for_value as admin_display_for_value
from django.core.serializers.json import DjangoJSONEncoder
from django.db.models import Model, QuerySet
from django.core.exceptions import FieldDoesNotExist
from django.forms.utils import pretty_name
from django.utils.encoding import force_text
from django.utils.translation import ugettext
from django.utils.html import format_html, format_html_join
from chamber.utils import call_function_with_unknown_input
from pyston.converters import get_converter
from pyston.serializer import get_resource_class_or_none
PK_PATTERN = r'(?P<pk>[^/]+)'
NUMBER_PK_PATTERN = '(?P<pk>\d+)'
EMPTY_VALUE = '---'
LOOKUP_SEP = '__'
METHOD_OBJ_STR_NAME = '_obj_name'
def is_callable(val):
    """Return True if *val* is callable.

    Uses the built-in callable() instead of probing for a ``__call__``
    attribute by hand, which is the idiomatic (and type-level correct) check.
    """
    return callable(val)
def get_new_class_name(prefix, klass):
    """Return *klass*'s name prefixed with the CamelCased form of *prefix*.

    Dashes in the prefix are treated as word separators, e.g.
    ('user-detail', SomeView) -> 'UserDetailSomeView'.
    """
    camel_prefix = re.sub(r'\s+', '', prefix.replace('-', ' ').title())
    return '{}{}'.format(camel_prefix, klass.__name__)
def flatten_fieldsets(fieldsets):
    """Return a flat list of field names from an admin fieldsets structure.

    Nested 'fieldsets' entries are flattened recursively; grouped fields
    given as lists/tuples are expanded in place.
    """
    field_names = []
    for _, opts in fieldsets or ():
        if 'fieldsets' in opts:
            field_names.extend(flatten_fieldsets(opts.get('fieldsets')))
            continue
        for field in opts.get('fields', ()):
            if isinstance(field, (list, tuple)):
                field_names.extend(field)
            else:
                field_names.append(field)
    return field_names
def get_inline_views_from_fieldsets(fieldsets):
    """Collect inline view instances from an admin fieldsets structure."""
    views = []
    for _, opts in fieldsets or ():
        if 'fieldsets' in opts:
            views += get_inline_views_from_fieldsets(opts.get('fieldsets'))
        elif 'inline_view_inst' in opts:
            views.append(opts.get('inline_view_inst'))
    return views
def get_inline_views_opts_from_fieldsets(fieldsets):
    """Collect the option dicts declaring inline views from fieldsets."""
    opts_list = []
    for _, opts in fieldsets or ():
        if 'fieldsets' in opts:
            opts_list += get_inline_views_opts_from_fieldsets(opts.get('fieldsets'))
        elif 'inline_view' in opts:
            opts_list.append(opts)
    return opts_list
def get_field_from_model_or_none(model, field_name):
    """
    Return field from model. If the field doesn't exist, None is returned
    instead of an exception being raised.
    """
    try:
        return model._meta.get_field(field_name)
    except (FieldDoesNotExist, AttributeError):
        # AttributeError also covers objects without Django's _meta options.
        return None
def get_field_label_from_path(model, field_path, view=None, field_labels=None):
    """
    Return field label of model class for input field path. For every field name in the field path is firstly get the
    right label and these labels are joined with " - " separator to one string.
    field_label input parameter can affect the result value. Example:
    * field_path='user__email', field_labels={} => 'user email' # default values get from fields
    * field_path='user__email', field_labels={'user__email': 'e-mail'} => 'e-mail' # full value is replaced
    * field_path='user__email', field_labels={'user': 'customer'} => 'customer - email' # related field prefix is changed
    * field_path='user', field_labels={'user': 'customer'} => 'customer' # full value is replaced
    * field_path='user', field_labels={'user__': 'customer'} => 'user' # has no effect
    * field_path='user__email', field_labels={'user__': 'customer'} => 'customer email' # related field prefix is changed
    * field_path='user__email', field_labels={'user__': None} => 'email' # related field prefix is ignored
    * field_path='user__email', field_labels={'email': 'e-mail'} => 'user email' # has no effect

    :param model: Django model class
    :param field_path: field names separated with "__"
    :param view: view instance
    :param field_labels: dict of field labels which can override result field name
    :return: field label
    """
    # Local import avoids a circular dependency with .field_api.
    from .field_api import get_field_descriptors_from_path

    field_labels = {} if field_labels is None else field_labels
    field_descriptors = get_field_descriptors_from_path(model, field_path, view)

    used_field_names = []
    field_descriptor_labels = []
    for field_descriptor in field_descriptors:
        field_path_prefix = LOOKUP_SEP.join(used_field_names)
        current_field_path = LOOKUP_SEP.join(used_field_names + [field_descriptor.field_name])
        # A "prefix__" key rewrites (or, when None, clears) every label
        # accumulated so far for the related-field prefix.
        if field_descriptor_labels and field_path_prefix + LOOKUP_SEP in field_labels:
            if field_labels[field_path_prefix + LOOKUP_SEP] is not None:
                field_descriptor_labels = [field_labels[field_path_prefix + LOOKUP_SEP]]
            else:
                field_descriptor_labels = []
        # An exact-path key replaces (or, when None, suppresses) the label
        # of the current field.
        if current_field_path in field_labels:
            if field_labels[current_field_path] is not None:
                field_descriptor_labels = [field_labels[current_field_path]]
            else:
                field_descriptor_labels = []
        elif field_descriptor.field_name != METHOD_OBJ_STR_NAME or not field_descriptor_labels:
            # The implicit "_obj_name" descriptor only contributes its label
            # when nothing has been collected yet.
            if field_descriptor.get_label() is not None:
                field_descriptor_labels.append(field_descriptor.get_label())
        used_field_names.append(field_descriptor.field_name)
    return ' - '.join([str(label) for label in field_descriptor_labels if label is not None])
def get_field_widget_from_path(model, field_path, view=None):
    """Return the form widget used to render a value reached via *field_path*."""
    # Local import avoids a circular dependency with .field_api.
    from .field_api import get_field_descriptors_from_path

    descriptors = get_field_descriptors_from_path(model, field_path, view)
    return descriptors[-1].get_widget()
def get_readonly_field_value_from_path(instance, field_path, request=None, view=None):
    """
    Return ReadonlyValue instance which contains value and humanized value get from model instance and field_path
    """
    # Local import avoids a circular dependency with .field_api.
    from .field_api import get_field_value_from_path

    return get_field_value_from_path(instance, field_path, request, view, return_readonly_value=True)
def get_readonly_field_data(instance, field_name, request, view=None, field_labels=None):
    """
    Return the field's humanized value, label and widget used to display
    readonly data of an instance or view.

    Args:
        instance: model instance
        field_name: name of the field which will be displayed
        request: current HTTP request
        view: view instance
        field_labels: dict of field labels which rewrites the generated label

    Returns:
        (humanized value, label, widget) triple
    """
    model = instance.__class__
    value = get_readonly_field_value_from_path(instance, field_name, request, view)
    label = get_field_label_from_path(model, field_name, view, field_labels)
    widget = get_field_widget_from_path(model, field_name, view)
    return value, label, widget
def display_object_data(obj, field_name, request, view=None):
    """
    Return the humanized value of a model object field, renderable to HTML
    or usable in a REST payload.

    examples:
        boolean True/False ==> Yes/No
        objects ==> object display name with link if current user has permissions to see the object
        field with choices ==> string value of choice
        field with humanize function ==> result of humanize function
    """
    value = get_readonly_field_value_from_path(obj, field_name, request, view)
    return display_for_value(value, request=request)
def display_code(value):
    """Display the input value as a preformatted code block."""
    if not value:
        # Fall back to the generic empty-value rendering.
        return display_for_value(value)
    return format_html('<pre style="max-height: 400px">{}</pre>', value)
def display_json(value):
    """Display input JSON as a pretty-printed code block."""
    if value is None:
        return display_for_value(value)

    parsed = json.loads(value) if isinstance(value, str) else value
    return display_code(json.dumps(parsed, indent=2, ensure_ascii=False, cls=DjangoJSONEncoder))
def display_for_value(value, request=None):
    """
    Converts humanized value

    examples:
       boolean True/False ==> Yes/No
       objects ==> object display name with link if current user has permissions to see the object
       datetime ==> in localized format
       list ==> values separated with ","
       dict ==> string formatted with HTML ul/li tags
    """
    # Local import avoids a circular dependency with is_core.forms.
    from is_core.forms.utils import ReadonlyValue

    # Unwrap ReadonlyValue wrappers to their raw value first.
    if isinstance(value, ReadonlyValue):
        value = value.value

    if request and isinstance(value, Model):
        return render_model_object_with_link(request, value)
    elif isinstance(value, (QuerySet, list, tuple, set, types.GeneratorType)):
        # Iterables become an ordered list, one <li> per recursively
        # rendered item.
        return format_html(
            '<ol class="field-list">{}</ol>',
            format_html_join(
                '\n',
                '<li>{}</li>',
                (
                    (display_for_value(v, request),) for v in value
                )
            )
        )
    elif isinstance(value, dict):
        # Dicts become a <ul>: key as <li>, value recursively rendered and
        # wrapped in a nested <ul> unless it is itself a dict (which already
        # produces its own list markup).
        return format_html(
            '<ul class="field-dict">{}</ul>',
            format_html_join(
                '\n',
                '{}{}',
                (
                    (
                        format_html('<li>{}</li>', k),
                        (
                            display_for_value(v, request) if isinstance(v, dict)
                            else format_html(
                                '<ul class="field-dict"><li>{}</li></ul>',
                                display_for_value(v, request)
                            )
                        )
                    )
                    for k, v in value.items()
                )
            )
        )
    elif isinstance(value, bool):
        return ugettext('Yes') if value else ugettext('No')
    else:
        # Defer to Django admin's generic humanization (dates, numbers, ...).
        return admin_display_for_value(value, EMPTY_VALUE)
def get_url_from_model_core(request, obj):
    """Return the detail URL of *obj* from its registered model core, or None."""
    # Local import avoids a circular dependency with is_core.site.
    from is_core.site import get_model_core

    model_core = get_model_core(obj.__class__)
    if not model_core or not hasattr(model_core, 'ui_patterns'):
        return None

    edit_pattern = model_core.ui_patterns.get('detail')
    if edit_pattern and edit_pattern.has_permission('get', request, obj=obj):
        return edit_pattern.get_url_string(request, obj=obj)
    return None
def get_obj_url(request, obj):
    """Return the object's URL if the current logged user may see it."""
    usable = is_callable(getattr(obj, 'get_absolute_url', None))
    if usable and hasattr(obj, 'can_see_edit_link'):
        # The object can veto the link via its can_see_edit_link hook.
        usable = (is_callable(getattr(obj, 'can_see_edit_link', None))
                  and obj.can_see_edit_link(request))
    if usable:
        return call_function_with_unknown_input(obj.get_absolute_url, request=request)
    return get_url_from_model_core(request, obj)
def render_model_object_with_link(request, obj, display_value=None):
    """Render *obj* as text, hyperlinked when the user may open its detail."""
    if obj is None:
        return '[{}]'.format(ugettext('missing object'))

    text = str(obj) if display_value is None else str(display_value)
    obj_url = get_obj_url(request, obj)
    return format_html('<a href="{}">{}</a>', obj_url, text) if obj_url else text
def render_model_objects_with_link(request, objs):
    """Render a comma-separated list of objects, each linked when allowed."""
    links = ((render_model_object_with_link(request, obj),) for obj in objs)
    return format_html_join(', ', '{}', links)
def header_name_to_django(header_name):
    """Convert an HTTP header name to Django's ``HTTP_*`` META key form."""
    return 'HTTP_{}'.format(header_name.replace('-', '_').upper())
def pretty_class_name(class_name):
    """Convert a CamelCase class name to a lowercase dash-separated slug."""
    camel_boundary = re.compile(r'(\w)([A-Z])')
    return camel_boundary.sub(r'\1-\2', class_name).lower()
def get_export_types_with_content_type(export_types):
    """Extend (title, type, serialization_format) triples with the media type
    of the registered converter; raise ImproperlyConfigured when missing."""
    result = []
    for title, type, serialization_format in export_types:
        try:
            media_type = get_converter(type).media_type
        except KeyError:
            raise ImproperlyConfigured('Missing converter for type {}'.format(type))
        result.append((title, type, serialization_format, media_type))
    return result
def get_link_or_none(pattern_name, request, view_kwargs=None):
    """
    Generate a URL from a pattern name and kwargs, returning it only when
    the current request has permission to open it; otherwise return None.

    Args:
        pattern_name (str): slug which is used for view registration to pattern
        request (django.http.request.HttpRequest): Django request object
        view_kwargs (dict): kwargs necessary for the URL generator
    """
    # Local import avoids a circular dependency with is_core.patterns.
    from is_core.patterns import reverse_pattern

    pattern = reverse_pattern(pattern_name)
    assert pattern is not None, 'Invalid pattern name {}'.format(pattern_name)

    if not pattern.has_permission('get', request, view_kwargs=view_kwargs):
        return None
    return pattern.get_url_string(request, view_kwargs=view_kwargs)
class GetMethodFieldMixin:
    """Mixin providing lookup of a class-level callable used to obtain a field value."""

    @classmethod
    def get_method_returning_field_value(cls, field_name):
        """
        Look up a class attribute named ``field_name`` and return it when it is
        a (truthy) callable usable for obtaining the field value; else ``None``.
        """
        candidate = getattr(cls, field_name, None)
        if candidate and callable(candidate):
            return candidate
        return None
def get_model_name(model):
    """Return a Django model's lowercase model name as a plain ``str``."""
    model_name = model._meta.model_name
    return str(model_name)
| bsd-3-clause |
bolkedebruin/airflow | airflow/sensors/bash.py | 1 | 3432 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
from subprocess import PIPE, STDOUT, Popen
from tempfile import NamedTemporaryFile, TemporaryDirectory, gettempdir
from airflow.sensors.base_sensor_operator import BaseSensorOperator
from airflow.utils.decorators import apply_defaults
class BashSensor(BaseSensorOperator):
    """
    Executes a bash command/script and returns True if and only if the
    return code is 0.

    :param bash_command: The command, set of commands or reference to a
        bash script (must be '.sh') to be executed.
    :type bash_command: str
    :param env: If env is not None, it must be a mapping that defines the
        environment variables for the new process; these are used instead
        of inheriting the current process environment, which is the default
        behavior. (templated)
    :type env: dict
    :param output_encoding: output encoding of bash command.
    :type output_encoding: str
    """

    template_fields = ('bash_command', 'env')

    @apply_defaults
    def __init__(self,
                 bash_command,
                 env=None,
                 output_encoding='utf-8',
                 *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.bash_command = bash_command
        self.env = env
        self.output_encoding = output_encoding

    def poke(self, context):
        """
        Execute the bash command in a temporary directory
        which will be cleaned afterwards.

        Returns True when the command exits with return code 0.
        """
        bash_command = self.bash_command
        self.log.info("Tmp dir root location: \n %s", gettempdir())
        with TemporaryDirectory(prefix='airflowtmp') as tmp_dir:
            with NamedTemporaryFile(dir=tmp_dir, prefix=self.task_id) as f:
                f.write(bytes(bash_command, 'utf_8'))
                f.flush()
                fname = f.name
                # NamedTemporaryFile.name is already the absolute path inside
                # tmp_dir; the previous tmp_dir + "/" + fname concatenation
                # logged a bogus doubled path.
                script_location = fname
                self.log.info("Temporary script location: %s", script_location)
                self.log.info("Running command: %s", bash_command)
                # Run in its own session so the whole process group can be
                # signalled on kill.
                sp = Popen(
                    ['bash', fname],
                    stdout=PIPE, stderr=STDOUT,
                    close_fds=True, cwd=tmp_dir,
                    env=self.env, preexec_fn=os.setsid)
                self.sp = sp
                self.log.info("Output:")
                for line in iter(sp.stdout.readline, b''):
                    line = line.decode(self.output_encoding).strip()
                    self.log.info(line)
                sp.wait()
                self.log.info("Command exited with return code %s", sp.returncode)
                # Poke succeeds only on a zero exit status.
                return not sp.returncode
| apache-2.0 |
AyoubZahid/odoo | addons/purchase/res_config.py | 33 | 3680 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from openerp.osv import fields, osv
from openerp.tools.translate import _
class purchase_config_settings(osv.osv_memory):
    """Transient settings model backing the Purchases configuration screen.

    Each (0, ...)/(1, ...) selection below acts as a pseudo-boolean toggle:
    fields with ``implied_group`` grant/revoke a security group, while fields
    prefixed with ``module_`` trigger (un)installation of an optional module.
    """
    _name = 'purchase.config.settings'
    _inherit = 'res.config.settings'
    _columns = {
        'group_product_variant': fields.selection([
            (0, "No variants on products"),
            (1, 'Products can have several attributes, defining variants (Example: size, color,...)')
            ], "Product Variants",
            help='Work with product variant allows you to define some variant of the same products, an ease the product management in the ecommerce for example',
            implied_group='product.group_product_variant'),
        'group_uom':fields.selection([
            (0, 'Products have only one unit of measure (easier)'),
            (1, 'Some products may be sold/puchased in different unit of measures (advanced)')
            ], "Unit of Measures",
            implied_group='product.group_uom',
            help="""Allows you to select and maintain different units of measure for products."""),
        'group_costing_method':fields.selection([
            (0, 'Set a fixed cost price on each product'),
            (1, "Use a 'Fixed', 'Real' or 'Average' price costing method")
            ], "Costing Methods",
            implied_group='stock_account.group_inventory_valuation',
            help="""Allows you to compute product cost price based on average cost."""),
        'module_purchase_requisition': fields.selection([
            (0, 'Purchase propositions trigger draft purchase orders to a single supplier'),
            (1, 'Allow using call for tenders to get quotes from multiple suppliers (advanced)')
            ], "Calls for Tenders",
            help="""Calls for tenders are used when you want to generate requests for quotations to several vendors for a given set of products.
            You can configure per product if you directly do a Request for Quotation
            to one vendor or if you want a Call for Tenders to compare offers from several vendors."""),
        'group_advance_purchase_requisition': fields.selection([
            (0, 'Simple call for tender (only choose from one RFQ)'),
            (1, 'Advanced call for tender (choose products from different RFQ)')
            ], "Advanced Calls for Tenders",
            implied_group='purchase.group_advance_bidding',
            help="""In the process of a public tendering, you can compare the tender lines and choose for each requested product which quantity you will buy from each bid."""),
        'module_stock_dropshipping': fields.selection([
            (0, 'Suppliers always deliver to your warehouse(s)'),
            (1, "Allow suppliers to deliver directly to your customers")
            ], "Dropshipping",
            help='\nCreates the dropship Route and add more complex tests'
            '-This installs the module stock_dropshipping.'),
        'group_manage_vendor_price': fields.selection([
            (0, 'Manage vendor price on the product form'),
            (1, 'Allow using and importing vendor pricelists')
            ], "Vendor Price",
            implied_group="purchase.group_manage_vendor_price"),
    }
class account_config_settings(osv.osv_memory):
    """Extends the accounting settings wizard with a purchase-related toggle."""
    _inherit = 'account.config.settings'
    _columns = {
        # Grants purchase.group_analytic_accounting so purchase order lines
        # can carry an analytic account.
        'group_analytic_account_for_purchases': fields.boolean('Analytic accounting for purchases',
            implied_group='purchase.group_analytic_accounting',
            help="Allows you to specify an analytic account on purchase order lines."),
    }
| gpl-3.0 |
eeshangarg/zulip | zerver/lib/test_helpers.py | 1 | 23414 | import collections
import os
import re
import sys
import time
from contextlib import contextmanager
from functools import wraps
from typing import (
IO,
TYPE_CHECKING,
Any,
Callable,
Dict,
Generator,
Iterable,
Iterator,
List,
Optional,
Tuple,
TypeVar,
Union,
cast,
)
from unittest import mock
import boto3
import fakeldap
import ldap
import orjson
from boto3.resources.base import ServiceResource
from django.conf import settings
from django.db.migrations.state import StateApps
from django.http import HttpResponse, HttpResponseRedirect
from django.test import override_settings
from django.urls import URLResolver
from moto import mock_s3
import zerver.lib.upload
from zerver.lib import cache
from zerver.lib.actions import do_set_realm_property
from zerver.lib.avatar import avatar_url
from zerver.lib.cache import get_cache_backend
from zerver.lib.db import Params, ParamsT, Query, TimeTrackingCursor
from zerver.lib.integrations import WEBHOOK_INTEGRATIONS
from zerver.lib.upload import LocalUploadBackend, S3UploadBackend
from zerver.models import (
Client,
Message,
Realm,
Subscription,
UserMessage,
UserProfile,
get_client,
get_realm,
get_stream,
)
from zerver.tornado.handlers import AsyncDjangoHandler, allocate_handler_id
from zerver.worker import queue_processors
from zproject.backends import ExternalAuthDataDict, ExternalAuthResult
if TYPE_CHECKING:
# Avoid an import cycle; we only need these for type annotations.
from zerver.lib.test_classes import MigrationsTestCase, ZulipTestCase
class MockLDAP(fakeldap.MockLDAP):
    """fakeldap.MockLDAP variant whose error attributes subclass the real
    ``ldap`` exception types, so code under test can keep catching the
    genuine ldap exceptions while talking to the fake directory."""
    class LDAPError(ldap.LDAPError):
        pass
    class INVALID_CREDENTIALS(ldap.INVALID_CREDENTIALS):
        pass
    class NO_SUCH_OBJECT(ldap.NO_SUCH_OBJECT):
        pass
    class ALREADY_EXISTS(ldap.ALREADY_EXISTS):
        pass
@contextmanager
def stub_event_queue_user_events(
    event_queue_return: Any, user_events_return: Any
) -> Iterator[None]:
    """Patch the event-queue helpers in zerver.lib.events with canned return values."""
    queue_patch = mock.patch(
        "zerver.lib.events.request_event_queue", return_value=event_queue_return
    )
    events_patch = mock.patch(
        "zerver.lib.events.get_user_events", return_value=user_events_return
    )
    with queue_patch, events_patch:
        yield
@contextmanager
def simulated_queue_client(client: Callable[[], object]) -> Iterator[None]:
    """Substitute the worker SimpleQueueClient class for the duration of the block."""
    patcher = mock.patch.object(queue_processors, "SimpleQueueClient", client)
    with patcher:
        yield
@contextmanager
def cache_tries_captured() -> Iterator[List[Tuple[str, Union[str, List[str]], Optional[str]]]]:
    """Capture every cache get/get_many made inside the block.

    Lookups are still forwarded to the real cache; the yielded list records
    ("get" | "getmany", key(s), cache_name) tuples as they happen.
    """
    captured: List[Tuple[str, Union[str, List[str]], Optional[str]]] = []
    real_get = cache.cache_get
    real_get_many = cache.cache_get_many

    def recording_get(key: str, cache_name: Optional[str] = None) -> Optional[Dict[str, Any]]:
        captured.append(("get", key, cache_name))
        return real_get(key, cache_name)

    def recording_get_many(
        keys: List[str], cache_name: Optional[str] = None
    ) -> Dict[str, Any]:  # nocoverage -- simulated code doesn't use this
        captured.append(("getmany", keys, cache_name))
        return real_get_many(keys, cache_name)

    with mock.patch.multiple(cache, cache_get=recording_get, cache_get_many=recording_get_many):
        yield captured
@contextmanager
def simulated_empty_cache() -> Iterator[List[Tuple[str, Union[str, List[str]], Optional[str]]]]:
    """Force every cache lookup to miss while recording what was asked for."""
    captured: List[Tuple[str, Union[str, List[str]], Optional[str]]] = []

    def missing_get(key: str, cache_name: Optional[str] = None) -> Optional[Dict[str, Any]]:
        captured.append(("get", key, cache_name))
        return None

    def missing_get_many(
        keys: List[str], cache_name: Optional[str] = None
    ) -> Dict[str, Any]:  # nocoverage -- simulated code doesn't use this
        captured.append(("getmany", keys, cache_name))
        return {}

    with mock.patch.multiple(cache, cache_get=missing_get, cache_get_many=missing_get_many):
        yield captured
@contextmanager
def queries_captured(
    include_savepoints: bool = False, keep_cache_warm: bool = False
) -> Generator[List[Dict[str, Union[str, bytes]]], None, None]:
    """
    Allow a user to capture just the queries executed during
    the with statement.

    Yields a list of {"sql", "time"} dicts, one per executed query.
    SAVEPOINT statements are skipped unless include_savepoints is set; the
    cache is cleared first unless keep_cache_warm is set, so cached lookups
    don't hide queries.
    """
    queries: List[Dict[str, Union[str, bytes]]] = []

    def wrapper_execute(
        self: TimeTrackingCursor,
        action: Callable[[str, ParamsT], None],
        sql: Query,
        params: ParamsT,
    ) -> None:
        start = time.time()
        try:
            return action(sql, params)
        finally:
            stop = time.time()
            duration = stop - start
            if include_savepoints or not isinstance(sql, str) or "SAVEPOINT" not in sql:
                queries.append(
                    {
                        "sql": self.mogrify(sql, params).decode("utf-8"),
                        "time": f"{duration:.3f}",
                    }
                )

    def cursor_execute(
        self: TimeTrackingCursor, sql: Query, params: Optional[Params] = None
    ) -> None:
        return wrapper_execute(self, super(TimeTrackingCursor, self).execute, sql, params)

    def cursor_executemany(self: TimeTrackingCursor, sql: Query, params: Iterable[Params]) -> None:
        return wrapper_execute(
            self, super(TimeTrackingCursor, self).executemany, sql, params
        )  # nocoverage -- doesn't actually get used in tests

    if not keep_cache_warm:
        # Named cache_backend: a local named ``cache`` would shadow the
        # imported zerver.lib.cache module inside this function.
        cache_backend = get_cache_backend(None)
        cache_backend.clear()
    with mock.patch.multiple(
        TimeTrackingCursor, execute=cursor_execute, executemany=cursor_executemany
    ):
        yield queries
@contextmanager
def stdout_suppressed() -> Iterator[IO[str]]:
    """Redirect stdout to /dev/null.

    Yields the original stdout stream and restores it on exit, even when
    the with-body raises (the previous version leaked the redirect on
    exceptions because the restore was not in a finally block).
    """
    with open(os.devnull, "a") as devnull:
        stdout, sys.stdout = sys.stdout, devnull
        try:
            yield stdout
        finally:
            sys.stdout = stdout
def reset_emails_in_zulip_realm() -> None:
    """Make every address in the zulip realm visible to everyone (test convenience)."""
    zulip_realm = get_realm("zulip")
    do_set_realm_property(
        zulip_realm,
        "email_address_visibility",
        Realm.EMAIL_ADDRESS_VISIBILITY_EVERYONE,
        acting_user=None,
    )
def get_test_image_file(filename: str) -> IO[Any]:
    """Open a fixture image from zerver/tests/images for binary reading."""
    images_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "../tests/images"))
    return open(os.path.join(images_dir, filename), "rb")
def avatar_disk_path(
    user_profile: UserProfile, medium: bool = False, original: bool = False
) -> str:
    """Return the on-disk path of a user's avatar under LOCAL_UPLOADS_DIR."""
    url_path = avatar_url(user_profile, medium)
    assert url_path is not None
    # The URL ends with <hash_dir>/<file>?<query>; mirror that layout on disk,
    # dropping the query string from the file name.
    url_parts = url_path.split("/")
    file_name = url_parts[-1].split("?")[0]
    disk_path = os.path.join(settings.LOCAL_UPLOADS_DIR, "avatars", url_parts[-2], file_name)
    if original:
        return disk_path.replace(".png", ".original")
    return disk_path
def make_client(name: str) -> Client:
    """Fetch, or lazily create, the Client row with the given name."""
    client_obj, _created = Client.objects.get_or_create(name=name)
    return client_obj
def find_key_by_email(address: str) -> Optional[str]:
    """Scan Django's test outbox (newest first) for a confirmation key sent to address."""
    from django.core.mail import outbox

    key_regex = re.compile("accounts/do_confirm/([a-z0-9]{24})>")
    for message in reversed(outbox):
        if address not in message.to:
            continue
        match = key_regex.search(message.body)
        assert match is not None
        return match.groups()[0]
    return None  # nocoverage -- in theory a test might want this case, but none do
def message_stream_count(user_profile: UserProfile) -> int:
    """Count the UserMessage rows belonging to this user."""
    rows = UserMessage.objects.select_related("message").filter(user_profile=user_profile)
    return rows.count()
def most_recent_usermessage(user_profile: UserProfile) -> UserMessage:
    """Return the user's UserMessage row with the highest message id."""
    rows = (
        UserMessage.objects.select_related("message")
        .filter(user_profile=user_profile)
        .order_by("-message")
    )
    # Indexing translates to LIMIT 1 in the generated SQL.
    return rows[0]
def most_recent_message(user_profile: UserProfile) -> Message:
    """Return the newest Message delivered to this user."""
    return most_recent_usermessage(user_profile).message
def get_subscription(stream_name: str, user_profile: UserProfile) -> Subscription:
    """Return the user's active Subscription to the named stream."""
    stream = get_stream(stream_name, user_profile.realm)
    return Subscription.objects.get(
        user_profile=user_profile, recipient_id=stream.recipient_id, active=True
    )
def get_user_messages(user_profile: UserProfile) -> List[Message]:
    """Return all of the user's messages, oldest first."""
    rows = (
        UserMessage.objects.select_related("message")
        .filter(user_profile=user_profile)
        .order_by("message")
    )
    return [row.message for row in rows]
class DummyHandler(AsyncDjangoHandler):
    # Minimal Tornado handler stand-in: registering itself via
    # allocate_handler_id is all that HostRequestMock needs from it.
    def __init__(self) -> None:
        allocate_handler_id(self)
class HostRequestMock:
    """A mock request object where get_host() works.  Useful for testing
    routes that use Zulip's subdomains feature"""

    def __init__(
        self,
        post_data: Optional[Dict[str, Any]] = None,
        user_profile: Optional[UserProfile] = None,
        host: str = settings.EXTERNAL_HOST,
        client_name: Optional[str] = None,
    ) -> None:
        # None rather than {} as the default avoids the shared-mutable-default
        # pitfall; behavior for existing callers is unchanged.
        if post_data is None:
            post_data = {}
        self.host = host
        self.GET: Dict[str, Any] = {}
        self.method = ""
        if client_name is not None:
            self.client = get_client(client_name)
        # Convert any integer parameters passed into strings, even
        # though of course the HTTP API would do so.  Ideally, we'd
        # get rid of this abstraction entirely and just use the HTTP
        # API directly, but while it exists, we need this code
        self.POST: Dict[str, Any] = {}
        for key in post_data:
            self.POST[key] = str(post_data[key])
            self.method = "POST"
        self._tornado_handler = DummyHandler()
        self._log_data: Dict[str, Any] = {}
        self.META = {"PATH_INFO": "test"}
        self.path = ""
        self.user = user_profile
        self.body = ""
        self.content_type = ""
        self.client_name = ""

    def get_host(self) -> str:
        """Return the configured host (supports subdomain-based routing tests)."""
        return self.host
# Set TEST_INSTRUMENT_URL_COVERAGE=TRUE to record every URL hit by the suite.
INSTRUMENTING = os.environ.get("TEST_INSTRUMENT_URL_COVERAGE", "") == "TRUE"
# Accumulates one metadata dict per instrumented request.
INSTRUMENTED_CALLS: List[Dict[str, Any]] = []
UrlFuncT = TypeVar("UrlFuncT", bound=Callable[..., HttpResponse])  # TODO: make more specific
def append_instrumentation_data(data: Dict[str, Any]) -> None:
    """Record one instrumented request for later URL-coverage reporting."""
    INSTRUMENTED_CALLS.append(data)
def instrument_url(f: UrlFuncT) -> UrlFuncT:
    """Decorator for ZulipTestCase URL helpers: when URL-coverage
    instrumentation is enabled, record metadata about every request made."""
    if not INSTRUMENTING:  # nocoverage -- option is always enabled; should we remove?
        return f
    else:

        def wrapper(
            self: "ZulipTestCase", url: str, info: object = {}, **kwargs: Any
        ) -> HttpResponse:
            start = time.time()
            result = f(self, url, info, **kwargs)
            delay = time.time() - start
            test_name = self.id()
            extra_info = ""
            if "?" in url:
                url, extra_info = url.split("?", 1)
            # Normalize `info` into something JSON-serializable for the report.
            if isinstance(info, HostRequestMock):
                info = "<HostRequestMock>"
            elif isinstance(info, bytes):
                info = "<bytes>"
            elif isinstance(info, dict):
                redacted: Dict[Any, Any] = {}
                for k, v in info.items():
                    if hasattr(v, "read") and callable(getattr(v, "read")):
                        redacted[k] = "<file object>"
                    else:
                        redacted[k] = v
                info = redacted
            append_instrumentation_data(
                dict(
                    url=url,
                    status_code=result.status_code,
                    method=f.__name__,
                    delay=delay,
                    extra_info=extra_info,
                    info=info,
                    test_name=test_name,
                    kwargs=kwargs,
                )
            )
            return result

        return cast(UrlFuncT, wrapper)  # https://github.com/python/mypy/issues/1927
def write_instrumentation_reports(full_suite: bool, include_webhooks: bool) -> None:
    """Write the URL coverage report collected via instrument_url and, when
    running the full suite, exit nonzero if any non-exempt URL pattern was
    never successfully exercised."""
    if INSTRUMENTING:
        calls = INSTRUMENTED_CALLS
        from zproject.urls import urlpatterns, v1_api_and_json_patterns
        # Find our untested urls.
        pattern_cnt: Dict[str, int] = collections.defaultdict(int)
        def re_strip(r: Any) -> str:
            # Drop the regex anchors so patterns can be compared as plain strings.
            return str(r).lstrip("^").rstrip("$")
        def find_patterns(patterns: List[Any], prefixes: List[str]) -> None:
            for pattern in patterns:
                find_pattern(pattern, prefixes)
        def cleanup_url(url: str) -> str:
            # Normalize recorded URLs to be relative to the server root.
            if url.startswith("/"):
                url = url[1:]
            if url.startswith("http://testserver/"):
                url = url[len("http://testserver/") :]
            if url.startswith("http://zulip.testserver/"):
                url = url[len("http://zulip.testserver/") :]
            if url.startswith("http://testserver:9080/"):
                url = url[len("http://testserver:9080/") :]
            return url
        def find_pattern(pattern: Any, prefixes: List[str]) -> None:
            # Count successful hits against this URL pattern across all calls.
            if isinstance(pattern, type(URLResolver)):
                return  # nocoverage -- shouldn't actually happen
            if hasattr(pattern, "url_patterns"):
                return
            canon_pattern = prefixes[0] + re_strip(pattern.pattern.regex.pattern)
            cnt = 0
            for call in calls:
                if "pattern" in call:
                    continue
                url = cleanup_url(call["url"])
                for prefix in prefixes:
                    if url.startswith(prefix):
                        match_url = url[len(prefix) :]
                        if pattern.resolve(match_url):
                            if call["status_code"] in [200, 204, 301, 302]:
                                cnt += 1
                            call["pattern"] = canon_pattern
            pattern_cnt[canon_pattern] += cnt
        find_patterns(urlpatterns, ["", "en/", "de/"])
        find_patterns(v1_api_and_json_patterns, ["api/v1/", "json/"])
        assert len(pattern_cnt) > 100
        untested_patterns = {p.replace("\\", "") for p in pattern_cnt if pattern_cnt[p] == 0}
        exempt_patterns = {
            # We exempt some patterns that are called via Tornado.
            "api/v1/events",
            "api/v1/events/internal",
            "api/v1/register",
            # We also exempt some development environment debugging
            # static content URLs, since the content they point to may
            # or may not exist.
            "coverage/(?P<path>.+)",
            "confirmation_key/",
            "node-coverage/(?P<path>.+)",
            "docs/(?P<path>.+)",
            "help/configure-missed-message-emails",
            "help/community-topic-edits",
            "help/delete-a-stream",
            "api/delete-stream",
            "casper/(?P<path>.+)",
            "static/(?P<path>.+)",
            "flush_caches",
            "external_content/(?P<digest>[^/]+)/(?P<received_url>[^/]+)",
            *(webhook.url for webhook in WEBHOOK_INTEGRATIONS if not include_webhooks),
        }
        untested_patterns -= exempt_patterns
        var_dir = "var"  # TODO make sure path is robust here
        fn = os.path.join(var_dir, "url_coverage.txt")
        with open(fn, "wb") as f:
            for call in calls:
                f.write(orjson.dumps(call, option=orjson.OPT_APPEND_NEWLINE))
        if full_suite:
            print(f"INFO: URL coverage report is in {fn}")
            print("INFO: Try running: ./tools/create-test-api-docs")
        if full_suite and len(untested_patterns):  # nocoverage -- test suite error handling
            print("\nERROR: Some URLs are untested! Here's the list of untested URLs:")
            for untested_pattern in sorted(untested_patterns):
                print(f"  {untested_pattern}")
            sys.exit(1)
def load_subdomain_token(response: HttpResponse) -> ExternalAuthDataDict:
    """Extract and decode the login token from a subdomain-auth redirect response."""
    assert isinstance(response, HttpResponseRedirect)
    # The token is the final path component of the redirect URL.
    token = response.url.rsplit("/", 1)[1]
    auth_result = ExternalAuthResult(login_token=token, delete_stored_data=False)
    data = auth_result.data_dict
    assert data is not None
    return data
FuncT = TypeVar("FuncT", bound=Callable[..., None])
def use_s3_backend(method: FuncT) -> FuncT:
    """Decorator: run the test against a moto-mocked S3 upload backend and
    restore the local upload backend afterwards, even on failure."""

    @mock_s3
    @override_settings(LOCAL_UPLOADS_DIR=None)
    def wrapped(*args: Any, **kwargs: Any) -> Any:
        zerver.lib.upload.upload_backend = S3UploadBackend()
        try:
            return method(*args, **kwargs)
        finally:
            zerver.lib.upload.upload_backend = LocalUploadBackend()

    return wrapped
def create_s3_buckets(*bucket_names: str) -> List[ServiceResource]:
    """Create (moto-mocked) S3 buckets using the configured test credentials."""
    s3_resource = boto3.Session(settings.S3_KEY, settings.S3_SECRET_KEY).resource("s3")
    return [s3_resource.create_bucket(Bucket=bucket_name) for bucket_name in bucket_names]
def use_db_models(
    method: Callable[["MigrationsTestCase", StateApps], None]
) -> Callable[["MigrationsTestCase", StateApps], None]:  # nocoverage
    """Decorator for migration tests: patch the zerver model references in
    zerver.models, zerver.lib.test_helpers and zerver.lib.test_classes with
    the historical model states from the migration framework's ``apps``."""

    def method_patched_with_mock(self: "MigrationsTestCase", apps: StateApps) -> None:
        model_names = [
            "ArchivedAttachment",
            "ArchivedMessage",
            "ArchivedUserMessage",
            "Attachment",
            "BotConfigData",
            "BotStorageData",
            "Client",
            "CustomProfileField",
            "CustomProfileFieldValue",
            "DefaultStream",
            "DefaultStreamGroup",
            "EmailChangeStatus",
            "Huddle",
            "Message",
            "MultiuseInvite",
            "MutedTopic",
            "PreregistrationUser",
            "PushDeviceToken",
            "Reaction",
            "Realm",
            "RealmAuditLog",
            "RealmDomain",
            "RealmEmoji",
            "RealmFilter",
            "Recipient",
            "ScheduledEmail",
            "ScheduledMessage",
            "Service",
            "Stream",
            "Subscription",
            "UserActivity",
            "UserActivityInterval",
            "UserGroup",
            "UserGroupMembership",
            "UserHotspot",
            "UserMessage",
            "UserPresence",
            "UserProfile",
        ]
        models = {name: apps.get_model("zerver", name) for name in model_names}
        # Recipient's type constants are plain class attributes (not database
        # state), so the historical model needs them re-attached.
        models["Recipient"].PERSONAL = 1
        models["Recipient"].STREAM = 2
        models["Recipient"].HUDDLE = 3

        def subset(*names: str) -> Dict[str, Any]:
            return {name: models[name] for name in names}

        zerver_models_patch = mock.patch.multiple("zerver.models", **models)
        zerver_test_helpers_patch = mock.patch.multiple(
            "zerver.lib.test_helpers",
            **subset("Client", "Message", "Subscription", "UserMessage", "UserProfile"),
        )
        zerver_test_classes_patch = mock.patch.multiple(
            "zerver.lib.test_classes",
            **subset(
                "Client",
                "Message",
                "Realm",
                "Recipient",
                "Stream",
                "Subscription",
                "UserProfile",
            ),
        )
        with zerver_models_patch, zerver_test_helpers_patch, zerver_test_classes_patch:
            method(self, apps)

    return method_patched_with_mock
def create_dummy_file(filename: str) -> str:
    """Create a small throwaway file inside TEST_WORKER_DIR and return its path."""
    filepath = os.path.join(settings.TEST_WORKER_DIR, filename)
    with open(filepath, "w") as handle:
        handle.write("zulip!")
    return filepath
def zulip_reaction_info() -> Dict[str, str]:
    """Return the reaction payload for the realm-default :zulip: emoji."""
    return {
        "emoji_name": "zulip",
        "emoji_code": "zulip",
        "reaction_type": "zulip_extra_emoji",
    }
@contextmanager
def mock_queue_publish(
    method_to_patch: str,
    **kwargs: object,
) -> Iterator[mock.MagicMock]:
    """Patch a queue-publish function, yielding a MagicMock that records calls.

    Every published event is round-tripped through JSON first, because an
    unserializable event would make RabbitMQ crash in production.
    """
    inner = mock.MagicMock(**kwargs)

    def verify_serialize(
        queue_name: str,
        event: Dict[str, object],
        processor: Optional[Callable[[object], None]] = None,
    ) -> None:
        round_tripped = orjson.loads(orjson.dumps(event))
        assert round_tripped == event
        inner(queue_name, event, processor)

    with mock.patch(method_to_patch, side_effect=verify_serialize):
        yield inner
def patch_queue_publish(
    method_to_patch: str,
) -> Callable[[Callable[..., None]], Callable[..., None]]:
    """Decorator form of mock_queue_publish; the MagicMock is appended as the
    last positional argument of the wrapped test."""

    def decorator(func: Callable[..., None]) -> Callable[..., None]:
        @wraps(func)
        def _wrapped(*args: object, **kwargs: object) -> None:
            with mock_queue_publish(method_to_patch) as m:
                func(*args, m, **kwargs)

        return _wrapped

    return decorator
| apache-2.0 |
x1957/kubernetes | cluster/juju/layers/kubernetes-worker/lib/charms/kubernetes/common.py | 365 | 1084 | #!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import subprocess
def get_version(bin_name):
    """Get the version of an installed Kubernetes binary.

    :param str bin_name: Name of binary
    :return: 3-tuple version (maj, min, patch)

    Example::

        >>> get_version('kubelet')
        (1, 6, 0)

    """
    cmd = '{} --version'.format(bin_name).split()
    output = subprocess.check_output(cmd).decode('utf-8')
    # Take the first three integers found anywhere in the version banner.
    numbers = re.findall("[0-9]+", output)
    return tuple(int(part) for part in numbers[:3])
| apache-2.0 |
varunagrawal/azure-services | varunagrawal/site-packages/django/contrib/gis/geos/tests/test_geos.py | 47 | 43712 | import ctypes
import random
import unittest
from django.contrib.gis.geos import *
from django.contrib.gis.geos.base import gdal, numpy, GEOSBase
from django.contrib.gis.geos.libgeos import GEOS_PREPARE
from django.contrib.gis.geometry.test_data import TestDataMixin
class GEOSTest(unittest.TestCase, TestDataMixin):
    @property
    def null_srid(self):
        """
        Returns the proper null SRID depending on the GEOS version.
        See the comments in `test15_srid` for more details.
        """
        info = geos_version_info()
        # GEOS 3.0.0 release candidates reported -1 rather than None.
        if info['version'] == '3.0.0' and info['release_candidate']:
            return -1
        else:
            return None
    def test00_base(self):
        "Tests out the GEOSBase class."
        # Testing out GEOSBase class, which provides a `ptr` property
        # that abstracts out access to underlying C pointers.
        class FakeGeom1(GEOSBase):
            pass
        # This one only accepts pointers to floats
        c_float_p = ctypes.POINTER(ctypes.c_float)
        class FakeGeom2(GEOSBase):
            ptr_type = c_float_p
        # Default ptr_type is `c_void_p`.
        fg1 = FakeGeom1()
        # Default ptr_type is C float pointer
        fg2 = FakeGeom2()
        # These assignments are OK -- None is allowed because
        # it's equivalent to the NULL pointer.
        fg1.ptr = ctypes.c_void_p()
        fg1.ptr = None
        fg2.ptr = c_float_p(ctypes.c_float(5.23))
        fg2.ptr = None
        # Because pointers have been set to NULL, an exception should be
        # raised when we try to access it.  Raising an exception is
        # preferable to a segmentation fault that commonly occurs when
        # a C method is given a NULL memory reference.
        for fg in (fg1, fg2):
            # Equivalent to `fg.ptr`
            self.assertRaises(GEOSException, fg._get_ptr)
        # Anything that is either not None or the acceptable pointer type will
        # result in a TypeError when trying to assign it to the `ptr` property.
        # Thus, memory addresses (integers) and pointers of the incorrect type
        # (in `bad_ptrs`) will not be allowed.
        bad_ptrs = (5, ctypes.c_char_p('foobar'))
        for bad_ptr in bad_ptrs:
            # Equivalent to `fg.ptr = bad_ptr`
            self.assertRaises(TypeError, fg1._set_ptr, bad_ptr)
            self.assertRaises(TypeError, fg2._set_ptr, bad_ptr)
    def test01a_wkt(self):
        "Testing WKT output."
        # Round-trip each reference geometry through fromstr() and compare WKT.
        for g in self.geometries.wkt_out:
            geom = fromstr(g.wkt)
            self.assertEqual(g.ewkt, geom.wkt)
    def test01b_hex(self):
        "Testing HEX output."
        # Each test geometry carries its expected hex-encoded WKB.
        for g in self.geometries.hex_wkt:
            geom = fromstr(g.wkt)
            self.assertEqual(g.hex, geom.hex)
    def test01b_hexewkb(self):
        "Testing (HEX)EWKB output."
        from binascii import a2b_hex
        # For testing HEX(EWKB).
        ogc_hex = '01010000000000000000000000000000000000F03F'
        # `SELECT ST_AsHEXEWKB(ST_GeomFromText('POINT(0 1)', 4326));`
        hexewkb_2d = '0101000020E61000000000000000000000000000000000F03F'
        # `SELECT ST_AsHEXEWKB(ST_GeomFromEWKT('SRID=4326;POINT(0 1 2)'));`
        hexewkb_3d = '01010000A0E61000000000000000000000000000000000F03F0000000000000040'
        pnt_2d = Point(0, 1, srid=4326)
        pnt_3d = Point(0, 1, 2, srid=4326)
        # OGC-compliant HEX will not have SRID nor Z value.
        self.assertEqual(ogc_hex, pnt_2d.hex)
        self.assertEqual(ogc_hex, pnt_3d.hex)
        # HEXEWKB should be appropriate for its dimension -- have to use
        # a WKBWriter w/dimension set accordingly, else GEOS will insert
        # garbage into the 3D coordinate if there is none.  Also, GEOS has
        # a bug in versions prior to 3.1 that puts the X coordinate in
        # place of Z; an exception should be raised on those versions.
        self.assertEqual(hexewkb_2d, pnt_2d.hexewkb)
        if GEOS_PREPARE:
            self.assertEqual(hexewkb_3d, pnt_3d.hexewkb)
            self.assertEqual(True, GEOSGeometry(hexewkb_3d).hasz)
        else:
            try:
                hexewkb = pnt_3d.hexewkb
            except GEOSException:
                pass
            else:
                self.fail('Should have raised GEOSException.')
        # Same for EWKB.
        self.assertEqual(buffer(a2b_hex(hexewkb_2d)), pnt_2d.ewkb)
        if GEOS_PREPARE:
            self.assertEqual(buffer(a2b_hex(hexewkb_3d)), pnt_3d.ewkb)
        else:
            try:
                ewkb = pnt_3d.ewkb
            except GEOSException:
                pass
            else:
                self.fail('Should have raised GEOSException')
        # Redundant sanity check.
        self.assertEqual(4326, GEOSGeometry(hexewkb_2d).srid)
    def test01c_kml(self):
        "Testing KML output."
        # Only geometries that carry a reference `kml` attribute are checked.
        for tg in self.geometries.wkt_out:
            geom = fromstr(tg.wkt)
            kml = getattr(tg, 'kml', False)
            if kml: self.assertEqual(kml, geom.kml)
    def test01d_errors(self):
        "Testing the Error handlers."
        # string-based
        # GEOS logs errors for the invalid geometries below; the banners tell
        # whoever reads the test output that those messages are expected.
        print "\nBEGIN - expecting GEOS_ERROR; safe to ignore.\n"
        for err in self.geometries.errors:
            try:
                g = fromstr(err.wkt)
            except (GEOSException, ValueError):
                pass
        # Bad WKB
        self.assertRaises(GEOSException, GEOSGeometry, buffer('0'))
        print "\nEND - expecting GEOS_ERROR; safe to ignore.\n"
        class NotAGeometry(object):
            pass
        # Some other object
        self.assertRaises(TypeError, GEOSGeometry, NotAGeometry())
        # None
        self.assertRaises(TypeError, GEOSGeometry, None)
    def test01e_wkb(self):
        "Testing WKB output."
        from binascii import b2a_hex
        # WKB is compared via its hex encoding (the reference hex is uppercase).
        for g in self.geometries.hex_wkt:
            geom = fromstr(g.wkt)
            wkb = geom.wkb
            self.assertEqual(b2a_hex(wkb).upper(), g.hex)
def test01f_create_hex(self):
    "Testing creation from HEX."
    for entry in self.geometries.hex_wkt:
        from_hex = GEOSGeometry(entry.hex)
        # Round-trip through WKT so decimal places get normalised.
        from_wkt = fromstr(entry.wkt)
        self.assertEqual(from_wkt.wkt, from_hex.wkt)
def test01g_create_wkb(self):
    "Testing creation from WKB."
    from binascii import a2b_hex
    for entry in self.geometries.hex_wkt:
        raw_wkb = buffer(a2b_hex(entry.hex))
        from_wkb = GEOSGeometry(raw_wkb)
        # Round-trip through WKT so decimal places get normalised.
        from_wkt = fromstr(entry.wkt)
        self.assertEqual(from_wkt.wkt, from_wkb.wkt)
def test01h_ewkt(self):
    "Testing EWKT."
    for srid in (-1, 32140):
        for poly_ref in self.geometries.polygons:
            # The SRID=...; prefix must propagate to the geometry and its shell.
            poly = fromstr('SRID=%d;%s' % (srid, poly_ref.wkt))
            self.assertEqual(srid, poly.srid)
            self.assertEqual(srid, poly.shell.srid)
            # Exporting back to EWKT must preserve the SRID.
            self.assertEqual(srid, fromstr(poly.ewkt).srid)
def test01i_json(self):
    "Testing GeoJSON input/output (via GDAL)."
    if not gdal or not gdal.GEOJSON:
        return
    for ref in self.geometries.json_geoms:
        geom = GEOSGeometry(ref.wkt)
        if not hasattr(ref, 'not_equal'):
            # `json` and its `geojson` alias must both match the reference.
            self.assertEqual(ref.json, geom.json)
            self.assertEqual(ref.json, geom.geojson)
        self.assertEqual(GEOSGeometry(ref.wkt), GEOSGeometry(geom.json))
def test01k_fromfile(self):
    "Testing the fromfile() factory."
    from StringIO import StringIO
    ref_pnt = GEOSGeometry('POINT(5 23)')
    # Write the same reference point as WKT and as (stringified) WKB.
    wkt_f = StringIO()
    wkt_f.write(ref_pnt.wkt)
    wkb_f = StringIO()
    wkb_f.write(str(ref_pnt.wkb))
    # Other tests use `fromfile()` on string filenames so those
    # aren't tested here.
    for fh in (wkt_f, wkb_f):
        fh.seek(0)  # rewind before reading back
        pnt = fromfile(fh)
        self.assertEqual(ref_pnt, pnt)
def test01k_eq(self):
    "Testing equivalence."
    p = fromstr('POINT(5 23)')
    self.assertEqual(p, p.wkt)
    self.assertNotEqual(p, 'foo')
    ls = fromstr('LINESTRING(0 0, 1 1, 5 5)')
    self.assertEqual(ls, ls.wkt)
    # BUGFIX: this previously re-tested `p` against 'bar' (duplicating the
    # 'foo' check above) and never exercised inequality for the linestring.
    self.assertNotEqual(ls, 'bar')
    # Error shouldn't be raised on equivalence testing with
    # an invalid type.
    for g in (p, ls):
        self.assertNotEqual(g, None)
        self.assertNotEqual(g, {'foo' : 'bar'})
        self.assertNotEqual(g, False)
def test02a_points(self):
    "Testing Point objects."
    prev = fromstr('POINT(0 0)')
    for p in self.geometries.points:
        # Creating the point from the WKT
        pnt = fromstr(p.wkt)
        self.assertEqual(pnt.geom_type, 'Point')
        self.assertEqual(pnt.geom_typeid, 0)
        self.assertEqual(p.x, pnt.x)
        self.assertEqual(p.y, pnt.y)
        self.assertEqual(True, pnt == fromstr(p.wkt))
        self.assertEqual(False, pnt == prev)
        # Making sure that the point's X, Y components are what we expect
        self.assertAlmostEqual(p.x, pnt.tuple[0], 9)
        self.assertAlmostEqual(p.y, pnt.tuple[1], 9)
        # Testing the third dimension, and getting the tuple arguments
        if hasattr(p, 'z'):
            self.assertEqual(True, pnt.hasz)
            self.assertEqual(p.z, pnt.z)
            # BUGFIX: was `assertEqual(p.z, pnt.tuple[2], 9)` -- the 9 was
            # silently consumed as the `msg` argument.  Use assertAlmostEqual
            # with 9 decimal places, consistent with the X/Y checks above.
            self.assertAlmostEqual(p.z, pnt.tuple[2], 9)
            tup_args = (p.x, p.y, p.z)
            set_tup1 = (2.71, 3.14, 5.23)
            set_tup2 = (5.23, 2.71, 3.14)
        else:
            self.assertEqual(False, pnt.hasz)
            self.assertEqual(None, pnt.z)
            tup_args = (p.x, p.y)
            set_tup1 = (2.71, 3.14)
            set_tup2 = (3.14, 2.71)
        # Centroid operation on point should be point itself
        self.assertEqual(p.centroid, pnt.centroid.tuple)
        # Now testing the different constructors
        pnt2 = Point(tup_args)  # e.g., Point((1, 2))
        pnt3 = Point(*tup_args)  # e.g., Point(1, 2)
        self.assertEqual(True, pnt == pnt2)
        self.assertEqual(True, pnt == pnt3)
        # Now testing setting the x and y
        pnt.y = 3.14
        pnt.x = 2.71
        self.assertEqual(3.14, pnt.y)
        self.assertEqual(2.71, pnt.x)
        # Setting via the tuple/coords property
        pnt.tuple = set_tup1
        self.assertEqual(set_tup1, pnt.tuple)
        pnt.coords = set_tup2
        self.assertEqual(set_tup2, pnt.coords)
        prev = pnt  # setting the previous geometry
def test02b_multipoints(self):
    "Testing MultiPoint objects."
    for ref in self.geometries.multipoints:
        mpnt = fromstr(ref.wkt)
        self.assertEqual(mpnt.geom_type, 'MultiPoint')
        self.assertEqual(mpnt.geom_typeid, 4)
        # Centroid: almost-equal per component, then exact on the tuple.
        self.assertAlmostEqual(ref.centroid[0], mpnt.centroid.tuple[0], 9)
        self.assertAlmostEqual(ref.centroid[1], mpnt.centroid.tuple[1], 9)
        # Indexing one past the end must raise.
        self.assertRaises(GEOSIndexError, mpnt.__getitem__, len(mpnt))
        self.assertEqual(ref.centroid, mpnt.centroid.tuple)
        self.assertEqual(ref.coords, tuple(member.tuple for member in mpnt))
        # Every member is a valid, non-empty Point.
        for member in mpnt:
            self.assertEqual(member.geom_type, 'Point')
            self.assertEqual(member.geom_typeid, 0)
            self.assertEqual(member.empty, False)
            self.assertEqual(member.valid, True)
def test03a_linestring(self):
    "Testing LineString objects."
    # `prev` carries the previous fixture so each linestring is also
    # checked for inequality against a different geometry.
    prev = fromstr('POINT(0 0)')
    for l in self.geometries.linestrings:
        ls = fromstr(l.wkt)
        self.assertEqual(ls.geom_type, 'LineString')
        self.assertEqual(ls.geom_typeid, 1)
        self.assertEqual(ls.empty, False)
        self.assertEqual(ls.ring, False)
        if hasattr(l, 'centroid'):
            self.assertEqual(l.centroid, ls.centroid.tuple)
        if hasattr(l, 'tup'):
            self.assertEqual(l.tup, ls.tuple)
        self.assertEqual(True, ls == fromstr(l.wkt))
        self.assertEqual(False, ls == prev)
        # Indexing one past the end must raise.
        self.assertRaises(GEOSIndexError, ls.__getitem__, len(ls))
        prev = ls
        # Creating a LineString from a tuple, list, and numpy array
        self.assertEqual(ls, LineString(ls.tuple))  # tuple
        self.assertEqual(ls, LineString(*ls.tuple))  # as individual arguments
        self.assertEqual(ls, LineString([list(tup) for tup in ls.tuple]))  # as list
        self.assertEqual(ls.wkt, LineString(*tuple(Point(tup) for tup in ls.tuple)).wkt)  # Point individual arguments
        if numpy: self.assertEqual(ls, LineString(numpy.array(ls.tuple)))  # as numpy array
def test03b_multilinestring(self):
    "Testing MultiLineString objects."
    prev_geom = fromstr('POINT(0 0)')
    for ref in self.geometries.multilinestrings:
        ml = fromstr(ref.wkt)
        self.assertEqual(ml.geom_type, 'MultiLineString')
        self.assertEqual(ml.geom_typeid, 5)
        self.assertAlmostEqual(ref.centroid[0], ml.centroid.x, 9)
        self.assertAlmostEqual(ref.centroid[1], ml.centroid.y, 9)
        self.assertEqual(True, ml == fromstr(ref.wkt))
        self.assertEqual(False, ml == prev_geom)
        prev_geom = ml
        # Every component is a non-empty LineString.
        for component in ml:
            self.assertEqual(component.geom_type, 'LineString')
            self.assertEqual(component.geom_typeid, 1)
            self.assertEqual(component.empty, False)
        self.assertRaises(GEOSIndexError, ml.__getitem__, len(ml))
        # Rebuilding from clones or from raw coordinate tuples must round-trip.
        self.assertEqual(ml.wkt, MultiLineString(*tuple(s.clone() for s in ml)).wkt)
        self.assertEqual(ml, MultiLineString(*tuple(LineString(s.tuple) for s in ml)))
def test04_linearring(self):
    "Testing LinearRing objects."
    for ref in self.geometries.linearrings:
        ring = fromstr(ref.wkt)
        self.assertEqual(ring.geom_type, 'LinearRing')
        self.assertEqual(ring.geom_typeid, 2)
        self.assertEqual(ref.n_p, len(ring))
        self.assertEqual(True, ring.valid)
        self.assertEqual(False, ring.empty)
        # Construction from a tuple, individual arguments, a list of lists,
        # and (when available) a numpy array must all be equivalent.
        self.assertEqual(ring, LinearRing(ring.tuple))
        self.assertEqual(ring, LinearRing(*ring.tuple))
        self.assertEqual(ring, LinearRing([list(tup) for tup in ring.tuple]))
        if numpy: self.assertEqual(ring, LinearRing(numpy.array(ring.tuple)))
def test05a_polygons(self):
    "Testing Polygon objects."
    # Testing `from_bbox` class method
    # from_bbox takes (xmin, ymin, xmax, ymax) and must round-trip via `extent`.
    bbox = (-180, -90, 180, 90)
    p = Polygon.from_bbox( bbox )
    self.assertEqual(bbox, p.extent)
    prev = fromstr('POINT(0 0)')
    for p in self.geometries.polygons:
        # Creating the Polygon, testing its properties.
        poly = fromstr(p.wkt)
        self.assertEqual(poly.geom_type, 'Polygon')
        self.assertEqual(poly.geom_typeid, 3)
        self.assertEqual(poly.empty, False)
        self.assertEqual(poly.ring, False)
        self.assertEqual(p.n_i, poly.num_interior_rings)
        self.assertEqual(p.n_i + 1, len(poly)) # Testing __len__ (rings = interior + shell)
        self.assertEqual(p.n_p, poly.num_points)
        # Area & Centroid
        self.assertAlmostEqual(p.area, poly.area, 9)
        self.assertAlmostEqual(p.centroid[0], poly.centroid.tuple[0], 9)
        self.assertAlmostEqual(p.centroid[1], poly.centroid.tuple[1], 9)
        # Testing the geometry equivalence
        self.assertEqual(True, poly == fromstr(p.wkt))
        self.assertEqual(False, poly == prev) # Should not be equal to previous geometry
        self.assertEqual(True, poly != prev)
        # Testing the exterior ring
        ring = poly.exterior_ring
        self.assertEqual(ring.geom_type, 'LinearRing')
        self.assertEqual(ring.geom_typeid, 2)
        if p.ext_ring_cs:
            self.assertEqual(p.ext_ring_cs, ring.tuple)
            self.assertEqual(p.ext_ring_cs, poly[0].tuple) # Testing __getitem__
        # Testing __getitem__ and __setitem__ on invalid indices
        self.assertRaises(GEOSIndexError, poly.__getitem__, len(poly))
        self.assertRaises(GEOSIndexError, poly.__setitem__, len(poly), False)
        self.assertRaises(GEOSIndexError, poly.__getitem__, -1 * len(poly) - 1)
        # Testing __iter__
        for r in poly:
            self.assertEqual(r.geom_type, 'LinearRing')
            self.assertEqual(r.geom_typeid, 2)
        # Testing polygon construction.
        self.assertRaises(TypeError, Polygon.__init__, 0, [1, 2, 3])
        self.assertRaises(TypeError, Polygon.__init__, 'foo')
        # Polygon(shell, (hole1, ... holeN))
        rings = tuple(r for r in poly)
        self.assertEqual(poly, Polygon(rings[0], rings[1:]))
        # Polygon(shell_tuple, hole_tuple1, ... , hole_tupleN)
        ring_tuples = tuple(r.tuple for r in poly)
        self.assertEqual(poly, Polygon(*ring_tuples))
        # Constructing with tuples of LinearRings.
        self.assertEqual(poly.wkt, Polygon(*tuple(r for r in poly)).wkt)
        self.assertEqual(poly.wkt, Polygon(*tuple(LinearRing(r.tuple) for r in poly)).wkt)
def test05b_multipolygons(self):
    "Testing MultiPolygon objects."
    # Some fixtures are deliberately invalid; GEOS emits notices for them.
    print "\nBEGIN - expecting GEOS_NOTICE; safe to ignore.\n"
    prev = fromstr('POINT (0 0)')
    for mp in self.geometries.multipolygons:
        mpoly = fromstr(mp.wkt)
        self.assertEqual(mpoly.geom_type, 'MultiPolygon')
        self.assertEqual(mpoly.geom_typeid, 6)
        self.assertEqual(mp.valid, mpoly.valid)
        # Counts are only meaningful for valid geometries.
        if mp.valid:
            self.assertEqual(mp.num_geom, mpoly.num_geom)
            self.assertEqual(mp.n_p, mpoly.num_coords)
            self.assertEqual(mp.num_geom, len(mpoly))
        self.assertRaises(GEOSIndexError, mpoly.__getitem__, len(mpoly))
        for p in mpoly:
            self.assertEqual(p.geom_type, 'Polygon')
            self.assertEqual(p.geom_typeid, 3)
            self.assertEqual(p.valid, True)
        self.assertEqual(mpoly.wkt, MultiPolygon(*tuple(poly.clone() for poly in mpoly)).wkt)
    print "\nEND - expecting GEOS_NOTICE; safe to ignore.\n"
def test06a_memory_hijinks(self):
    "Testing Geometry __del__() on rings and polygons."
    #### Memory issues with rings and polygons
    # These tests are needed to ensure sanity with writable geometries.
    # Getting a polygon with interior rings, and pulling out the interior rings
    poly = fromstr(self.geometries.polygons[1].wkt)
    ring1 = poly[0]
    ring2 = poly[1]
    # These deletes should be 'harmless' since they are done on child geometries
    del ring1
    del ring2
    ring1 = poly[0]
    ring2 = poly[1]
    # Deleting the polygon
    del poly
    # Access to these rings is OK since they are clones.
    # (If indexing returned views into the parent, this would be a
    # use-after-free on the GEOS pointer.)
    s1, s2 = str(ring1), str(ring2)
def test08_coord_seq(self):
    "Testing Coordinate Sequence objects."
    for p in self.geometries.polygons:
        if p.ext_ring_cs:
            # Constructing the polygon and getting the coordinate sequence
            poly = fromstr(p.wkt)
            cs = poly.exterior_ring.coord_seq
            self.assertEqual(p.ext_ring_cs, cs.tuple)  # done in the Polygon test too.
            self.assertEqual(len(p.ext_ring_cs), len(cs))  # Making sure __len__ works
            # Checks __getitem__ and __setitem__
            for i in xrange(len(p.ext_ring_cs)):
                c1 = p.ext_ring_cs[i]  # Expected value
                c2 = cs[i]  # Value from coordseq
                self.assertEqual(c1, c2)
                # Constructing the test value to set the coordinate sequence with
                if len(c1) == 2: tset = (5, 23)
                else: tset = (5, 23, 8)
                cs[i] = tset
                # Making sure every set component matches what we expect.
                # (BUGFIX: the original redundantly re-assigned `cs[i] = tset`
                # on every iteration of this inner loop; one assignment,
                # done above, suffices and the checks are unchanged.)
                for j in range(len(tset)):
                    self.assertEqual(tset[j], cs[i][j])
def test09_relate_pattern(self):
    "Testing relate() and relate_pattern()."
    # An invalid DE-9IM pattern string must raise.
    geom = fromstr('POINT (0 0)')
    self.assertRaises(GEOSException, geom.relate_pattern, 0, 'invalid pattern, yo')
    for ref in self.geometries.relate_geoms:
        geom_a, geom_b = fromstr(ref.wkt_a), fromstr(ref.wkt_b)
        self.assertEqual(ref.result, geom_a.relate_pattern(geom_b, ref.pattern))
        self.assertEqual(ref.pattern, geom_a.relate(geom_b))
def test10_intersection(self):
    "Testing intersects() and intersection()."
    for topo, expected in zip(self.geometries.topology_geoms,
                              self.geometries.intersect_geoms):
        a = fromstr(topo.wkt_a)
        b = fromstr(topo.wkt_b)
        i1 = fromstr(expected.wkt)
        self.assertEqual(True, a.intersects(b))
        self.assertEqual(i1, a.intersection(b))
        self.assertEqual(i1, a & b)  # __and__ is intersection operator
        a &= b  # testing __iand__
        self.assertEqual(i1, a)
def test11_union(self):
    "Testing union()."
    for topo, expected in zip(self.geometries.topology_geoms,
                              self.geometries.union_geoms):
        a = fromstr(topo.wkt_a)
        b = fromstr(topo.wkt_b)
        u1 = fromstr(expected.wkt)
        self.assertEqual(u1, a.union(b))
        self.assertEqual(u1, a | b)  # __or__ is union operator
        a |= b  # testing __ior__
        self.assertEqual(u1, a)
def test12_difference(self):
    "Testing difference()."
    for topo, expected in zip(self.geometries.topology_geoms,
                              self.geometries.diff_geoms):
        a = fromstr(topo.wkt_a)
        b = fromstr(topo.wkt_b)
        d1 = fromstr(expected.wkt)
        self.assertEqual(d1, a.difference(b))
        self.assertEqual(d1, a - b)  # __sub__ is difference operator
        a -= b  # testing __isub__
        self.assertEqual(d1, a)
def test13_symdifference(self):
    "Testing sym_difference()."
    for topo, expected in zip(self.geometries.topology_geoms,
                              self.geometries.sdiff_geoms):
        a = fromstr(topo.wkt_a)
        b = fromstr(topo.wkt_b)
        d1 = fromstr(expected.wkt)
        self.assertEqual(d1, a.sym_difference(b))
        self.assertEqual(d1, a ^ b)  # __xor__ is symmetric difference operator
        a ^= b  # testing __ixor__
        self.assertEqual(d1, a)
def test14_buffer(self):
    "Testing buffer()."
    for bg in self.geometries.buffer_geoms:
        g = fromstr(bg.wkt)
        # The buffer we expect
        exp_buf = fromstr(bg.buffer_wkt)
        quadsegs = bg.quadsegs
        width = bg.width
        # Can't use a floating-point for the number of quadsegs.
        self.assertRaises(ctypes.ArgumentError, g.buffer, width, float(quadsegs))
        # Constructing our buffer
        buf = g.buffer(width, quadsegs)
        self.assertEqual(exp_buf.num_coords, buf.num_coords)
        self.assertEqual(len(exp_buf), len(buf))
        # Now assuring that each point in the buffer is almost equal
        for j in xrange(len(exp_buf)):
            exp_ring = exp_buf[j]
            buf_ring = buf[j]
            self.assertEqual(len(exp_ring), len(buf_ring))
            for k in xrange(len(exp_ring)):
                # Asserting the X, Y of each point are almost equal (due to floating point imprecision)
                self.assertAlmostEqual(exp_ring[k][0], buf_ring[k][0], 9)
                self.assertAlmostEqual(exp_ring[k][1], buf_ring[k][1], 9)
def test15_srid(self):
    "Testing the SRID property and keyword."
    # Testing SRID keyword on Point
    pnt = Point(5, 23, srid=4326)
    self.assertEqual(4326, pnt.srid)
    pnt.srid = 3084
    self.assertEqual(3084, pnt.srid)
    # SRIDs must be integers; strings are rejected at the ctypes layer.
    self.assertRaises(ctypes.ArgumentError, pnt.set_srid, '4326')
    # Testing SRID keyword on fromstr(), and on Polygon rings.
    poly = fromstr(self.geometries.polygons[1].wkt, srid=4269)
    self.assertEqual(4269, poly.srid)
    for ring in poly: self.assertEqual(4269, ring.srid)
    poly.srid = 4326
    self.assertEqual(4326, poly.shell.srid)
    # Testing SRID keyword on GeometryCollection
    gc = GeometryCollection(Point(5, 23), LineString((0, 0), (1.5, 1.5), (3, 3)), srid=32021)
    self.assertEqual(32021, gc.srid)
    for i in range(len(gc)): self.assertEqual(32021, gc[i].srid)
    # GEOS may get the SRID from HEXEWKB
    # 'POINT(5 23)' at SRID=4326 in hex form -- obtained from PostGIS
    # using `SELECT GeomFromText('POINT (5 23)', 4326);`.
    # (NB: `hex` shadows the builtin, but only locally.)
    hex = '0101000020E610000000000000000014400000000000003740'
    p1 = fromstr(hex)
    self.assertEqual(4326, p1.srid)
    # In GEOS 3.0.0rc1-4 when the EWKB and/or HEXEWKB is exported,
    # the SRID information is lost and set to -1 -- this is not a
    # problem on the 3.0.0 version (another reason to upgrade).
    exp_srid = self.null_srid
    p2 = fromstr(p1.hex)
    self.assertEqual(exp_srid, p2.srid)
    p3 = fromstr(p1.hex, srid=-1) # -1 is intended.
    self.assertEqual(-1, p3.srid)
def test16_mutable_geometries(self):
    "Testing the mutability of Polygons and Geometry Collections."
    # Mutating child geometries must not corrupt the parent (cf. test06a).
    ### Testing the mutability of Polygons ###
    for p in self.geometries.polygons:
        poly = fromstr(p.wkt)
        # Should only be able to use __setitem__ with LinearRing geometries.
        self.assertRaises(TypeError, poly.__setitem__, 0, LineString((1, 1), (2, 2)))
        # Constructing the new shell by adding 500 to every point in the old shell.
        shell_tup = poly.shell.tuple
        new_coords = []
        for point in shell_tup: new_coords.append((point[0] + 500., point[1] + 500.))
        new_shell = LinearRing(*tuple(new_coords))
        # Assigning polygon's exterior ring w/the new shell
        poly.exterior_ring = new_shell
        s = str(new_shell) # new shell is still accessible
        self.assertEqual(poly.exterior_ring, new_shell)
        self.assertEqual(poly[0], new_shell)
    ### Testing the mutability of Geometry Collections
    for tg in self.geometries.multipoints:
        mp = fromstr(tg.wkt)
        for i in range(len(mp)):
            # Creating a random point.
            pnt = mp[i]
            new = Point(random.randint(1, 100), random.randint(1, 100))
            # Testing the assignment
            mp[i] = new
            s = str(new) # what was used for the assignment is still accessible
            self.assertEqual(mp[i], new)
            self.assertEqual(mp[i].wkt, new.wkt)
            self.assertNotEqual(pnt, mp[i])
    # MultiPolygons involve much more memory management because each
    # Polygon w/in the collection has its own rings.
    for tg in self.geometries.multipolygons:
        mpoly = fromstr(tg.wkt)
        for i in xrange(len(mpoly)):
            poly = mpoly[i]
            old_poly = mpoly[i]
            # Offsetting the each ring in the polygon by 500.
            for j in xrange(len(poly)):
                r = poly[j]
                for k in xrange(len(r)): r[k] = (r[k][0] + 500., r[k][1] + 500.)
                poly[j] = r
            self.assertNotEqual(mpoly[i], poly)
            # Testing the assignment
            mpoly[i] = poly
            s = str(poly) # Still accessible
            self.assertEqual(mpoly[i], poly)
            self.assertNotEqual(mpoly[i], old_poly)
    # Extreme (!!) __setitem__ -- no longer works, have to detect
    # in the first object that __setitem__ is called in the subsequent
    # objects -- maybe mpoly[0, 0, 0] = (3.14, 2.71)?
    #mpoly[0][0][0] = (3.14, 2.71)
    #self.assertEqual((3.14, 2.71), mpoly[0][0][0])
    # Doing it more slowly..
    #self.assertEqual((3.14, 2.71), mpoly[0].shell[0])
    #del mpoly
def test17_threed(self):
    "Testing three-dimensional geometries."
    # A 3D point: coords round-trip, and a 2D reassignment is rejected.
    pnt = Point(2, 3, 8)
    self.assertEqual((2.,3.,8.), pnt.coords)
    self.assertRaises(TypeError, pnt.set_coords, (1.,2.))
    pnt.coords = (1.,2.,3.)
    self.assertEqual((1.,2.,3.), pnt.coords)
    # A 3D linestring: same checks via the __setitem__ protocol.
    line = LineString((2., 3., 8.), (50., 250., -117.))
    self.assertEqual(((2.,3.,8.), (50.,250.,-117.)), line.tuple)
    self.assertRaises(TypeError, line.__setitem__, 0, (1.,2.))
    line[0] = (1.,2.,3.)
    self.assertEqual((1.,2.,3.), line[0])
def test18_distance(self):
    "Testing the distance() function."
    origin = Point(0, 0)
    # Distance to self should be 0.
    self.assertEqual(0.0, origin.distance(Point(0, 0)))
    # Unit distance along one axis.
    self.assertEqual(1.0, origin.distance(Point(0, 1)))
    # Diagonal distance should be ~ sqrt(2).
    self.assertAlmostEqual(1.41421356237, origin.distance(Point(1, 1)), 11)
    # Distances are from the closest vertex in each geometry --
    # should be 3 (distance from (2, 2) to (5, 2)).
    ls1 = LineString((0, 0), (1, 1), (2, 2))
    ls2 = LineString((5, 2), (6, 1), (7, 0))
    self.assertEqual(3, ls1.distance(ls2))
def test19_length(self):
    "Testing the length property."
    # Points have 0 length.
    self.assertEqual(0.0, Point(0, 0).length)
    # Diagonal unit segment: ~ sqrt(2).
    self.assertAlmostEqual(1.41421356237, LineString((0, 0), (1, 1)).length, 11)
    # A polygon's length is its perimeter.
    unit_square = Polygon(LinearRing((0, 0), (0, 1), (1, 1), (1, 0), (0, 0)))
    self.assertEqual(4.0, unit_square.length)
    # A collection's length is the sum of each member's length.
    self.assertEqual(8.0, MultiPolygon(unit_square.clone(), unit_square).length)
def test20a_emptyCollections(self):
    "Testing empty geometries and collections."
    gc1 = GeometryCollection([])
    gc2 = fromstr('GEOMETRYCOLLECTION EMPTY')
    pnt = fromstr('POINT EMPTY')
    ls = fromstr('LINESTRING EMPTY')
    poly = fromstr('POLYGON EMPTY')
    mls = fromstr('MULTILINESTRING EMPTY')
    mpoly1 = fromstr('MULTIPOLYGON EMPTY')
    mpoly2 = MultiPolygon(())
    for g in [gc1, gc2, pnt, ls, poly, mls, mpoly1, mpoly2]:
        self.assertEqual(True, g.empty)
        # Testing len() and num_geom.
        # Empty polygons still carry one (empty) shell; points and
        # linestrings count as one geometry with zero coordinates.
        if isinstance(g, Polygon):
            self.assertEqual(1, len(g)) # Has one empty linear ring
            self.assertEqual(1, g.num_geom)
            self.assertEqual(0, len(g[0]))
        elif isinstance(g, (Point, LineString)):
            self.assertEqual(1, g.num_geom)
            self.assertEqual(0, len(g))
        else:
            self.assertEqual(0, g.num_geom)
            self.assertEqual(0, len(g))
        # Testing __getitem__ (doesn't work on Point or Polygon)
        if isinstance(g, Point):
            self.assertRaises(GEOSIndexError, g.get_x)
        elif isinstance(g, Polygon):
            lr = g.shell
            self.assertEqual('LINEARRING EMPTY', lr.wkt)
            self.assertEqual(0, len(lr))
            self.assertEqual(True, lr.empty)
            self.assertRaises(GEOSIndexError, lr.__getitem__, 0)
        else:
            self.assertRaises(GEOSIndexError, g.__getitem__, 0)
def test20b_collections_of_collections(self):
    "Testing GeometryCollection handling of other collections."
    # Build one big GEOMETRYCOLLECTION WKT out of every valid multipolygon,
    # multilinestring, polygon and multipoint fixture.
    members = [mp.wkt for mp in self.geometries.multipolygons if mp.valid]
    members += [mls.wkt for mls in self.geometries.multilinestrings]
    members += [p.wkt for p in self.geometries.polygons]
    members += [mp.wkt for mp in self.geometries.multipoints]
    # Should construct ok from WKT.
    gc_from_wkt = GEOSGeometry('GEOMETRYCOLLECTION(%s)' % ','.join(members))
    # Rebuilding from the individual member geometries must be equal.
    gc_from_geoms = GeometryCollection(*tuple(g for g in gc_from_wkt))
    self.assertEqual(gc_from_wkt, gc_from_geoms)
def test21_test_gdal(self):
    "Testing `ogr` and `srs` properties."
    if not gdal.HAS_GDAL:
        return
    # Without an SRID there is an OGR counterpart but no spatial reference.
    no_srid = fromstr('POINT(5 23)')
    self.assertEqual(True, isinstance(no_srid.ogr, gdal.OGRGeometry))
    self.assertEqual(no_srid.srs, None)
    # With an SRID both `ogr` and `srs` are populated and consistent.
    with_srid = fromstr('LINESTRING(0 0, 5 5, 23 23)', srid=4326)
    self.assertEqual(True, isinstance(with_srid.ogr, gdal.OGRGeometry))
    self.assertEqual(True, isinstance(with_srid.srs, gdal.SpatialReference))
    self.assertEqual(with_srid.hex, with_srid.ogr.hex)
    self.assertEqual('WGS 84', with_srid.srs.name)
def test22_copy(self):
    "Testing use with the Python `copy` module."
    import copy
    poly = GEOSGeometry('POLYGON((0 0, 0 23, 23 23, 23 0, 0 0), (5 5, 5 10, 10 10, 10 5, 5 5))')
    shallow = copy.copy(poly)
    deep = copy.deepcopy(poly)
    # Both copies must wrap their own GEOS pointer, never share the original's.
    self.assertNotEqual(poly._ptr, shallow._ptr)
    self.assertNotEqual(poly._ptr, deep._ptr)
def test23_transform(self):
    "Testing `transform` method."
    if not gdal.HAS_GDAL: return
    orig = GEOSGeometry('POINT (-104.609 38.255)', 4326)
    trans = GEOSGeometry('POINT (992385.4472045 481455.4944650)', 2774)
    # Using a srid, a SpatialReference object, and a CoordTransform object
    # for transformations.
    t1, t2, t3 = orig.clone(), orig.clone(), orig.clone()
    t1.transform(trans.srid)
    t2.transform(gdal.SpatialReference('EPSG:2774'))
    ct = gdal.CoordTransform(gdal.SpatialReference('WGS84'), gdal.SpatialReference(2774))
    t3.transform(ct)
    # Testing use of the `clone` keyword.
    k1 = orig.clone()
    k2 = k1.transform(trans.srid, clone=True)
    self.assertEqual(k1, orig)  # clone=True must leave the source untouched
    self.assertNotEqual(k1, k2)
    # All four transforms must agree with the reference point to 3 places.
    prec = 3
    for p in (t1, t2, t3, k2):
        self.assertAlmostEqual(trans.x, p.x, prec)
        self.assertAlmostEqual(trans.y, p.y, prec)
def test23_transform_noop(self):
    """ Testing `transform` method (SRID match) """
    # transform() should no-op if source & dest SRIDs match,
    # regardless of whether GDAL is available.
    if gdal.HAS_GDAL:
        g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
        gt = g.tuple
        g.transform(4326)
        self.assertEqual(g.tuple, gt)
        self.assertEqual(g.srid, 4326)
        g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
        g1 = g.transform(4326, clone=True)
        self.assertEqual(g1.tuple, g.tuple)
        self.assertEqual(g1.srid, 4326)
        self.assertTrue(g1 is not g, "Clone didn't happen")
    # Re-run the same checks with GDAL forcibly disabled; the no-op
    # path must not require GDAL at all.
    old_has_gdal = gdal.HAS_GDAL
    try:
        gdal.HAS_GDAL = False
        g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
        gt = g.tuple
        g.transform(4326)
        self.assertEqual(g.tuple, gt)
        self.assertEqual(g.srid, 4326)
        g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
        g1 = g.transform(4326, clone=True)
        self.assertEqual(g1.tuple, g.tuple)
        self.assertEqual(g1.srid, 4326)
        self.assertTrue(g1 is not g, "Clone didn't happen")
    finally:
        gdal.HAS_GDAL = old_has_gdal
def test23_transform_nosrid(self):
    """ Testing `transform` method (no SRID) """
    # Raise a warning if SRID <0/None.
    import warnings
    print "\nBEGIN - expecting Warnings; safe to ignore.\n"
    # Test for do-nothing behavior.
    try:
        # Keeping line-noise down by only printing the relevant
        # warnings once.
        warnings.simplefilter('once', UserWarning)
        warnings.simplefilter('once', FutureWarning)
        # With no usable SRID, transform() leaves the geometry untouched
        # and a clone=True transform yields None.
        g = GEOSGeometry('POINT (-104.609 38.255)', srid=None)
        g.transform(2774)
        self.assertEqual(g.tuple, (-104.609, 38.255))
        self.assertEqual(g.srid, None)
        g = GEOSGeometry('POINT (-104.609 38.255)', srid=None)
        g1 = g.transform(2774, clone=True)
        self.assertTrue(g1 is None)
        g = GEOSGeometry('POINT (-104.609 38.255)', srid=-1)
        g.transform(2774)
        self.assertEqual(g.tuple, (-104.609, 38.255))
        self.assertEqual(g.srid, -1)
        g = GEOSGeometry('POINT (-104.609 38.255)', srid=-1)
        g1 = g.transform(2774, clone=True)
        self.assertTrue(g1 is None)
    finally:
        warnings.simplefilter('default', UserWarning)
        warnings.simplefilter('default', FutureWarning)
    print "\nEND - expecting Warnings; safe to ignore.\n"
    # test warning is raised
    try:
        # Escalate FutureWarning to an error so assertRaises can see it.
        warnings.simplefilter('error', FutureWarning)
        warnings.simplefilter('ignore', UserWarning)
        g = GEOSGeometry('POINT (-104.609 38.255)', srid=None)
        self.assertRaises(FutureWarning, g.transform, 2774)
        g = GEOSGeometry('POINT (-104.609 38.255)', srid=None)
        self.assertRaises(FutureWarning, g.transform, 2774, clone=True)
        g = GEOSGeometry('POINT (-104.609 38.255)', srid=-1)
        self.assertRaises(FutureWarning, g.transform, 2774)
        g = GEOSGeometry('POINT (-104.609 38.255)', srid=-1)
        self.assertRaises(FutureWarning, g.transform, 2774, clone=True)
    finally:
        warnings.simplefilter('default', FutureWarning)
        warnings.simplefilter('default', UserWarning)
def test23_transform_nogdal(self):
    """ Testing `transform` method (GDAL not available) """
    old_has_gdal = gdal.HAS_GDAL
    try:
        # Simulate a GDAL-less environment: a real reprojection must raise.
        gdal.HAS_GDAL = False
        for clone_flag in (False, True):
            geom = GEOSGeometry('POINT (-104.609 38.255)', 4326)
            self.assertRaises(GEOSException, geom.transform, 2774, clone=clone_flag)
    finally:
        gdal.HAS_GDAL = old_has_gdal
def test24_extent(self):
    "Testing `extent` method."
    # A MultiPoint's extent is (xmin, ymin, xmax, ymax) over all members.
    mp = MultiPoint(Point(5, 23), Point(0, 0), Point(10, 50))
    self.assertEqual((0.0, 0.0, 10.0, 50.0), mp.extent)
    # A point's extent is the point itself, repeated.
    pnt = Point(5.23, 17.8)
    self.assertEqual((5.23, 17.8, 5.23, 17.8), pnt.extent)
    # A 'real world' polygon: the extent is derived from the shell's coords.
    poly = fromstr(self.geometries.polygons[3].wkt)
    xs, ys = poly.shell.x, poly.shell.y
    self.assertEqual((min(xs), min(ys), max(xs), max(ys)), poly.extent)
def test25_pickle(self):
    "Testing pickling and unpickling support."
    # Using both pickle and cPickle -- just 'cause.
    import pickle, cPickle
    # Creating a list of test geometries for pickling,
    # and setting the SRID on some of them.
    def get_geoms(lst, srid=None):
        return [GEOSGeometry(tg.wkt, srid) for tg in lst]
    tgeoms = get_geoms(self.geometries.points)
    tgeoms.extend(get_geoms(self.geometries.multilinestrings, 4326))
    tgeoms.extend(get_geoms(self.geometries.polygons, 3084))
    tgeoms.extend(get_geoms(self.geometries.multipolygons, 900913))
    # The SRID won't be exported in GEOS 3.0 release candidates.
    no_srid = self.null_srid == -1
    for geom in tgeoms:
        s1, s2 = cPickle.dumps(geom), pickle.dumps(geom)
        g1, g2 = cPickle.loads(s1), pickle.loads(s2)
        for tmpg in (g1, g2):
            # Round-trip must preserve equality, and the SRID except on
            # the GEOS 3.0 RC builds noted above.
            self.assertEqual(geom, tmpg)
            if not no_srid: self.assertEqual(geom.srid, tmpg.srid)
def test26_prepared(self):
    "Testing PreparedGeometry support."
    if not GEOS_PREPARE:
        return
    # Two unit squares touching diagonally at (5, 5); the prepared version
    # must agree with the plain geometry and additionally expose covers().
    mpoly = GEOSGeometry('MULTIPOLYGON(((0 0,0 5,5 5,5 0,0 0)),((5 5,5 10,10 10,10 5,5 5)))')
    prep = mpoly.prepared
    cases = ((Point(5, 5), True),
             (Point(7.5, 7.5), True),
             (Point(2.5, 7.5), False))  # No `covers` op for regular GEOS geoms.
    for pnt, covered in cases:
        self.assertEqual(mpoly.contains(pnt), prep.contains(pnt))
        self.assertEqual(mpoly.intersects(pnt), prep.intersects(pnt))
        self.assertEqual(covered, prep.covers(pnt))
def test26_line_merge(self):
    "Testing line merge support"
    # (input geometry, expected merged result) pairs.
    cases = (
        (fromstr('LINESTRING(1 1, 1 1, 3 3)'), fromstr('LINESTRING(1 1, 3 3)')),
        (fromstr('MULTILINESTRING((1 1, 3 3), (3 3, 4 2))'), fromstr('LINESTRING (1 1, 3 3, 4 2)')),
    )
    for geom, expected in cases:
        self.assertEqual(expected, geom.merged)
def test27_valid_reason(self):
    "Testing IsValidReason support"
    # Skipping tests if GEOS < v3.1.
    if not GEOS_PREPARE: return
    g = GEOSGeometry("POINT(0 0)")
    self.assertTrue(g.valid)
    self.assertTrue(isinstance(g.valid_reason, basestring))
    self.assertEqual(g.valid_reason, "Valid Geometry")
    # A degenerate linestring triggers a GEOS notice and a reason string.
    print "\nBEGIN - expecting GEOS_NOTICE; safe to ignore.\n"
    g = GEOSGeometry("LINESTRING(0 0, 0 0)")
    self.assertTrue(not g.valid)
    self.assertTrue(isinstance(g.valid_reason, basestring))
    self.assertTrue(g.valid_reason.startswith("Too few points in geometry component"))
    print "\nEND - expecting GEOS_NOTICE; safe to ignore.\n"
def test28_geos_version(self):
    "Testing the GEOS version regular expression."
    from django.contrib.gis.geos.libgeos import version_regex
    # Each CAPI version string must parse, exposing the bare GEOS version.
    for raw, expected in (('3.0.0rc4-CAPI-1.3.3', '3.0.0'),
                          ('3.0.0-CAPI-1.4.1', '3.0.0'),
                          ('3.4.0dev-CAPI-1.8.0', '3.4.0')):
        match = version_regex.match(raw)
        self.assertTrue(match)
        self.assertEqual(match.group('version'), expected)
def suite():
    "Build a TestSuite holding all GEOS geometry tests."
    geos_suite = unittest.TestSuite()
    geos_suite.addTest(unittest.makeSuite(GEOSTest))
    return geos_suite
def run(verbosity=2):
    "Run the GEOS test suite with a plain-text runner."
    runner = unittest.TextTestRunner(verbosity=verbosity)
    runner.run(suite())
| gpl-2.0 |
octavioturra/aritial | google_appengine/lib/django/django/db/models/fields/related.py | 32 | 36473 | from django.db import backend, transaction
from django.db.models import signals, get_model
from django.db.models.fields import AutoField, Field, IntegerField, get_ul_class
from django.db.models.related import RelatedObject
from django.utils.text import capfirst
from django.utils.translation import gettext_lazy, string_concat, ngettext
from django.utils.functional import curry
from django.core import validators
from django import oldforms
from django import newforms as forms
from django.dispatch import dispatcher
# For Python 2.3
if not hasattr(__builtins__, 'set'):
from sets import Set as set
# Values for Relation.edit_inline.
TABULAR, STACKED = 1, 2
# Sentinel value for a ForeignKey/M2M 'to' argument meaning
# "the model currently being defined" (recursive relationship).
RECURSIVE_RELATIONSHIP_CONSTANT = 'self'
# Maps (module, model name) -> [(model class, field), ...] for relations
# whose target model class has not been loaded yet; see add_lookup().
pending_lookups = {}
def add_lookup(rel_cls, field):
    """Resolve `field`'s string-named target model immediately if it is
    already loaded; otherwise queue the field for do_pending_lookups()."""
    target_name = field.rel.to
    lookup_key = (rel_cls.__module__, target_name)
    # Has the model already been loaded?
    target_model = get_model(rel_cls._meta.app_label, target_name, False)
    if target_model:
        # If so, resolve the string reference right away.
        field.rel.to = target_model
        field.do_related_class(target_model, rel_cls)
    else:
        # Mark the related field for later lookup.
        pending_lookups.setdefault(lookup_key, []).append((rel_cls, field))
def do_pending_lookups(sender):
    """Signal handler for `class_prepared`: resolve relations that were
    waiting for `sender` to be defined (see add_lookup())."""
    key = (sender.__module__, sender.__name__)
    # Use pop() rather than setdefault(): setdefault() inserted an empty
    # list into `pending_lookups` for every prepared model -- even ones
    # nothing was waiting on -- and left resolved entries there forever.
    for rel_cls, field in pending_lookups.pop(key, []):
        field.rel.to = sender
        field.do_related_class(sender, rel_cls)
# Resolve queued string relations as soon as each model class is prepared.
dispatcher.connect(do_pending_lookups, signal=signals.class_prepared)
def manipulator_valid_rel_key(f, self, field_data, all_data):
    "Validates that the value is a valid foreign key"
    klass = f.rel.to
    try:
        # Look the submitted value up through the related model's
        # default manager, keyed on the field the relation points at.
        klass._default_manager.get(**{f.rel.field_name: field_data})
    except klass.DoesNotExist:
        # NOTE(review): `_` is not imported in this module -- presumably the
        # translation machinery installs it as a builtin; verify.
        raise validators.ValidationError, _("Please enter a valid %s.") % f.verbose_name
#HACK
class RelatedField(object):
    """Mixin shared by all relation fields (FK, O2O, M2M): resolves
    string model references and prepares values for database lookups."""

    def contribute_to_class(self, cls, name):
        sup = super(RelatedField, self)
        # Add an accessor to allow easy determination of the related query path for this field
        self.related_query_name = curry(self._get_related_query_name, cls._meta)
        if hasattr(sup, 'contribute_to_class'):
            sup.contribute_to_class(cls, name)
        other = self.rel.to
        if isinstance(other, basestring):
            # 'self' means a recursive relation back to the owning model.
            if other == RECURSIVE_RELATIONSHIP_CONSTANT:
                self.rel.to = cls.__name__
            add_lookup(cls, self)
        else:
            self.do_related_class(other, cls)

    def set_attributes_from_rel(self):
        # Derive name/verbose_name/field_name from the target model when
        # they were not given explicitly.
        self.name = self.name or (self.rel.to._meta.object_name.lower() + '_' + self.rel.to._meta.pk.name)
        self.verbose_name = self.verbose_name or self.rel.to._meta.verbose_name
        self.rel.field_name = self.rel.field_name or self.rel.to._meta.pk.name

    def do_related_class(self, other, cls):
        self.set_attributes_from_rel()
        related = RelatedObject(other, cls, self)
        self.contribute_to_related_class(other, related)

    def get_db_prep_lookup(self, lookup_type, value):
        # If we are doing a lookup on a Related Field, we must be
        # comparing object instances. The value should be the PK of value,
        # not value itself.
        def pk_trace(value):
            # Value may be a primary key, or an object held in a relation.
            # If it is an object, then we need to get the primary key value for
            # that object. In certain conditions (especially one-to-one relations),
            # the primary key may itself be an object - so we need to keep drilling
            # down until we hit a value that can be used for a comparison.
            v = value
            try:
                while True:
                    v = getattr(v, v._meta.pk.name)
            except AttributeError:
                pass
            return v
        if lookup_type == 'exact':
            return [pk_trace(value)]
        if lookup_type == 'in':
            return [pk_trace(v) for v in value]
        elif lookup_type == 'isnull':
            # NULL checks need no query parameters.
            return []
        raise TypeError, "Related Field has invalid lookup: %s" % lookup_type

    def _get_related_query_name(self, opts):
        # This method defines the name that can be used to identify this related object
        # in a table-spanning query. It uses the lower-cased object_name by default,
        # but this can be overridden with the "related_name" option.
        return self.rel.related_name or opts.object_name.lower()
class SingleRelatedObjectDescriptor(object):
    # This class provides the functionality that makes the related-object
    # managers available as attributes on a model class, for fields that have
    # a single "remote" value, on the class pointed to by a related field.
    # In the example "place.restaurant", the restaurant attribute is a
    # SingleRelatedObjectDescriptor instance.
    def __init__(self, related):
        # ``related`` is the RelatedObject describing the reverse relation.
        self.related = related
    def __get__(self, instance, instance_type=None):
        # Class-level access makes no sense: there is no row to join against.
        if instance is None:
            raise AttributeError, "%s must be accessed via instance" % self.related.opts.object_name
        # Fetch the single related object whose FK/O2O points at ``instance``.
        params = {'%s__pk' % self.related.field.name: instance._get_pk_val()}
        rel_obj = self.related.model._default_manager.get(**params)
        return rel_obj
    def __set__(self, instance, value):
        if instance is None:
            raise AttributeError, "%s must be accessed via instance" % self.related.opts.object_name
        # Set the value of the related field: point ``value`` at ``instance``.
        setattr(value, self.related.field.rel.get_related_field().attname, instance)
        # Clear the cache, if it exists, so the next read sees the new target.
        try:
            delattr(value, self.related.field.get_cache_name())
        except AttributeError:
            pass
class ReverseSingleRelatedObjectDescriptor(object):
    # This class provides the functionality that makes the related-object
    # managers available as attributes on a model class, for fields that have
    # a single "remote" value, on the class that defines the related field.
    # In the example "choice.poll", the poll attribute is a
    # ReverseSingleRelatedObjectDescriptor instance.
    def __init__(self, field_with_rel):
        # ``field_with_rel`` is the ForeignKey/OneToOneField instance.
        self.field = field_with_rel
    def __get__(self, instance, instance_type=None):
        if instance is None:
            raise AttributeError("%s must be accessed via instance" % self.field.name)
        cache_name = self.field.get_cache_name()
        try:
            # Return the previously fetched related object, if any.
            return getattr(instance, cache_name)
        except AttributeError:
            val = getattr(instance, self.field.attname)
            if val is None:
                # If NULL is an allowed value, return it.
                if self.field.null:
                    return None
                raise self.field.rel.to.DoesNotExist
            other_field = self.field.rel.get_related_field()
            if other_field.rel:
                params = {'%s__pk' % self.field.rel.field_name: val}
            else:
                params = {'%s__exact' % self.field.rel.field_name: val}
            rel_obj = self.field.rel.to._default_manager.get(**params)
            # Cache the fetched object on the instance for subsequent reads.
            setattr(instance, cache_name, rel_obj)
            return rel_obj
    def __set__(self, instance, value):
        if instance is None:
            # BUGFIX: this previously read ``self._field.name`` -- an
            # attribute that does not exist -- so callers got a confusing
            # "'...' object has no attribute '_field'" error instead of the
            # intended message below.
            raise AttributeError("%s must be accessed via instance" % self.field.name)
        # Set the value of the related field: store the target's key (or
        # None when ``value`` has no such attribute, e.g. value is None).
        try:
            val = getattr(value, self.field.rel.get_related_field().attname)
        except AttributeError:
            val = None
        setattr(instance, self.field.attname, val)
        # Clear the cache, if it exists, so the next read refetches.
        try:
            delattr(instance, self.field.get_cache_name())
        except AttributeError:
            pass
class ForeignRelatedObjectsDescriptor(object):
    # This class provides the functionality that makes the related-object
    # managers available as attributes on a model class, for fields that have
    # multiple "remote" values and have a ForeignKey pointed at them by
    # some other model. In the example "poll.choice_set", the choice_set
    # attribute is a ForeignRelatedObjectsDescriptor instance.
    def __init__(self, related):
        self.related = related   # RelatedObject instance
    def __get__(self, instance, instance_type=None):
        # The manager needs a concrete row to filter against.
        if instance is None:
            raise AttributeError, "Manager must be accessed via instance"
        rel_field = self.related.field
        rel_model = self.related.model
        # Dynamically create a class that subclasses the related
        # model's default manager; the methods below close over ``instance``
        # and ``rel_field`` so they can rewire the foreign key.
        superclass = self.related.model._default_manager.__class__
        class RelatedManager(superclass):
            def get_query_set(self):
                # Restrict the queryset to rows whose FK points at instance.
                return superclass.get_query_set(self).filter(**(self.core_filters))
            def add(self, *objs):
                # Point each object's FK at ``instance`` and persist it.
                for obj in objs:
                    setattr(obj, rel_field.name, instance)
                    obj.save()
            add.alters_data = True
            def create(self, **kwargs):
                # Build a new related object already attached to instance.
                new_obj = self.model(**kwargs)
                self.add(new_obj)
                return new_obj
            create.alters_data = True
            # remove() and clear() are only provided if the ForeignKey can
            # have a value of null -- otherwise detaching an object would
            # leave its FK column in an invalid state.
            if rel_field.null:
                def remove(self, *objs):
                    val = getattr(instance, rel_field.rel.get_related_field().attname)
                    for obj in objs:
                        # Is obj actually part of this descriptor set?
                        if getattr(obj, rel_field.attname) == val:
                            setattr(obj, rel_field.name, None)
                            obj.save()
                        else:
                            raise rel_field.rel.to.DoesNotExist, "%r is not related to %r." % (obj, instance)
                remove.alters_data = True
                def clear(self):
                    # Detach every related object by nulling its FK.
                    for obj in self.all():
                        setattr(obj, rel_field.name, None)
                        obj.save()
                clear.alters_data = True
        manager = RelatedManager()
        manager.core_filters = {'%s__pk' % rel_field.name: getattr(instance, rel_field.rel.get_related_field().attname)}
        manager.model = self.related.model
        return manager
    def __set__(self, instance, value):
        if instance is None:
            raise AttributeError, "Manager must be accessed via instance"
        manager = self.__get__(instance)
        # If the foreign key can support nulls, then completely clear the related set.
        # Otherwise, just move the named objects into the set.
        if self.related.field.null:
            manager.clear()
        manager.add(*value)
def create_many_related_manager(superclass):
    """Creates a manager that subclasses 'superclass' (which is a Manager)
    and adds behavior for many-to-many related objects, manipulating the
    m2m join table directly with SQL."""
    class ManyRelatedManager(superclass):
        def __init__(self, model=None, core_filters=None, instance=None, symmetrical=None,
                join_table=None, source_col_name=None, target_col_name=None):
            super(ManyRelatedManager, self).__init__()
            self.core_filters = core_filters
            self.model = model
            # For a symmetrical m2m-to-self, every mutation is mirrored in
            # the join table with source/target columns swapped.
            self.symmetrical = symmetrical
            self.instance = instance
            self.join_table = join_table
            self.source_col_name = source_col_name
            self.target_col_name = target_col_name
            self._pk_val = self.instance._get_pk_val()
            if self._pk_val is None:
                # Without a PK there is nothing to store in the join table.
                raise ValueError("%r instance needs to have a primary key value before a many-to-many relationship can be used." % model)
        def get_query_set(self):
            # Restrict to the rows joined to this instance.
            return superclass.get_query_set(self).filter(**(self.core_filters))
        def add(self, *objs):
            self._add_items(self.source_col_name, self.target_col_name, *objs)
            # If this is a symmetrical m2m relation to self, add the mirror entry in the m2m table
            if self.symmetrical:
                self._add_items(self.target_col_name, self.source_col_name, *objs)
        add.alters_data = True
        def remove(self, *objs):
            self._remove_items(self.source_col_name, self.target_col_name, *objs)
            # If this is a symmetrical m2m relation to self, remove the mirror entry in the m2m table
            if self.symmetrical:
                self._remove_items(self.target_col_name, self.source_col_name, *objs)
        remove.alters_data = True
        def clear(self):
            self._clear_items(self.source_col_name)
            # If this is a symmetrical m2m relation to self, clear the mirror entry in the m2m table
            if self.symmetrical:
                self._clear_items(self.target_col_name)
        clear.alters_data = True
        def create(self, **kwargs):
            # Unlike the FK manager's create(), the new object must be saved
            # (and thus get a PK) before it can be linked via the join table.
            new_obj = self.model(**kwargs)
            new_obj.save()
            self.add(new_obj)
            return new_obj
        create.alters_data = True
        def _add_items(self, source_col_name, target_col_name, *objs):
            # join_table: name of the m2m link table
            # source_col_name: the PK colname in join_table for the source object
            # target_col_name: the PK colname in join_table for the target object
            # *objs - objects to add. Either object instances, or primary keys of object instances.
            from django.db import connection
            # If there aren't any objects, there is nothing to do.
            if objs:
                # Check that all the objects are of the right type; collect
                # the primary keys to link.
                new_ids = set()
                for obj in objs:
                    if isinstance(obj, self.model):
                        new_ids.add(obj._get_pk_val())
                    else:
                        new_ids.add(obj)
                # Add the newly created or already existing objects to the join table.
                # First find out which items are already added, to avoid adding them twice
                cursor = connection.cursor()
                cursor.execute("SELECT %s FROM %s WHERE %s = %%s AND %s IN (%s)" % \
                    (target_col_name, self.join_table, source_col_name,
                    target_col_name, ",".join(['%s'] * len(new_ids))),
                    [self._pk_val] + list(new_ids))
                # rowcount may be None/-1 on some backends; fall back to an
                # empty set in that case.
                if cursor.rowcount is not None and cursor.rowcount != 0:
                    existing_ids = set([row[0] for row in cursor.fetchmany(cursor.rowcount)])
                else:
                    existing_ids = set()
                # Add the ones that aren't there already
                for obj_id in (new_ids - existing_ids):
                    cursor.execute("INSERT INTO %s (%s, %s) VALUES (%%s, %%s)" % \
                        (self.join_table, source_col_name, target_col_name),
                        [self._pk_val, obj_id])
                transaction.commit_unless_managed()
        def _remove_items(self, source_col_name, target_col_name, *objs):
            # source_col_name: the PK colname in join_table for the source object
            # target_col_name: the PK colname in join_table for the target object
            # *objs - objects to remove
            from django.db import connection
            # If there aren't any objects, there is nothing to do.
            if objs:
                # Check that all the objects are of the right type
                old_ids = set()
                for obj in objs:
                    if isinstance(obj, self.model):
                        old_ids.add(obj._get_pk_val())
                    else:
                        old_ids.add(obj)
                # Remove the specified objects from the join table
                cursor = connection.cursor()
                cursor.execute("DELETE FROM %s WHERE %s = %%s AND %s IN (%s)" % \
                    (self.join_table, source_col_name,
                    target_col_name, ",".join(['%s'] * len(old_ids))),
                    [self._pk_val] + list(old_ids))
                transaction.commit_unless_managed()
        def _clear_items(self, source_col_name):
            # source_col_name: the PK colname in join_table for the source object
            from django.db import connection
            cursor = connection.cursor()
            cursor.execute("DELETE FROM %s WHERE %s = %%s" % \
                (self.join_table, source_col_name),
                [self._pk_val])
            transaction.commit_unless_managed()
    return ManyRelatedManager
class ManyRelatedObjectsDescriptor(object):
    # This class provides the functionality that makes the related-object
    # managers available as attributes on a model class, for fields that have
    # multiple "remote" values and have a ManyToManyField pointed at them by
    # some other model (rather than having a ManyToManyField themselves).
    # In the example "publication.article_set", the article_set attribute is a
    # ManyRelatedObjectsDescriptor instance.
    def __init__(self, related):
        self.related = related   # RelatedObject instance
    def __get__(self, instance, instance_type=None):
        if instance is None:
            raise AttributeError, "Manager must be accessed via instance"
        # Dynamically create a class that subclasses the related
        # model's default manager.
        rel_model = self.related.model
        superclass = rel_model._default_manager.__class__
        RelatedManager = create_many_related_manager(superclass)
        qn = backend.quote_name
        # This is the *reverse* side of the relation, so the source/target
        # columns of the join table are swapped relative to the field side.
        manager = RelatedManager(
            model=rel_model,
            core_filters={'%s__pk' % self.related.field.name: instance._get_pk_val()},
            instance=instance,
            symmetrical=False,
            join_table=qn(self.related.field.m2m_db_table()),
            source_col_name=qn(self.related.field.m2m_reverse_name()),
            target_col_name=qn(self.related.field.m2m_column_name())
        )
        return manager
    def __set__(self, instance, value):
        if instance is None:
            raise AttributeError, "Manager must be accessed via instance"
        # Assignment replaces the whole related set.
        manager = self.__get__(instance)
        manager.clear()
        manager.add(*value)
class ReverseManyRelatedObjectsDescriptor(object):
    # This class provides the functionality that makes the related-object
    # managers available as attributes on a model class, for fields that have
    # multiple "remote" values and have a ManyToManyField defined in their
    # model (rather than having another model pointed *at* them).
    # In the example "article.publications", the publications attribute is a
    # ReverseManyRelatedObjectsDescriptor instance.
    def __init__(self, m2m_field):
        # ``m2m_field`` is the ManyToManyField instance.
        self.field = m2m_field
    def __get__(self, instance, instance_type=None):
        if instance is None:
            raise AttributeError, "Manager must be accessed via instance"
        # Dynamically create a class that subclasses the related
        # model's default manager.
        rel_model = self.field.rel.to
        superclass = rel_model._default_manager.__class__
        RelatedManager = create_many_related_manager(superclass)
        qn = backend.quote_name
        # Mirroring (symmetrical) only applies to a self-referential m2m.
        manager = RelatedManager(
            model=rel_model,
            core_filters={'%s__pk' % self.field.related_query_name(): instance._get_pk_val()},
            instance=instance,
            symmetrical=(self.field.rel.symmetrical and instance.__class__ == rel_model),
            join_table=qn(self.field.m2m_db_table()),
            source_col_name=qn(self.field.m2m_column_name()),
            target_col_name=qn(self.field.m2m_reverse_name())
        )
        return manager
    def __set__(self, instance, value):
        if instance is None:
            raise AttributeError, "Manager must be accessed via instance"
        # Assignment replaces the whole related set.
        manager = self.__get__(instance)
        manager.clear()
        manager.add(*value)
class ForeignKey(RelatedField, Field):
    """A many-to-one relation, stored as a database column ``<name>_id``."""
    empty_strings_allowed = False
    def __init__(self, to, to_field=None, **kwargs):
        try:
            to_name = to._meta.object_name.lower()
        except AttributeError: # to._meta doesn't exist, so it must be RECURSIVE_RELATIONSHIP_CONSTANT
            assert isinstance(to, basestring), "ForeignKey(%r) is invalid. First parameter to ForeignKey must be either a model, a model name, or the string %r" % (to, RECURSIVE_RELATIONSHIP_CONSTANT)
        else:
            # Default the target field to the related model's primary key.
            to_field = to_field or to._meta.pk.name
        kwargs['verbose_name'] = kwargs.get('verbose_name', '')
        # 'in' test instead of the deprecated dict.has_key() (removed in
        # Python 3; identical behavior on Python 2).
        if 'edit_inline_type' in kwargs:
            import warnings
            warnings.warn("edit_inline_type is deprecated. Use edit_inline instead.")
            kwargs['edit_inline'] = kwargs.pop('edit_inline_type')
        kwargs['rel'] = ManyToOneRel(to, to_field,
            num_in_admin=kwargs.pop('num_in_admin', 3),
            min_num_in_admin=kwargs.pop('min_num_in_admin', None),
            max_num_in_admin=kwargs.pop('max_num_in_admin', None),
            num_extra_on_change=kwargs.pop('num_extra_on_change', 1),
            edit_inline=kwargs.pop('edit_inline', False),
            related_name=kwargs.pop('related_name', None),
            limit_choices_to=kwargs.pop('limit_choices_to', None),
            lookup_overrides=kwargs.pop('lookup_overrides', None),
            raw_id_admin=kwargs.pop('raw_id_admin', False))
        Field.__init__(self, **kwargs)
        # FK columns are always indexed.
        self.db_index = True
    def get_attname(self):
        # The model attribute holding the raw key value: '<field>_id'.
        return '%s_id' % self.name
    def get_validator_unique_lookup_type(self):
        return '%s__%s__exact' % (self.name, self.rel.get_related_field().name)
    def prepare_field_objs_and_params(self, manipulator, name_prefix):
        # Choose the oldforms widget(s): raw-ID text box, radio select, or
        # a (possibly nullable) select box with the full choice list.
        params = {'validator_list': self.validator_list[:], 'member_name': name_prefix + self.attname}
        if self.rel.raw_id_admin:
            field_objs = self.get_manipulator_field_objs()
            params['validator_list'].append(curry(manipulator_valid_rel_key, self, manipulator))
        else:
            if self.radio_admin:
                field_objs = [oldforms.RadioSelectField]
                params['ul_class'] = get_ul_class(self.radio_admin)
            else:
                if self.null:
                    field_objs = [oldforms.NullSelectField]
                else:
                    field_objs = [oldforms.SelectField]
            params['choices'] = self.get_choices_default()
        return field_objs, params
    def get_manipulator_field_objs(self):
        # Raw-ID entry mirrors the related field's own form field, unless
        # that field is an AutoField (then a plain integer field is used).
        rel_field = self.rel.get_related_field()
        if self.rel.raw_id_admin and not isinstance(rel_field, AutoField):
            return rel_field.get_manipulator_field_objs()
        else:
            return [oldforms.IntegerField]
    def get_db_prep_save(self, value):
        # Treat both '' and None as SQL NULL; identity comparison for None.
        if value == '' or value is None:
            return None
        else:
            return self.rel.get_related_field().get_db_prep_save(value)
    def flatten_data(self, follow, obj=None):
        if not obj:
            # In required many-to-one fields with only one available choice,
            # select that one available choice. Note: For SelectFields
            # (radio_admin=False), we have to check that the length of choices
            # is *2*, not 1, because SelectFields always have an initial
            # "blank" value. Otherwise (radio_admin=True), we check that the
            # length is 1.
            if not self.blank and (not self.rel.raw_id_admin or self.choices):
                choice_list = self.get_choices_default()
                if self.radio_admin and len(choice_list) == 1:
                    return {self.attname: choice_list[0][0]}
                if not self.radio_admin and len(choice_list) == 2:
                    return {self.attname: choice_list[1][0]}
        return Field.flatten_data(self, follow, obj)
    def contribute_to_class(self, cls, name):
        super(ForeignKey, self).contribute_to_class(cls, name)
        # Forward accessor, e.g. choice.poll.
        setattr(cls, self.name, ReverseSingleRelatedObjectDescriptor(self))
    def contribute_to_related_class(self, cls, related):
        # Reverse accessor, e.g. poll.choice_set.
        setattr(cls, related.get_accessor_name(), ForeignRelatedObjectsDescriptor(related))
    def formfield(self, **kwargs):
        # newforms field for this relation.
        defaults = {'queryset': self.rel.to._default_manager.all(), 'required': not self.blank, 'label': capfirst(self.verbose_name), 'help_text': self.help_text}
        defaults.update(kwargs)
        return forms.ModelChoiceField(**defaults)
class OneToOneField(RelatedField, IntegerField):
    """A one-to-one relation, stored as a unique FK that is also the PK."""
    def __init__(self, to, to_field=None, **kwargs):
        try:
            to_name = to._meta.object_name.lower()
        except AttributeError: # to._meta doesn't exist, so it must be RECURSIVE_RELATIONSHIP_CONSTANT
            assert isinstance(to, basestring), "OneToOneField(%r) is invalid. First parameter to OneToOneField must be either a model, a model name, or the string %r" % (to, RECURSIVE_RELATIONSHIP_CONSTANT)
        else:
            # Default the target field to the related model's primary key.
            to_field = to_field or to._meta.pk.name
        kwargs['verbose_name'] = kwargs.get('verbose_name', '')
        # 'in' test instead of the deprecated dict.has_key() (removed in
        # Python 3; identical behavior on Python 2) -- consistent with
        # ForeignKey.__init__ above.
        if 'edit_inline_type' in kwargs:
            import warnings
            warnings.warn("edit_inline_type is deprecated. Use edit_inline instead.")
            kwargs['edit_inline'] = kwargs.pop('edit_inline_type')
        kwargs['rel'] = OneToOneRel(to, to_field,
            num_in_admin=kwargs.pop('num_in_admin', 0),
            edit_inline=kwargs.pop('edit_inline', False),
            related_name=kwargs.pop('related_name', None),
            limit_choices_to=kwargs.pop('limit_choices_to', None),
            lookup_overrides=kwargs.pop('lookup_overrides', None),
            raw_id_admin=kwargs.pop('raw_id_admin', False))
        # The one-to-one field doubles as this model's primary key.
        kwargs['primary_key'] = True
        IntegerField.__init__(self, **kwargs)
        self.db_index = True
    def get_attname(self):
        # The model attribute holding the raw key value: '<field>_id'.
        return '%s_id' % self.name
    def get_validator_unique_lookup_type(self):
        return '%s__%s__exact' % (self.name, self.rel.get_related_field().name)
    # TODO: Copied from ForeignKey... putting this in RelatedField adversely affects
    # ManyToManyField. This works for now.
    def prepare_field_objs_and_params(self, manipulator, name_prefix):
        params = {'validator_list': self.validator_list[:], 'member_name': name_prefix + self.attname}
        if self.rel.raw_id_admin:
            field_objs = self.get_manipulator_field_objs()
            params['validator_list'].append(curry(manipulator_valid_rel_key, self, manipulator))
        else:
            if self.radio_admin:
                field_objs = [oldforms.RadioSelectField]
                params['ul_class'] = get_ul_class(self.radio_admin)
            else:
                if self.null:
                    field_objs = [oldforms.NullSelectField]
                else:
                    field_objs = [oldforms.SelectField]
            params['choices'] = self.get_choices_default()
        return field_objs, params
    def contribute_to_class(self, cls, name):
        super(OneToOneField, self).contribute_to_class(cls, name)
        # Forward accessor on the defining class.
        setattr(cls, self.name, ReverseSingleRelatedObjectDescriptor(self))
    def contribute_to_related_class(self, cls, related):
        # Reverse accessor on the target class; also register this field as
        # the model's one-to-one field if none is set yet.
        setattr(cls, related.get_accessor_name(), SingleRelatedObjectDescriptor(related))
        if not cls._meta.one_to_one_field:
            cls._meta.one_to_one_field = self
    def formfield(self, **kwargs):
        # newforms field for this relation.
        defaults = {'queryset': self.rel.to._default_manager.all(), 'required': not self.blank, 'label': capfirst(self.verbose_name), 'help_text': self.help_text}
        defaults.update(kwargs)
        return forms.ModelChoiceField(**defaults)
class ManyToManyField(RelatedField, Field):
    """A many-to-many relation, stored in a separate join table."""
    def __init__(self, to, **kwargs):
        kwargs['verbose_name'] = kwargs.get('verbose_name', None)
        kwargs['rel'] = ManyToManyRel(to,
            num_in_admin=kwargs.pop('num_in_admin', 0),
            related_name=kwargs.pop('related_name', None),
            filter_interface=kwargs.pop('filter_interface', None),
            limit_choices_to=kwargs.pop('limit_choices_to', None),
            raw_id_admin=kwargs.pop('raw_id_admin', False),
            symmetrical=kwargs.pop('symmetrical', True))
        # Optional explicit join-table name; defaults via _get_m2m_db_table.
        self.db_table = kwargs.pop('db_table', None)
        if kwargs["rel"].raw_id_admin:
            kwargs.setdefault("validator_list", []).append(self.isValidIDList)
        Field.__init__(self, **kwargs)
        # Append an admin usage hint appropriate to the chosen widget.
        if self.rel.raw_id_admin:
            msg = gettext_lazy('Separate multiple IDs with commas.')
        else:
            msg = gettext_lazy('Hold down "Control", or "Command" on a Mac, to select more than one.')
        self.help_text = string_concat(self.help_text, ' ', msg)
    def get_manipulator_field_objs(self):
        # Raw-ID entry uses a text field; otherwise a multi-select sized
        # between 5 and 15 rows depending on the number of choices.
        if self.rel.raw_id_admin:
            return [oldforms.RawIdAdminField]
        else:
            choices = self.get_choices_default()
            return [curry(oldforms.SelectMultipleField, size=min(max(len(choices), 5), 15), choices=choices)]
    def get_choices_default(self):
        # m2m selects have no "blank" entry.
        return Field.get_choices(self, include_blank=False)
    def _get_m2m_db_table(self, opts):
        "Function that can be curried to provide the m2m table name for this relation"
        if self.db_table:
            return self.db_table
        else:
            return '%s_%s' % (opts.db_table, self.name)
    def _get_m2m_column_name(self, related):
        "Function that can be curried to provide the source column name for the m2m table"
        # If this is an m2m relation to self, avoid the inevitable name clash
        if related.model == related.parent_model:
            return 'from_' + related.model._meta.object_name.lower() + '_id'
        else:
            return related.model._meta.object_name.lower() + '_id'
    def _get_m2m_reverse_name(self, related):
        "Function that can be curried to provide the related column name for the m2m table"
        # If this is an m2m relation to self, avoid the inevitable name clash
        if related.model == related.parent_model:
            return 'to_' + related.parent_model._meta.object_name.lower() + '_id'
        else:
            return related.parent_model._meta.object_name.lower() + '_id'
    def isValidIDList(self, field_data, all_data):
        "Validates that the value is a valid list of foreign keys"
        mod = self.rel.to
        try:
            pks = map(int, field_data.split(','))
        except ValueError:
            # the CommaSeparatedIntegerField validator will catch this error
            return
        # Fetch all referenced objects at once and report any missing keys.
        objects = mod._default_manager.in_bulk(pks)
        if len(objects) != len(pks):
            badkeys = [k for k in pks if k not in objects]
            raise validators.ValidationError, ngettext("Please enter valid %(self)s IDs. The value %(value)r is invalid.",
                "Please enter valid %(self)s IDs. The values %(value)r are invalid.", len(badkeys)) % {
                'self': self.verbose_name,
                'value': len(badkeys) == 1 and badkeys[0] or tuple(badkeys),
            }
    def flatten_data(self, follow, obj=None):
        new_data = {}
        if obj:
            # Existing object: report the PKs of the currently related set,
            # comma-joined for the raw-ID widget, as a list otherwise.
            instance_ids = [instance._get_pk_val() for instance in getattr(obj, self.name).all()]
            if self.rel.raw_id_admin:
                new_data[self.name] = ",".join([str(id) for id in instance_ids])
            else:
                new_data[self.name] = instance_ids
        else:
            # In required many-to-many fields with only one available choice,
            # select that one available choice.
            if not self.blank and not self.rel.edit_inline and not self.rel.raw_id_admin:
                choices_list = self.get_choices_default()
                if len(choices_list) == 1:
                    new_data[self.name] = [choices_list[0][0]]
        return new_data
    def contribute_to_class(self, cls, name):
        super(ManyToManyField, self).contribute_to_class(cls, name)
        # Add the descriptor for the m2m relation
        setattr(cls, self.name, ReverseManyRelatedObjectsDescriptor(self))
        # Set up the accessor for the m2m table name for the relation
        self.m2m_db_table = curry(self._get_m2m_db_table, cls._meta)
    def contribute_to_related_class(self, cls, related):
        # m2m relations to self do not have a ManyRelatedObjectsDescriptor,
        # as it would be redundant - unless the field is non-symmetrical.
        if related.model != related.parent_model or not self.rel.symmetrical:
            # Add the descriptor for the m2m relation
            setattr(cls, related.get_accessor_name(), ManyRelatedObjectsDescriptor(related))
        # Set up the accessors for the column names on the m2m table
        self.m2m_column_name = curry(self._get_m2m_column_name, related)
        self.m2m_reverse_name = curry(self._get_m2m_reverse_name, related)
    def set_attributes_from_rel(self):
        # m2m fields do not inherit name/verbose_name from the target model.
        pass
    def value_from_object(self, obj):
        "Returns the value of this field in the given model instance."
        return getattr(obj, self.attname).all()
    def formfield(self, **kwargs):
        # If initial is passed in, it's a list of related objects, but the
        # MultipleChoiceField takes a list of IDs.
        if kwargs.get('initial') is not None:
            kwargs['initial'] = [i._get_pk_val() for i in kwargs['initial']]
        defaults = {'queryset' : self.rel.to._default_manager.all(), 'required': not self.blank, 'label': capfirst(self.verbose_name), 'help_text': self.help_text}
        defaults.update(kwargs)
        return forms.ModelMultipleChoiceField(**defaults)
class ManyToOneRel(object):
    """Metadata holder for a many-to-one relation (the ``rel`` of a FK)."""
    def __init__(self, to, field_name, num_in_admin=3, min_num_in_admin=None,
            max_num_in_admin=None, num_extra_on_change=1, edit_inline=False,
            related_name=None, limit_choices_to=None, lookup_overrides=None, raw_id_admin=False):
        try:
            to._meta
        except AttributeError: # to._meta doesn't exist, so it must be RECURSIVE_RELATIONSHIP_CONSTANT
            assert isinstance(to, basestring), "'to' must be either a model, a model name or the string %r" % RECURSIVE_RELATIONSHIP_CONSTANT
        self.to = to
        self.field_name = field_name
        self.num_in_admin = num_in_admin
        self.min_num_in_admin = min_num_in_admin
        self.max_num_in_admin = max_num_in_admin
        self.num_extra_on_change = num_extra_on_change
        self.edit_inline = edit_inline
        self.related_name = related_name
        self.limit_choices_to = {} if limit_choices_to is None else limit_choices_to
        self.lookup_overrides = lookup_overrides or {}
        self.raw_id_admin = raw_id_admin
        # A FK target can be shared by many rows.
        self.multiple = True
    def get_related_field(self):
        """Return the Field on ``self.to`` that this relation points at."""
        return self.to._meta.get_field(self.field_name)
class OneToOneRel(ManyToOneRel):
    """Metadata holder for a one-to-one relation.

    Like ManyToOneRel, but single-valued; note it deliberately does not call
    ManyToOneRel.__init__ and carries only the options a OneToOneField takes.
    """
    def __init__(self, to, field_name, num_in_admin=0, edit_inline=False,
            related_name=None, limit_choices_to=None, lookup_overrides=None,
            raw_id_admin=False):
        self.to = to
        self.field_name = field_name
        self.num_in_admin = num_in_admin
        self.edit_inline = edit_inline
        self.related_name = related_name
        self.limit_choices_to = {} if limit_choices_to is None else limit_choices_to
        self.lookup_overrides = lookup_overrides or {}
        self.raw_id_admin = raw_id_admin
        # At most one row may point at a given target.
        self.multiple = False
class ManyToManyRel(object):
    """Metadata holder for a many-to-many relation."""
    def __init__(self, to, num_in_admin=0, related_name=None,
            filter_interface=None, limit_choices_to=None, raw_id_admin=False, symmetrical=True):
        self.to = to
        self.num_in_admin = num_in_admin
        self.related_name = related_name
        self.filter_interface = filter_interface
        self.limit_choices_to = {} if limit_choices_to is None else limit_choices_to
        # m2m relations are never edited inline.
        self.edit_inline = False
        self.raw_id_admin = raw_id_admin
        self.symmetrical = symmetrical
        self.multiple = True
        # The two admin widgets are mutually exclusive.
        assert not (self.raw_id_admin and self.filter_interface), "ManyToManyRels may not use both raw_id_admin and filter_interface"
| apache-2.0 |
TripleDogDare/RadioWCSpy | backend/env/lib/python2.7/site-packages/pip/_vendor/colorama/initialise.py | 474 | 1597 | # Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
import atexit
import sys
from .ansitowin32 import AnsiToWin32
# The interpreter's streams as they were at import time; deinit() restores
# these.
orig_stdout = sys.stdout
orig_stderr = sys.stderr

# The wrapped streams installed by init(); reinit() re-installs them after a
# deinit().
wrapped_stdout = sys.stdout
wrapped_stderr = sys.stderr

# Guards against registering the atexit reset handler more than once.
atexit_done = False
def reset_all():
    # Emit the ANSI reset sequence on the original stdout so the terminal is
    # left in its default state (registered as an atexit handler by init()).
    AnsiToWin32(orig_stdout).reset_all()
def init(autoreset=False, convert=None, strip=None, wrap=True):
    """Install ANSI-aware wrappers around sys.stdout and sys.stderr.

    ``wrap=False`` is only valid when every other option is falsy, since the
    other options take effect through the wrapper. Registers an atexit
    handler (once) that resets the terminal on interpreter exit.
    """
    if not wrap and any([autoreset, convert, strip]):
        raise ValueError('wrap=False conflicts with any other arg=True')

    global wrapped_stdout, wrapped_stderr
    # A stream may be None (e.g. pythonw on Windows); leave it alone then.
    if sys.stdout is None:
        wrapped_stdout = None
    else:
        wrapped_stdout = wrap_stream(orig_stdout, convert, strip, autoreset, wrap)
        sys.stdout = wrapped_stdout
    if sys.stderr is None:
        wrapped_stderr = None
    else:
        wrapped_stderr = wrap_stream(orig_stderr, convert, strip, autoreset, wrap)
        sys.stderr = wrapped_stderr

    global atexit_done
    if not atexit_done:
        atexit.register(reset_all)
        atexit_done = True
def deinit():
    """Restore the original sys.stdout/sys.stderr saved at import time."""
    for attr, original in (('stdout', orig_stdout), ('stderr', orig_stderr)):
        if original is not None:
            setattr(sys, attr, original)
def reinit():
    """Re-install the wrapped streams previously created by init()."""
    for attr, wrapped in (('stdout', wrapped_stdout), ('stderr', wrapped_stderr)):
        if wrapped is not None:
            setattr(sys, attr, wrapped)
def wrap_stream(stream, convert, strip, autoreset, wrap):
    """Return the stream to install for ``stream``.

    When ``wrap`` is false the stream is returned untouched; otherwise an
    AnsiToWin32 wrapper is built and its proxy stream is returned, but only
    if the wrapper decides wrapping is actually needed on this platform.
    """
    if not wrap:
        return stream
    wrapper = AnsiToWin32(stream, convert=convert, strip=strip, autoreset=autoreset)
    if wrapper.should_wrap():
        return wrapper.stream
    return stream
| mit |
daspecster/google-cloud-python | spanner/unit_tests/test_session.py | 1 | 33464 | # Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from google.cloud._testing import _GAXBaseAPI
class TestSession(unittest.TestCase):
    # Fully-qualified Cloud Spanner resource names used as fixtures by the
    # tests below (project -> instance -> database -> session hierarchy).
    PROJECT_ID = 'project-id'
    INSTANCE_ID = 'instance-id'
    INSTANCE_NAME = ('projects/' + PROJECT_ID + '/instances/' + INSTANCE_ID)
    DATABASE_ID = 'database-id'
    DATABASE_NAME = INSTANCE_NAME + '/databases/' + DATABASE_ID
    SESSION_ID = 'session-id'
    SESSION_NAME = DATABASE_NAME + '/sessions/' + SESSION_ID
    def _getTargetClass(self):
        # Deferred import so an import failure surfaces inside the test that
        # needs the class rather than at collection time.
        from google.cloud.spanner.session import Session
        return Session
    def _make_one(self, *args, **kwargs):
        # Instantiate the class under test with the supplied arguments.
        return self._getTargetClass()(*args, **kwargs)
def test_constructor(self):
database = _Database(self.DATABASE_NAME)
session = self._make_one(database)
self.assertTrue(session.session_id is None)
self.assertTrue(session._database is database)
def test___lt___(self):
database = _Database(self.DATABASE_NAME)
lhs = self._make_one(database)
lhs._session_id = b'123'
rhs = self._make_one(database)
rhs._session_id = b'234'
self.assertTrue(lhs < rhs)
def test_name_property_wo_session_id(self):
database = _Database(self.DATABASE_NAME)
session = self._make_one(database)
with self.assertRaises(ValueError):
_ = session.name
def test_name_property_w_session_id(self):
database = _Database(self.DATABASE_NAME)
session = self._make_one(database)
session._session_id = self.SESSION_ID
self.assertEqual(session.name, self.SESSION_NAME)
def test_create_w_session_id(self):
database = _Database(self.DATABASE_NAME)
session = self._make_one(database)
session._session_id = self.SESSION_ID
with self.assertRaises(ValueError):
session.create()
def test_create_ok(self):
session_pb = _SessionPB(self.SESSION_NAME)
gax_api = _SpannerApi(_create_session_response=session_pb)
database = _Database(self.DATABASE_NAME)
database.spanner_api = gax_api
session = self._make_one(database)
session.create()
self.assertEqual(session.session_id, self.SESSION_ID)
database_name, options = gax_api._create_session_called_with
self.assertEqual(database_name, self.DATABASE_NAME)
self.assertEqual(options.kwargs['metadata'],
[('google-cloud-resource-prefix', database.name)])
def test_create_error(self):
from google.gax.errors import GaxError
gax_api = _SpannerApi(_random_gax_error=True)
database = _Database(self.DATABASE_NAME)
database.spanner_api = gax_api
session = self._make_one(database)
with self.assertRaises(GaxError):
session.create()
database_name, options = gax_api._create_session_called_with
self.assertEqual(database_name, self.DATABASE_NAME)
self.assertEqual(options.kwargs['metadata'],
[('google-cloud-resource-prefix', database.name)])
def test_exists_wo_session_id(self):
    """Without a session id, ``exists()`` is False and no RPC is issued."""
    unbound = self._make_one(_Database(self.DATABASE_NAME))
    self.assertFalse(unbound.exists())
def test_exists_hit(self):
    """``exists()`` is True when GetSession returns the session."""
    session_pb = _SessionPB(self.SESSION_NAME)
    gax_api = _SpannerApi(_get_session_response=session_pb)
    database = _Database(self.DATABASE_NAME)
    database.spanner_api = gax_api
    session = self._make_one(database)
    session._session_id = self.SESSION_ID
    self.assertTrue(session.exists())
    session_name, options = gax_api._get_session_called_with
    self.assertEqual(session_name, self.SESSION_NAME)
    self.assertEqual(options.kwargs['metadata'],
                     [('google-cloud-resource-prefix', database.name)])
def test_exists_miss(self):
    """``exists()`` is False when the API reports NOT_FOUND.

    The fake API has no ``_get_session_response`` configured, which it
    translates into a not-found GaxError.
    """
    gax_api = _SpannerApi()
    database = _Database(self.DATABASE_NAME)
    database.spanner_api = gax_api
    session = self._make_one(database)
    session._session_id = self.SESSION_ID
    self.assertFalse(session.exists())
    session_name, options = gax_api._get_session_called_with
    self.assertEqual(session_name, self.SESSION_NAME)
    self.assertEqual(options.kwargs['metadata'],
                     [('google-cloud-resource-prefix', database.name)])
def test_exists_error(self):
    """A non-NOT_FOUND GaxError from GetSession propagates to the caller."""
    from google.gax.errors import GaxError
    gax_api = _SpannerApi(_random_gax_error=True)
    database = _Database(self.DATABASE_NAME)
    database.spanner_api = gax_api
    session = self._make_one(database)
    session._session_id = self.SESSION_ID
    with self.assertRaises(GaxError):
        session.exists()
    session_name, options = gax_api._get_session_called_with
    self.assertEqual(session_name, self.SESSION_NAME)
    self.assertEqual(options.kwargs['metadata'],
                     [('google-cloud-resource-prefix', database.name)])
def test_delete_wo_session_id(self):
    """Deleting a session that was never created raises ValueError."""
    session = self._make_one(_Database(self.DATABASE_NAME))
    with self.assertRaises(ValueError):
        session.delete()
def test_delete_hit(self):
    """``delete()`` succeeds when the backend acknowledges the session."""
    gax_api = _SpannerApi(_delete_session_ok=True)
    database = _Database(self.DATABASE_NAME)
    database.spanner_api = gax_api
    session = self._make_one(database)
    session._session_id = self.SESSION_ID
    session.delete()
    session_name, options = gax_api._delete_session_called_with
    self.assertEqual(session_name, self.SESSION_NAME)
    self.assertEqual(options.kwargs['metadata'],
                     [('google-cloud-resource-prefix', database.name)])
def test_delete_miss(self):
    """A NOT_FOUND from DeleteSession surfaces as ``NotFound``."""
    from google.cloud.exceptions import NotFound
    gax_api = _SpannerApi(_delete_session_ok=False)
    database = _Database(self.DATABASE_NAME)
    database.spanner_api = gax_api
    session = self._make_one(database)
    session._session_id = self.SESSION_ID
    with self.assertRaises(NotFound):
        session.delete()
    session_name, options = gax_api._delete_session_called_with
    self.assertEqual(session_name, self.SESSION_NAME)
    self.assertEqual(options.kwargs['metadata'],
                     [('google-cloud-resource-prefix', database.name)])
def test_delete_error(self):
    """A generic GaxError from DeleteSession propagates unchanged."""
    from google.gax.errors import GaxError
    gax_api = _SpannerApi(_random_gax_error=True)
    database = _Database(self.DATABASE_NAME)
    database.spanner_api = gax_api
    session = self._make_one(database)
    session._session_id = self.SESSION_ID
    with self.assertRaises(GaxError):
        session.delete()
    session_name, options = gax_api._delete_session_called_with
    self.assertEqual(session_name, self.SESSION_NAME)
    self.assertEqual(options.kwargs['metadata'],
                     [('google-cloud-resource-prefix', database.name)])
def test_snapshot_not_created(self):
    """``snapshot()`` before ``create()`` raises ValueError."""
    session = self._make_one(_Database(self.DATABASE_NAME))
    with self.assertRaises(ValueError):
        session.snapshot()
def test_snapshot_created(self):
    """``snapshot()`` returns a strong Snapshot bound to this session."""
    from google.cloud.spanner.snapshot import Snapshot
    database = _Database(self.DATABASE_NAME)
    session = self._make_one(database)
    session._session_id = 'DEADBEEF'  # emulate 'session.create()'
    snapshot = session.snapshot()
    self.assertIsInstance(snapshot, Snapshot)
    self.assertTrue(snapshot._session is session)
    self.assertTrue(snapshot._strong)
def test_read_not_created(self):
    """``read()`` before ``create()`` raises ValueError."""
    from google.cloud.spanner.keyset import KeySet
    TABLE_NAME = 'citizens'
    COLUMNS = ['email', 'first_name', 'last_name', 'age']
    KEYS = ['bharney@example.com', 'phred@example.com']
    KEYSET = KeySet(keys=KEYS)
    database = _Database(self.DATABASE_NAME)
    session = self._make_one(database)
    with self.assertRaises(ValueError):
        session.read(TABLE_NAME, COLUMNS, KEYSET)
def test_read(self):
    """``read()`` delegates to a one-shot Snapshot, forwarding all args."""
    from google.cloud.spanner import session as MUT
    from google.cloud._testing import _Monkey
    from google.cloud.spanner.keyset import KeySet
    TABLE_NAME = 'citizens'
    COLUMNS = ['email', 'first_name', 'last_name', 'age']
    KEYS = ['bharney@example.com', 'phred@example.com']
    KEYSET = KeySet(keys=KEYS)
    INDEX = 'email-address-index'
    LIMIT = 20
    TOKEN = b'DEADBEEF'
    database = _Database(self.DATABASE_NAME)
    session = self._make_one(database)
    session._session_id = 'DEADBEEF'
    _read_with = []
    expected = object()

    class _Snapshot(object):
        # Stub patched in for the real Snapshot class below; records the
        # arguments 'read' receives and hands back a sentinel.
        def __init__(self, session, **kwargs):
            self._session = session
            self._kwargs = kwargs.copy()

        def read(self, table, columns, keyset, index='', limit=0,
                 resume_token=b''):
            _read_with.append(
                (table, columns, keyset, index, limit, resume_token))
            return expected

    with _Monkey(MUT, Snapshot=_Snapshot):
        found = session.read(
            TABLE_NAME, COLUMNS, KEYSET,
            index=INDEX, limit=LIMIT, resume_token=TOKEN)
    self.assertIs(found, expected)
    self.assertEqual(len(_read_with), 1)
    (table, columns, key_set, index, limit, resume_token) = _read_with[0]
    self.assertEqual(table, TABLE_NAME)
    self.assertEqual(columns, COLUMNS)
    self.assertEqual(key_set, KEYSET)
    self.assertEqual(index, INDEX)
    self.assertEqual(limit, LIMIT)
    self.assertEqual(resume_token, TOKEN)
def test_execute_sql_not_created(self):
    """``execute_sql()`` before ``create()`` raises ValueError."""
    query = 'SELECT first_name, age FROM citizens'
    session = self._make_one(_Database(self.DATABASE_NAME))
    with self.assertRaises(ValueError):
        session.execute_sql(query)
def test_execute_sql_defaults(self):
    """``execute_sql()`` delegates to a one-shot Snapshot with defaults."""
    from google.cloud.spanner import session as MUT
    from google.cloud._testing import _Monkey
    SQL = 'SELECT first_name, age FROM citizens'
    TOKEN = b'DEADBEEF'
    database = _Database(self.DATABASE_NAME)
    session = self._make_one(database)
    session._session_id = 'DEADBEEF'
    _executed_sql_with = []
    expected = object()

    class _Snapshot(object):
        # Stub replacing the real Snapshot; records execute_sql arguments.
        def __init__(self, session, **kwargs):
            self._session = session
            self._kwargs = kwargs.copy()

        def execute_sql(
                self, sql, params=None, param_types=None, query_mode=None,
                resume_token=None):
            _executed_sql_with.append(
                (sql, params, param_types, query_mode, resume_token))
            return expected

    with _Monkey(MUT, Snapshot=_Snapshot):
        found = session.execute_sql(SQL, resume_token=TOKEN)
    self.assertIs(found, expected)
    self.assertEqual(len(_executed_sql_with), 1)
    sql, params, param_types, query_mode, token = _executed_sql_with[0]
    self.assertEqual(sql, SQL)
    self.assertEqual(params, None)
    self.assertEqual(param_types, None)
    self.assertEqual(query_mode, None)
    self.assertEqual(token, TOKEN)
def test_batch_not_created(self):
    """``batch()`` before ``create()`` raises ValueError."""
    session = self._make_one(_Database(self.DATABASE_NAME))
    with self.assertRaises(ValueError):
        session.batch()
def test_batch_created(self):
    """``batch()`` returns a Batch bound to this session."""
    from google.cloud.spanner.batch import Batch
    database = _Database(self.DATABASE_NAME)
    session = self._make_one(database)
    session._session_id = 'DEADBEEF'
    batch = session.batch()
    self.assertIsInstance(batch, Batch)
    self.assertTrue(batch._session is session)
def test_transaction_not_created(self):
    """``transaction()`` before ``create()`` raises ValueError."""
    session = self._make_one(_Database(self.DATABASE_NAME))
    with self.assertRaises(ValueError):
        session.transaction()
def test_transaction_created(self):
    """``transaction()`` returns a Transaction and caches it on the session."""
    from google.cloud.spanner.transaction import Transaction
    database = _Database(self.DATABASE_NAME)
    session = self._make_one(database)
    session._session_id = 'DEADBEEF'
    transaction = session.transaction()
    self.assertIsInstance(transaction, Transaction)
    self.assertTrue(transaction._session is session)
    self.assertTrue(session._transaction is transaction)
def test_transaction_w_existing_txn(self):
    """Requesting a new transaction rolls back the one already cached."""
    database = _Database(self.DATABASE_NAME)
    session = self._make_one(database)
    session._session_id = 'DEADBEEF'
    existing = session.transaction()
    another = session.transaction()  # invalidates existing txn
    self.assertTrue(session._transaction is another)
    self.assertTrue(existing._rolled_back)
def test_retry_transaction_w_commit_error_txn_already_begun(self):
    """A non-ABORTED commit error propagates without retrying; the
    transaction already begun on the session is reused as-is."""
    from google.gax.errors import GaxError
    from google.cloud.spanner.transaction import Transaction
    TABLE_NAME = 'citizens'
    COLUMNS = ['email', 'first_name', 'last_name', 'age']
    VALUES = [
        ['phred@exammple.com', 'Phred', 'Phlyntstone', 32],
        ['bharney@example.com', 'Bharney', 'Rhubble', 31],
    ]
    gax_api = _SpannerApi(
        _commit_error=True,
    )
    database = _Database(self.DATABASE_NAME)
    database.spanner_api = gax_api
    session = self._make_one(database)
    session._session_id = 'DEADBEEF'
    # Pre-seed a begun transaction so run_in_transaction must reuse it.
    begun_txn = session._transaction = Transaction(session)
    begun_txn._id = b'FACEDACE'
    called_with = []

    def unit_of_work(txn, *args, **kw):
        called_with.append((txn, args, kw))
        txn.insert(TABLE_NAME, COLUMNS, VALUES)

    with self.assertRaises(GaxError):
        session.run_in_transaction(unit_of_work)
    self.assertEqual(len(called_with), 1)
    txn, args, kw = called_with[0]
    self.assertIs(txn, begun_txn)
    self.assertEqual(txn.committed, None)
    self.assertEqual(args, ())
    self.assertEqual(kw, {})
def test_run_in_transaction_callback_raises_abort(self):
    """An exception raised by the callback rolls the transaction back and
    propagates to the caller."""
    from google.cloud.proto.spanner.v1.transaction_pb2 import (
        Transaction as TransactionPB)
    from google.cloud.spanner.transaction import Transaction
    TABLE_NAME = 'citizens'
    COLUMNS = ['email', 'first_name', 'last_name', 'age']
    VALUES = [
        ['phred@exammple.com', 'Phred', 'Phlyntstone', 32],
        ['bharney@example.com', 'Bharney', 'Rhubble', 31],
    ]
    TRANSACTION_ID = b'FACEDACE'
    transaction_pb = TransactionPB(id=TRANSACTION_ID)
    gax_api = _SpannerApi(
        _begin_transaction_response=transaction_pb,
        _rollback_response=None,
    )
    database = _Database(self.DATABASE_NAME)
    database.spanner_api = gax_api
    session = self._make_one(database)
    session._session_id = 'DEADBEEF'
    called_with = []

    class Testing(Exception):
        pass

    def unit_of_work(txn, *args, **kw):
        called_with.append((txn, args, kw))
        txn.insert(TABLE_NAME, COLUMNS, VALUES)
        raise Testing()

    with self.assertRaises(Testing):
        session.run_in_transaction(unit_of_work)
    self.assertEqual(len(called_with), 1)
    txn, args, kw = called_with[0]
    self.assertIsInstance(txn, Transaction)
    self.assertIsNone(txn.committed)
    self.assertTrue(txn._rolled_back)
    self.assertEqual(args, ())
    self.assertEqual(kw, {})
def test_run_in_transaction_w_args_w_kwargs_wo_abort(self):
    """Happy path: extra positional/keyword args reach the callback and
    the commit timestamp is returned."""
    import datetime
    from google.cloud.proto.spanner.v1.spanner_pb2 import CommitResponse
    from google.cloud.proto.spanner.v1.transaction_pb2 import (
        Transaction as TransactionPB)
    from google.cloud._helpers import UTC
    from google.cloud._helpers import _datetime_to_pb_timestamp
    from google.cloud.spanner.transaction import Transaction
    TABLE_NAME = 'citizens'
    COLUMNS = ['email', 'first_name', 'last_name', 'age']
    VALUES = [
        ['phred@exammple.com', 'Phred', 'Phlyntstone', 32],
        ['bharney@example.com', 'Bharney', 'Rhubble', 31],
    ]
    TRANSACTION_ID = b'FACEDACE'
    transaction_pb = TransactionPB(id=TRANSACTION_ID)
    now = datetime.datetime.utcnow().replace(tzinfo=UTC)
    now_pb = _datetime_to_pb_timestamp(now)
    response = CommitResponse(commit_timestamp=now_pb)
    gax_api = _SpannerApi(
        _begin_transaction_response=transaction_pb,
        _commit_response=response,
    )
    database = _Database(self.DATABASE_NAME)
    database.spanner_api = gax_api
    session = self._make_one(database)
    session._session_id = 'DEADBEEF'
    called_with = []

    def unit_of_work(txn, *args, **kw):
        called_with.append((txn, args, kw))
        txn.insert(TABLE_NAME, COLUMNS, VALUES)

    committed = session.run_in_transaction(
        unit_of_work, 'abc', some_arg='def')
    self.assertEqual(committed, now)
    self.assertEqual(len(called_with), 1)
    txn, args, kw = called_with[0]
    self.assertIsInstance(txn, Transaction)
    self.assertEqual(txn.committed, committed)
    self.assertEqual(args, ('abc',))
    self.assertEqual(kw, {'some_arg': 'def'})
def test_run_in_transaction_w_abort_no_retry_metadata(self):
    """One ABORTED commit (without RetryInfo metadata) is retried and the
    second attempt commits successfully."""
    import datetime
    from google.cloud.proto.spanner.v1.spanner_pb2 import CommitResponse
    from google.cloud.proto.spanner.v1.transaction_pb2 import (
        Transaction as TransactionPB)
    from google.cloud._helpers import UTC
    from google.cloud._helpers import _datetime_to_pb_timestamp
    from google.cloud.spanner.transaction import Transaction
    TABLE_NAME = 'citizens'
    COLUMNS = ['email', 'first_name', 'last_name', 'age']
    VALUES = [
        ['phred@exammple.com', 'Phred', 'Phlyntstone', 32],
        ['bharney@example.com', 'Bharney', 'Rhubble', 31],
    ]
    TRANSACTION_ID = b'FACEDACE'
    transaction_pb = TransactionPB(id=TRANSACTION_ID)
    now = datetime.datetime.utcnow().replace(tzinfo=UTC)
    now_pb = _datetime_to_pb_timestamp(now)
    response = CommitResponse(commit_timestamp=now_pb)
    gax_api = _SpannerApi(
        _begin_transaction_response=transaction_pb,
        _commit_abort_count=1,
        _commit_response=response,
    )
    database = _Database(self.DATABASE_NAME)
    database.spanner_api = gax_api
    session = self._make_one(database)
    session._session_id = 'DEADBEEF'
    called_with = []

    def unit_of_work(txn, *args, **kw):
        called_with.append((txn, args, kw))
        txn.insert(TABLE_NAME, COLUMNS, VALUES)

    committed = session.run_in_transaction(
        unit_of_work, 'abc', some_arg='def')
    self.assertEqual(committed, now)
    self.assertEqual(len(called_with), 2)
    # First attempt aborted (never committed); second attempt committed.
    for index, (txn, args, kw) in enumerate(called_with):
        self.assertIsInstance(txn, Transaction)
        if index == 1:
            self.assertEqual(txn.committed, committed)
        else:
            self.assertIsNone(txn.committed)
        self.assertEqual(args, ('abc',))
        self.assertEqual(kw, {'some_arg': 'def'})
def test_run_in_transaction_w_abort_w_retry_metadata(self):
    """An ABORTED commit carrying RetryInfo metadata causes a sleep for the
    advertised delay before the successful retry."""
    import datetime
    from google.cloud.proto.spanner.v1.spanner_pb2 import CommitResponse
    from google.cloud.proto.spanner.v1.transaction_pb2 import (
        Transaction as TransactionPB)
    from google.cloud._helpers import UTC
    from google.cloud._helpers import _datetime_to_pb_timestamp
    from google.cloud.spanner.transaction import Transaction
    from google.cloud.spanner import session as MUT
    from google.cloud._testing import _Monkey
    TABLE_NAME = 'citizens'
    COLUMNS = ['email', 'first_name', 'last_name', 'age']
    VALUES = [
        ['phred@exammple.com', 'Phred', 'Phlyntstone', 32],
        ['bharney@example.com', 'Bharney', 'Rhubble', 31],
    ]
    TRANSACTION_ID = b'FACEDACE'
    RETRY_SECONDS = 12
    RETRY_NANOS = 3456
    transaction_pb = TransactionPB(id=TRANSACTION_ID)
    now = datetime.datetime.utcnow().replace(tzinfo=UTC)
    now_pb = _datetime_to_pb_timestamp(now)
    response = CommitResponse(commit_timestamp=now_pb)
    gax_api = _SpannerApi(
        _begin_transaction_response=transaction_pb,
        _commit_abort_count=1,
        _commit_abort_retry_seconds=RETRY_SECONDS,
        _commit_abort_retry_nanos=RETRY_NANOS,
        _commit_response=response,
    )
    database = _Database(self.DATABASE_NAME)
    database.spanner_api = gax_api
    session = self._make_one(database)
    session._session_id = 'DEADBEEF'
    called_with = []

    def unit_of_work(txn, *args, **kw):
        called_with.append((txn, args, kw))
        txn.insert(TABLE_NAME, COLUMNS, VALUES)

    # Patch the time module so the retry delay is recorded, not waited.
    time_module = _FauxTimeModule()
    with _Monkey(MUT, time=time_module):
        committed = session.run_in_transaction(
            unit_of_work, 'abc', some_arg='def')
    self.assertEqual(time_module._slept,
                     RETRY_SECONDS + RETRY_NANOS / 1.0e9)
    self.assertEqual(committed, now)
    self.assertEqual(len(called_with), 2)
    for index, (txn, args, kw) in enumerate(called_with):
        self.assertIsInstance(txn, Transaction)
        if index == 1:
            self.assertEqual(txn.committed, committed)
        else:
            self.assertIsNone(txn.committed)
        self.assertEqual(args, ('abc',))
        self.assertEqual(kw, {'some_arg': 'def'})
def test_run_in_transaction_w_callback_raises_abort_wo_metadata(self):
    """An ABORTED error raised inside the callback itself triggers a sleep
    (per the attached RetryInfo) and a retry that then commits."""
    import datetime
    from google.gax.errors import GaxError
    from grpc import StatusCode
    from google.cloud.proto.spanner.v1.spanner_pb2 import CommitResponse
    from google.cloud.proto.spanner.v1.transaction_pb2 import (
        Transaction as TransactionPB)
    from google.cloud._helpers import UTC
    from google.cloud._helpers import _datetime_to_pb_timestamp
    from google.cloud.spanner.transaction import Transaction
    from google.cloud.spanner import session as MUT
    from google.cloud._testing import _Monkey
    TABLE_NAME = 'citizens'
    COLUMNS = ['email', 'first_name', 'last_name', 'age']
    VALUES = [
        ['phred@exammple.com', 'Phred', 'Phlyntstone', 32],
        ['bharney@example.com', 'Bharney', 'Rhubble', 31],
    ]
    TRANSACTION_ID = b'FACEDACE'
    RETRY_SECONDS = 1
    RETRY_NANOS = 3456
    transaction_pb = TransactionPB(id=TRANSACTION_ID)
    now = datetime.datetime.utcnow().replace(tzinfo=UTC)
    now_pb = _datetime_to_pb_timestamp(now)
    response = CommitResponse(commit_timestamp=now_pb)
    gax_api = _SpannerApi(
        _begin_transaction_response=transaction_pb,
        _commit_abort_retry_seconds=RETRY_SECONDS,
        _commit_abort_retry_nanos=RETRY_NANOS,
        _commit_response=response,
    )
    database = _Database(self.DATABASE_NAME)
    database.spanner_api = gax_api
    session = self._make_one(database)
    session._session_id = 'DEADBEEF'
    called_with = []

    def unit_of_work(txn, *args, **kw):
        called_with.append((txn, args, kw))
        if len(called_with) < 2:
            # First invocation: simulate an ABORTED conflict from gRPC,
            # carrying the fake API's retry metadata.
            grpc_error = gax_api._make_grpc_error(
                StatusCode.ABORTED,
                trailing=gax_api._trailing_metadata())
            raise GaxError('conflict', grpc_error)
        txn.insert(TABLE_NAME, COLUMNS, VALUES)

    time_module = _FauxTimeModule()
    with _Monkey(MUT, time=time_module):
        committed = session.run_in_transaction(unit_of_work)
    self.assertEqual(committed, now)
    self.assertEqual(time_module._slept,
                     RETRY_SECONDS + RETRY_NANOS / 1.0e9)
    self.assertEqual(len(called_with), 2)
    for index, (txn, args, kw) in enumerate(called_with):
        self.assertIsInstance(txn, Transaction)
        if index == 0:
            self.assertIsNone(txn.committed)
        else:
            self.assertEqual(txn.committed, now)
        self.assertEqual(args, ())
        self.assertEqual(kw, {})
def test_run_in_transaction_w_abort_w_retry_metadata_deadline(self):
    """With a tiny ``timeout_secs``, an ABORTED commit is *not* retried:
    the deadline is already exceeded, no sleep occurs, and the ABORTED
    error is re-raised."""
    import datetime
    from google.gax.errors import GaxError
    from google.gax.grpc import exc_to_code
    from grpc import StatusCode
    from google.cloud.proto.spanner.v1.spanner_pb2 import CommitResponse
    from google.cloud.proto.spanner.v1.transaction_pb2 import (
        Transaction as TransactionPB)
    from google.cloud._helpers import UTC
    from google.cloud._helpers import _datetime_to_pb_timestamp
    from google.cloud.spanner.transaction import Transaction
    from google.cloud.spanner import session as MUT
    from google.cloud._testing import _Monkey
    TABLE_NAME = 'citizens'
    COLUMNS = ['email', 'first_name', 'last_name', 'age']
    VALUES = [
        ['phred@exammple.com', 'Phred', 'Phlyntstone', 32],
        ['bharney@example.com', 'Bharney', 'Rhubble', 31],
    ]
    TRANSACTION_ID = b'FACEDACE'
    RETRY_SECONDS = 1
    RETRY_NANOS = 3456
    transaction_pb = TransactionPB(id=TRANSACTION_ID)
    now = datetime.datetime.utcnow().replace(tzinfo=UTC)
    now_pb = _datetime_to_pb_timestamp(now)
    response = CommitResponse(commit_timestamp=now_pb)
    gax_api = _SpannerApi(
        _begin_transaction_response=transaction_pb,
        _commit_abort_count=1,
        _commit_abort_retry_seconds=RETRY_SECONDS,
        _commit_abort_retry_nanos=RETRY_NANOS,
        _commit_response=response,
    )
    database = _Database(self.DATABASE_NAME)
    database.spanner_api = gax_api
    session = self._make_one(database)
    session._session_id = 'DEADBEEF'
    called_with = []

    def unit_of_work(txn, *args, **kw):
        called_with.append((txn, args, kw))
        txn.insert(TABLE_NAME, COLUMNS, VALUES)

    time_module = _FauxTimeModule()
    with _Monkey(MUT, time=time_module):
        with self.assertRaises(GaxError) as exc:
            session.run_in_transaction(
                unit_of_work, 'abc', some_arg='def', timeout_secs=0.01)
    self.assertEqual(exc_to_code(exc.exception.cause), StatusCode.ABORTED)
    self.assertIsNone(time_module._slept)
    self.assertEqual(len(called_with), 1)
    txn, args, kw = called_with[0]
    self.assertIsInstance(txn, Transaction)
    self.assertIsNone(txn.committed)
    self.assertEqual(args, ('abc',))
    self.assertEqual(kw, {'some_arg': 'def'})
def test_run_in_transaction_w_timeout(self):
    """When every commit attempt aborts, ``run_in_transaction`` stops
    retrying once ``timeout_secs`` is exceeded and re-raises ABORTED.

    The fake clock serves readings [1, 1.5, 2.5], so with a one-second
    timeout exactly one retry happens before the deadline check fails.
    """
    from google.cloud.spanner import session as MUT
    from google.cloud._testing import _Monkey
    from google.gax.errors import GaxError
    from google.gax.grpc import exc_to_code
    from google.cloud.proto.spanner.v1.transaction_pb2 import (
        Transaction as TransactionPB)
    from grpc import StatusCode
    from google.cloud.spanner.transaction import Transaction
    TABLE_NAME = 'citizens'
    COLUMNS = ['email', 'first_name', 'last_name', 'age']
    VALUES = [
        ['phred@exammple.com', 'Phred', 'Phlyntstone', 32],
        ['bharney@example.com', 'Bharney', 'Rhubble', 31],
    ]
    TRANSACTION_ID = b'FACEDACE'
    transaction_pb = TransactionPB(id=TRANSACTION_ID)
    gax_api = _SpannerApi(
        _begin_transaction_response=transaction_pb,
        # Integer count (the original float ``1e6`` worked only by
        # accident of float arithmetic): effectively "always abort",
        # forcing the timeout path to be what terminates the retries.
        _commit_abort_count=10 ** 6,
    )
    database = _Database(self.DATABASE_NAME)
    database.spanner_api = gax_api
    session = self._make_one(database)
    session._session_id = 'DEADBEEF'
    called_with = []

    def unit_of_work(txn, *args, **kw):
        called_with.append((txn, args, kw))
        txn.insert(TABLE_NAME, COLUMNS, VALUES)

    time_module = _FauxTimeModule()
    time_module._times = [1, 1.5, 2.5]  # retry once w/ timeout_secs=1
    with _Monkey(MUT, time=time_module):
        with self.assertRaises(GaxError) as exc:
            session.run_in_transaction(unit_of_work, timeout_secs=1)
    self.assertEqual(exc_to_code(exc.exception.cause), StatusCode.ABORTED)
    self.assertEqual(time_module._slept, None)
    self.assertEqual(len(called_with), 2)
    for txn, args, kw in called_with:
        self.assertIsInstance(txn, Transaction)
        self.assertIsNone(txn.committed)
        self.assertEqual(args, ())
        self.assertEqual(kw, {})
class _Database(object):
    # Minimal stand-in for a spanner Database: only ``name`` is stored;
    # tests attach ``spanner_api`` to instances as needed.
    def __init__(self, name):
        self.name = name
class _SpannerApi(_GAXBaseAPI):
    # Fake of the GAX Spanner API surface used by Session.  Behaviour is
    # driven by keyword attributes supplied at construction (via
    # _GAXBaseAPI), e.g. _random_gax_error, _commit_abort_count.

    # How many commit() calls should fail with ABORTED before succeeding.
    _commit_abort_count = 0
    # Retry delay advertised in ABORTED trailing metadata (RetryInfo);
    # None means "no RetryInfo attached".
    _commit_abort_retry_seconds = None
    _commit_abort_retry_nanos = None
    _random_gax_error = _commit_error = False

    def create_session(self, database, options=None):
        """Record the call; fail if configured, else return the canned pb."""
        from google.gax.errors import GaxError
        self._create_session_called_with = database, options
        if self._random_gax_error:
            raise GaxError('error')
        return self._create_session_response

    def get_session(self, name, options=None):
        """Record the call; a missing canned response emulates NOT_FOUND."""
        from google.gax.errors import GaxError
        self._get_session_called_with = name, options
        if self._random_gax_error:
            raise GaxError('error')
        try:
            return self._get_session_response
        except AttributeError:
            # No response configured -> behave like a NOT_FOUND RPC error.
            raise GaxError('miss', self._make_grpc_not_found())

    def delete_session(self, name, options=None):
        """Record the call; raise NOT_FOUND unless ``_delete_session_ok``."""
        from google.gax.errors import GaxError
        self._delete_session_called_with = name, options
        if self._random_gax_error:
            raise GaxError('error')
        if not self._delete_session_ok:
            raise GaxError('miss', self._make_grpc_not_found())

    def begin_transaction(self, session, options_, options=None):
        # Record arguments and return the canned transaction pb.
        self._begun = (session, options_, options)
        return self._begin_transaction_response

    def _trailing_metadata(self):
        """Build gRPC trailing metadata, optionally carrying RetryInfo."""
        from google.protobuf.duration_pb2 import Duration
        from google.rpc.error_details_pb2 import RetryInfo
        from grpc._common import to_cygrpc_metadata
        if self._commit_abort_retry_nanos is None:
            return to_cygrpc_metadata(())
        retry_info = RetryInfo(
            retry_delay=Duration(
                seconds=self._commit_abort_retry_seconds,
                nanos=self._commit_abort_retry_nanos))
        return to_cygrpc_metadata([
            ('google.rpc.retryinfo-bin', retry_info.SerializeToString())])

    def commit(self, session, mutations,
               transaction_id='', single_use_transaction=None, options=None):
        """Record the call; raise UNKNOWN or ABORTED per configuration."""
        from grpc import StatusCode
        from google.gax.errors import GaxError
        assert single_use_transaction is None
        self._committed = (session, mutations, transaction_id, options)
        if self._commit_error:
            raise GaxError('error', self._make_grpc_error(StatusCode.UNKNOWN))
        if self._commit_abort_count > 0:
            self._commit_abort_count -= 1
            grpc_error = self._make_grpc_error(
                StatusCode.ABORTED, trailing=self._trailing_metadata())
            raise GaxError('conflict', grpc_error)
        return self._commit_response

    def rollback(self, session, transaction_id, options=None):
        # Record arguments and return the canned rollback response.
        self._rolled_back = (session, transaction_id, options)
        return self._rollback_response
class _SessionPB(object):
    # Minimal stand-in for the Session protobuf: only ``name`` is read.
    def __init__(self, name):
        self.name = name
class _FauxTimeModule(object):
_slept = None
_times = ()
def time(self):
import time
if len(self._times) > 0:
return self._times.pop(0)
return time.time()
def sleep(self, seconds):
self._slept = seconds
| apache-2.0 |
lock8/drf-extensions | tests_app/settings.py | 5 | 4446 | # Django settings for testproject project.
import os
import multiprocessing

BASE_PATH = os.path.dirname(os.path.normpath(__file__))
# Per-process directory so parallel nose workers do not share file storage.
FILE_STORAGE_DIR = os.path.join(BASE_PATH, 'tests_file_storage', str(os.getpid()))

DEBUG = True
TEMPLATE_DEBUG = DEBUG
# Let exceptions bubble up to the test client instead of rendering 500 pages.
DEBUG_PROPAGATE_EXCEPTIONS = True
ALLOWED_HOSTS = ['*']

ADMINS = (
    # ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS

# SQLite keeps the test database lightweight and dependency-free.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': 'test',
        'TEST_CHARSET': 'utf8',
    },
}

# Several distinct local-memory caches, exercised by cache-related tests.
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
    },
    'special_cache': {
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
    },
    'another_special_cache': {
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
    },
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Europe/London'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-uk'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = ''
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'u@x-aj9(hoh#rb-^ymf#g2jx_hp0vj7u5#b@ag1n^seu9e!%cy'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    # Uncomment the next line to enable the admin:
    # 'django.contrib.admin',
    # Uncomment the next line to enable admin documentation:
    # 'django.contrib.admindocs',
    'django_nose',
    'guardian',
    'rest_framework_extensions',
    'tests_app',
)

STATIC_URL = '/static/'

# Fast/weak hashers listed first to speed up user creation in the tests.
PASSWORD_HASHERS = (
    'django.contrib.auth.hashers.SHA1PasswordHasher',
    'django.contrib.auth.hashers.PBKDF2PasswordHasher',
    'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
    'django.contrib.auth.hashers.BCryptPasswordHasher',
    'django.contrib.auth.hashers.MD5PasswordHasher',
    'django.contrib.auth.hashers.CryptPasswordHasher',
)

AUTH_USER_MODEL = 'auth.User'
import django

# 'staticfiles' became django.contrib.staticfiles in Django 1.3; older
# versions need the standalone app.
if django.VERSION < (1, 3):
    INSTALLED_APPS += ('staticfiles',)

# Run the suite under django-nose, parallelised across all CPU cores.
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
NOSE_ARGS = [
    '--processes=%s' % multiprocessing.cpu_count(),
    '--process-timeout=100',
    '--nocapture',
]
NOSE_PLUGINS = [
    'plugins.UnitTestDiscoveryPlugin',
    'plugins.PrepareRestFrameworkSettingsPlugin',
    'plugins.FlushCache',
    'plugins.PrepareFileStorageDir'
]

# guardian
# django-guardian object-level permission backend configuration.
ANONYMOUS_USER_ID = -1
AUTHENTICATION_BACKENDS = (
    'django.contrib.auth.backends.ModelBackend', # this is default
    'guardian.backends.ObjectPermissionBackend',
) | mit |
rwl/PyCIM | CIM14/CDPSM/Unbalanced/IEC61970/Generation/__init__.py | 3 | 1471 | # Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""This package contains packages that have information for Unit Commitment and Economic Dispatch of Hydro and Thermal Generating Units, Load Forecasting, Automatic Generation Control, and Unit Modeling for Dynamic Training Simulator.
"""
nsURI = "http://iec.ch/TC57/2009/CIM-schema-cim14?profile=http://iec.ch/TC57/2007/profile#Generation"
nsPrefix = "cimGeneration"
| mit |
MinchinWeb/minchin.text | setup.py | 1 | 4407 | import codecs
import os
import re
from pathlib import Path
import setuptools
here = Path(__file__).resolve().parent
def read(*parts):
    """Read a file relative to this ``setup.py`` and return its text.

    *parts* are path components joined onto the project root ``here``;
    the file is decoded as UTF-8.  A context manager is used so the
    handle is closed promptly instead of leaking until GC.
    """
    # convert `here` to string for Python versions before 3.6
    with codecs.open(os.path.join(str(here), *parts), "r", encoding="UTF-8") as fobj:
        return fobj.read()
def find_meta(*meta_file_parts, meta_key):
    """
    Extract __*meta*__ from meta_file
    """
    contents = read(*meta_file_parts)
    pattern = r"^__{}__ = ['\"]([^'\"]*)['\"]".format(meta_key)
    match = re.search(pattern, contents, re.M)
    if match is None:
        raise RuntimeError("Unable to find __{}__ string.".format(meta_key))
    return match.group(1)
##############################################################################
# PACKAGE METADATA #
##############################################################################
META_PATH = ["minchin", "text.py"]
NAME = find_meta(*META_PATH, meta_key="title").lower()
VERSION = find_meta(*META_PATH, meta_key="version")
SHORT_DESC = find_meta(*META_PATH, meta_key="description")
LONG_DESC = "\n\n".join(
[(here / "README.rst").open().read(), (here / "CHANGELOG.rst").open().read()]
)
AUTHOR = find_meta(*META_PATH, meta_key="author")
AUTHOR_EMAIL = find_meta(*META_PATH, meta_key="email")
URL = find_meta(*META_PATH, meta_key="url")
LICENSE = find_meta(*META_PATH, meta_key="license")
PACKAGES = setuptools.find_packages()
INSTALL_REQUIRES = [
"colorama >= 0.2.5",
]
EXTRA_REQUIRES = {
':python_version=="2.6"': ["backports.shutil_get_terminal_size>=1.0.0"],
':python_version=="2.7"': ["backports.shutil_get_terminal_size>=1.0.0"],
':python_version=="3.2"': ["backports.shutil_get_terminal_size>=1.0.0"],
"build": [
"pip",
"setuptools>=18.0",
"twine",
"wheel",
"minchin.releaser>=0.7.0"
],
"docs": [
# 'sphinx >= 1.4', # theme requires at least 1.4
# 'cloud_sptheme >=1.8',
# 'releases',
# 'Babel >=1.3,!=2.0', # 2.0 breaks on Windows
],
"test": [
# 'green >=1.9.4', # v2 works
# 'coverage',
# 'isort',
# 'pydocstyle',
# 'pycodestyle',
# 'check-manifest'
],
"dev": [
"black",
],
}
# full list of Classifiers at
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
CLASSIFIERS = [
# having an unknown classifier should keep PyPI from accepting the
# package as an upload
# "Private :: Do Not Upload",
# "Development Status :: 1 - Planning",
# "Development Status :: 2 - Pre-Alpha",
# "Development Status :: 3 - Alpha",
# "Development Status :: 4 - Beta",
"Development Status :: 5 - Production/Stable",
# "Development Status :: 6 - Mature",
# "Development Status :: 7 - Inactive",
# "Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
# "Programming Language :: Python :: 2 :: Only",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
# "Programming Language :: Python :: 3 :: Only",
"Natural Language :: English",
"Intended Audience :: Developers",
"Operating System :: OS Independent",
"Topic :: Utilities",
"Topic :: Software Development :: Libraries :: Python Modules",
]
##############################################################################
# Map the known license name onto its PyPI trove classifier.
if LICENSE == "MIT License":
    CLASSIFIERS += ["License :: OSI Approved :: {}".format(LICENSE)]

# add 'all' key to EXTRA_REQUIRES
# (an extra that pulls in every optional dependency group; keys of the
# mapping are unused here, so iterate the values directly)
all_requires = []
for extra_deps in EXTRA_REQUIRES.values():
    all_requires.extend(extra_deps)
EXTRA_REQUIRES["all"] = all_requires
# Register the distribution; all metadata values are assembled above from
# the package source, README and CHANGELOG.
setuptools.setup(
    name=NAME,
    version=VERSION,
    url=URL,
    license=LICENSE,
    author=AUTHOR,
    author_email=AUTHOR_EMAIL,
    description=SHORT_DESC,
    long_description=LONG_DESC,
    long_description_content_type="text/x-rst",
    packages=PACKAGES,
    package_data={"": ["README.rst", "LICENSE"]},
    include_package_data=True,
    install_requires=INSTALL_REQUIRES,
    extras_require=EXTRA_REQUIRES,
    platforms="any",
    classifiers=CLASSIFIERS,
    namespace_packages=["minchin",],
)
| mit |
paulcalabro/zato | code/zato-client/test/zato/client/test_client.py | 6 | 15178 | # -*- coding: utf-8 -*-
"""
Copyright (C) 2013 Dariusz Suchojad <dsuch at zato.io>
Licensed under LGPLv3, see LICENSE.txt for terms and conditions.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
# stdlib
from unittest import TestCase
from uuid import uuid4
# anyjson
from anyjson import dumps, loads
# lxml
from lxml import etree
# mock
from mock import patch
# nose
from nose.tools import eq_
# Zato
from zato.common import common_namespaces, ZATO_OK
from zato.common.test import rand_bool, rand_int, rand_object, rand_string
from zato.common.util import new_cid, make_repr
from zato.client import AnyServiceInvoker, CID_NO_CLIP, _Client, JSONClient, JSONSIOClient, \
RawDataClient, _Response, SOAPClient, SOAPSIOClient, _StructuredResponse, XMLClient
# ##############################################################################
class FakeInnerResponse(object):
    """Stand-in for a ``requests`` response, exposing only the four
    attributes the Zato client code reads."""
    def __init__(self, headers, ok, text, status_code):
        self.headers, self.ok = headers, ok
        self.text, self.status_code = text, status_code
class FakeSession(object):
    """Minimal ``requests.Session`` replacement whose ``post`` ignores its
    arguments and returns the canned response it was constructed with."""
    def __init__(self, response=None, auth=None):
        self.auth = auth
        self.response = response
    def post(self, address, request, headers):
        # Arguments are accepted only to satisfy the caller's signature.
        return self.response
# ##############################################################################
class _Base(TestCase):
    """Shared scaffolding for the client test cases: builds randomised
    constructor arguments and a FakeSession for the client class under test."""
    # Concrete subclasses set this to the client class being exercised.
    client_class = None
    def setUp(self):
        # Fresh randomised constructor arguments for every test.
        self.url = rand_string()
        self.auth = None
        self.path = rand_string()
        self.session = FakeSession()
        self.to_bunch = rand_bool()
        self.max_response_repr = 10000
        self.max_cid_repr = rand_int()
        self.logger = rand_object()
    def get_client(self, response):
        """Return a client_class instance whose fake session replies
        with *response*."""
        self.session.response = response
        return self.client_class(
            self.url, self.auth, self.path, self.session,
            self.to_bunch, self.max_response_repr, self.max_cid_repr)
# ##############################################################################
class JSONClientTestCase(_Base):
    """JSONClient must decode a JSON payload into a dict."""
    client_class = JSONClient
    def test_client(self):
        correlation_id = new_cid()
        payload = dumps({rand_string(): rand_string()})
        inner = FakeInnerResponse(
            {'x-zato-cid': correlation_id}, True, payload, rand_int())
        response = self.get_client(inner).invoke()
        eq_(response.ok, True)
        eq_(response.inner.text, payload)
        eq_(response.data.items(), loads(payload).items())
        eq_(response.has_data, True)
        eq_(response.cid, correlation_id)
class XMLClientTestCase(_Base):
    """XMLClient must parse an XML payload into an Element tree."""
    client_class = XMLClient
    def test_client(self):
        correlation_id = new_cid()
        payload = '<abc>{}</abc>'.format(rand_string())
        inner = FakeInnerResponse(
            {'x-zato-cid': correlation_id}, True, payload, rand_int())
        response = self.get_client(inner).invoke()
        eq_(response.ok, True)
        eq_(response.inner.text, payload)
        eq_(etree.tostring(response.data), payload)
        eq_(response.has_data, True)
        eq_(response.cid, correlation_id)
class SOAPClientTestCase(_Base):
    """Tests for SOAPClient: envelope extraction and error reporting."""
    client_class = SOAPClient
    def test_client_ok(self):
        """The first child of soapenv:Body becomes response.data."""
        cid = new_cid()
        headers = {'x-zato-cid':cid}
        ok = True
        _rand = rand_string()
        soap_action = rand_string()
        text = """
           <soapenv:Envelope xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/">
            <soapenv:Body>
                <abc>{}</abc>
            </soapenv:Body>
           </soapenv:Envelope>""".format(_rand).strip()
        status_code = rand_int()
        client = self.get_client(FakeInnerResponse(headers, ok, text, status_code))
        response = client.invoke(soap_action)
        # lxml re-homes the envelope's namespace declaration onto the
        # extracted element, hence the xmlns on <abc>.
        expected_response_data = """
            <abc xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/">{}</abc>
            """.format(_rand).strip()
        eq_(response.details, None)
        eq_(response.ok, ok)
        eq_(response.inner.text, text)
        eq_(etree.tostring(response.data), expected_response_data)
        eq_(response.has_data, True)
        eq_(response.cid, cid)
    def test_client_no_soap_response(self):
        """A payload without a SOAP envelope yields has_data=False plus details."""
        cid = new_cid()
        headers = {'x-zato-cid':cid}
        ok = False
        soap_action = rand_string()
        text = '<abc/>'
        status_code = rand_int()
        client = self.get_client(FakeInnerResponse(headers, ok, text, status_code))
        response = client.invoke(soap_action)
        eq_(response.ok, ok)
        eq_(response.details, 'No /soapenv:Envelope/soapenv:Body/*[1] in SOAP response')
        eq_(response.inner.text, text)
        eq_(response.has_data, False)
        eq_(response.cid, cid)
# ##############################################################################
class JSONSIOClientTestCase(_Base):
    """Tests for JSONSIOClient: a zato_env envelope plus one payload key."""
    client_class = JSONSIOClient
    def test_client(self):
        """data exposes only the payload; cid/details come from zato_env."""
        cid = new_cid()
        headers = {'x-zato-cid':cid}
        ok = True
        # zato_env carries invocation metadata, separate from the payload.
        env = {
            'details': rand_string(),
            'result': ZATO_OK,
            'cid': cid
        }
        sio_payload_key = rand_string()
        sio_payload = {rand_string(): rand_string()}
        sio_response = {
            'zato_env': env,
            sio_payload_key: sio_payload
        }
        text = dumps(sio_response)
        status_code = rand_int()
        client = self.get_client(FakeInnerResponse(headers, ok, text, status_code))
        response = client.invoke()
        eq_(response.ok, ok)
        eq_(response.inner.text, text)
        # Only the payload is exposed, never the envelope itself.
        eq_(response.data.items(), sio_response[sio_payload_key].items())
        eq_(response.has_data, True)
        eq_(response.cid, cid)
        eq_(response.cid, sio_response['zato_env']['cid'])
        eq_(response.details, sio_response['zato_env']['details'])
class SOAPSIOClientTestCase(_Base):
    """Tests for SOAPSIOClient: SIO payloads wrapped in SOAP envelopes."""
    client_class = SOAPSIOClient
    def test_client_ok(self):
        """A successful SIO response parses and is reachable via XPath."""
        cid = new_cid()
        headers = {'x-zato-cid':cid}
        ok = True
        status_code = rand_int()
        rand_id, rand_name, soap_action = rand_string(), rand_string(), rand_string()
        sio_response = """<zato_outgoing_amqp_edit_response xmlns="https://zato.io/ns/20130518">
   <zato_env>
      <cid>{}</cid>
      <result>ZATO_OK</result>
   </zato_env>
   <item>
      <id>{}</id>
      <name>crm.account</name>
   </item>
</zato_outgoing_amqp_edit_response>
""".format(cid, rand_id, rand_name)
        text = """<soap:Envelope xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/" xmlns="https://zato.io/ns/20130518">
  <soap:Body>
      {}
  </soap:Body>
</soap:Envelope>""".format(sio_response).strip()
        client = self.get_client(FakeInnerResponse(headers, ok, text, status_code))
        response = client.invoke(soap_action, '')
        eq_(response.ok, ok)
        eq_(response.inner.text, text)
        eq_(response.has_data, True)
        eq_(response.cid, cid)
        # Both the envelope metadata and the payload items must be present
        # in the parsed response.data document.
        path_items = (
            ('zato_env', 'cid'),
            ('zato_env', 'result'),
            ('item', 'id'),
            ('item', 'name'),
        )
        for items in path_items:
            path = '//zato:zato_outgoing_amqp_edit_response/zato:' + '/zato:'.join(items)
            xpath = etree.XPath(path, namespaces=common_namespaces)
            expected = xpath(etree.fromstring(text))[0].text
            actual = xpath(response.data)[0]
            self.assertEquals(expected, actual)
    def test_client_soap_fault(self):
        """A SOAP fault yields has_data=False with the fault in details."""
        cid = new_cid()
        headers = {'x-zato-cid':cid}
        ok = False
        status_code = rand_int()
        soap_action = rand_string()
        text = b"""<?xml version='1.0' encoding='UTF-8'?>
<SOAP-ENV:Envelope
  xmlns:SOAP-ENV="http://schemas.xmlsoap.org/soap/envelope/"
  xmlns:xsi="http://www.w3.org/1999/XMLSchema-instance"
  xmlns:xsd="http://www.w3.org/1999/XMLSchema">
   <SOAP-ENV:Body>
     <SOAP-ENV:Fault>
      <faultcode>SOAP-ENV:Client</faultcode>
      <faultstring><![CDATA[cid [K68438211212681798524426103126], faultstring
[Traceback (most recent call last):
File
"/opt/zato/code/zato-server/src/zato/server/connection/http_soap/
channel.py", line 126, in dispatch
service_info, response = handler.handle(cid, wsgi_environ, payload, transport,
worker_store, self.simple_io_config, data_format, path_info)
File
"/opt/zato/code/zato-server/src/zato/server/connection/http_soap/
channel.py", line 227, in handle
service_instance.handle()
File
"/opt/zato/code/zato-server/src/zato/server/service/internal/
definition/amqp.py", line 174, in handle
filter(ConnDefAMQP.id==self.request.input.id).\
File
"/opt/zato/code/eggs/SQLAlchemy-0.7.9-py2.7-linux-x86_64.egg/sqlalchemy/
orm/query.py", line 2190, in one
raise orm_exc.NoResultFound("No row was found for one()")
NoResultFound: No row was found for one()
]]]></faultstring>
     </SOAP-ENV:Fault>
   </SOAP-ENV:Body>
</SOAP-ENV:Envelope>"""
        client = self.get_client(FakeInnerResponse(headers, ok, text, status_code))
        response = client.invoke(soap_action, '')
        eq_(response.ok, ok)
        eq_(response.inner.text, text)
        eq_(response.has_data, False)
        eq_(response.cid, cid)
        # details is the parsed Fault element; child [1] is the faultstring.
        eq_('NoResultFound: No row was found for one()' in response.details.getchildren()[1].text, True)
# ##############################################################################
class AnyServiceInvokerTestCase(_Base):
    """Tests for AnyServiceInvoker: invoking an arbitrary service by name."""
    client_class = AnyServiceInvoker
    def test_client(self):
        """The base64-wrapped inner service response is decoded into a dict."""
        cid = new_cid()
        headers = {'x-zato-cid':cid}
        ok = True
        status_code = rand_int()
        service_name = rand_string()
        service_response_name = '{}_response'.format(service_name)
        service_response_payload = {'service_id':5207, 'has_wsdl':True}
        service_response_dict = {'zato_service_has_wsdl_response':service_response_payload}
        # NOTE(review): str.encode('base64') is Python 2 only; this module
        # does not run under Python 3 as-is.
        service_response = dumps(service_response_dict).encode('base64')
        text = dumps({
            'zato_env':{'result':ZATO_OK, 'details':''},
            service_response_name: {
                'response':service_response
            }
        })
        client = self.get_client(FakeInnerResponse(headers, ok, text, status_code))
        response = client.invoke(service_name, '')
        eq_(response.ok, ok)
        eq_(response.inner.text, text)
        eq_(response.data.items(), service_response_payload.items())
        eq_(response.has_data, True)
        eq_(response.cid, cid)
# ##############################################################################
class RawDataClientTestCase(_Base):
    """RawDataClient must pass the response text through untouched."""
    client_class = RawDataClient
    def test_client(self):
        correlation_id = new_cid()
        payload = rand_string()
        inner = FakeInnerResponse(
            {'x-zato-cid': correlation_id}, True, payload, rand_int())
        response = self.get_client(inner).invoke()
        eq_(response.ok, True)
        eq_(response.inner.text, payload)
        eq_(response.data, payload)
        eq_(response.has_data, True)
        eq_(response.cid, correlation_id)
# ##############################################################################
class NotImplementedErrorTestCase(_Base):
    """Abstract response classes must refuse direct use."""
    def test_not_implemented_error(self):
        inner = FakeInnerResponse({}, rand_int(), rand_string(), rand_int())
        response_data = (inner, rand_bool(), rand_int(), rand_int(), None)
        # Instantiating the abstract base raises, as do the hooks that
        # concrete subclasses are expected to override.
        self.assertRaises(NotImplementedError, _Response, *response_data)
        self.assertRaises(NotImplementedError, _StructuredResponse(*response_data).load_func)
        self.assertRaises(NotImplementedError, _StructuredResponse(*response_data).set_has_data)
class TestResponse(TestCase):
    """repr() of a response clips the CID and payload to configured limits."""
    def test_repr(self):
        class MyResponse(_Response):
            # Minimal concrete subclass: no-op init hook.
            def init(self):
                pass
        cid = new_cid()
        ok = True
        text = rand_string()
        status_code = rand_int()
        inner_params = ({'x-zato-cid':cid}, ok, text, status_code)
        # Case 1 clips both cid and text to 3 characters; case 2 shows the
        # whole text and disables cid clipping via CID_NO_CLIP.
        max_repr = ((3,3), (len(text), CID_NO_CLIP))
        for(max_response_repr, max_cid_repr) in max_repr:
            inner = FakeInnerResponse(*inner_params)
            response = MyResponse(inner, False, max_response_repr, max_cid_repr, None)
            response.ok = ok
            # A clipped cid is shown as <head>..<tail>; unclipped has no '..'.
            cid_ellipsis = '' if max_cid_repr == CID_NO_CLIP else '..'
            expected = 'ok:[{}] inner.status_code:[{}] cid:[{}{}{}], inner.text:[{}]>'.format(
                ok, status_code, cid[:max_cid_repr], cid_ellipsis, cid[-max_cid_repr:], text[:max_response_repr])
            eq_(repr(response).endswith(expected), True)
class TestSettingSessionAuth(TestCase):
    """_Client must honour auth credentials already present on a session."""
    def test_setting_session_auth_no_previous_auth(self):
        """Without a pre-existing session, auth is applied to the new one."""
        auth = (uuid4().hex, uuid4().hex)
        client = _Client(uuid4().hex, uuid4().hex, auth)
        self.assertEqual(client.session.auth, auth)
    def test_setting_session_auth_has_previous_auth(self):
        """An existing session's auth must not be overridden."""
        auth1 = (uuid4().hex, uuid4().hex)
        auth2 = (uuid4().hex, uuid4().hex)
        # First positional arg is the canned response (irrelevant here).
        session = FakeSession(uuid4, auth1)
        client = _Client(uuid4().hex, uuid4().hex, auth2, session=session)
        # Make sure we don't override already existing auth
        self.assertNotEqual(client.session.auth, auth2)
        # The previous auth should still be there
        self.assertEqual(client.session.auth, auth1)
class TestHeaders(TestCase):
    """ GH #221 - Clients don't always properly pass headers on to super classes.
    """
    class InnerInvokeResponse(object):
        """Stub that records the arguments inner_invoke was called with."""
        # NOTE(review): `async` became a reserved word in Python 3.7; this
        # module is Python 2 code and will not even parse on modern Pythons.
        def __init__(self, request, response_class, async, headers):
            self.request = request
            self.response_class = response_class
            self.async = async
            self.headers = headers
        def __repr__(self):
            return make_repr(self)
    def get_inner_invoke(self):
        """Return the stub class used in place of _Client.inner_invoke."""
        return self.InnerInvokeResponse
    def test_clients(self):
        """Every client type must forward user headers to inner_invoke."""
        for class_ in AnyServiceInvoker, JSONClient, JSONSIOClient, XMLClient, RawDataClient, SOAPClient, SOAPSIOClient:
            with patch('zato.client._Client.inner_invoke', self.get_inner_invoke()):
                client = class_(*rand_string(2))
                header1, value1 = rand_string(2)
                header2, value2 = rand_string(2)
                headers = {header1:value1, header2:value2}
                response = client.invoke(rand_string(), headers=headers)
                eq_(sorted(headers.items()), sorted(response.headers.items()))
| gpl-3.0 |
wathen/PhD | MHD/FEniCS/MHD/Stabilised/SaddlePointForm/Test/ShiftedMassApprox/Solve/MHDapply.py | 2 | 3223 | from dolfin import assemble, MixedFunctionSpace
import petsc4py
import sys
petsc4py.init(sys.argv)
from petsc4py import PETSc
import numpy as np
import P as PrecondMulti
import NSprecond
import MaxwellPrecond as MP
import CheckPetsc4py as CP
class BaseMyPC(object):
    """Base class for python-context PETSc preconditioners: subclasses
    override apply(); every other application variant falls back to it."""
    def setup(self, pc):
        # One-time setup hook; nothing to do by default.
        pass
    def reset(self, pc):
        # Reset hook; nothing to do by default.
        pass
    def apply(self, pc, x, y):
        # Subclasses must implement: apply the preconditioner to x,
        # writing the result into y.
        raise NotImplementedError
    def applyT(self, pc, x, y):
        # Transpose application defaults to the plain application.
        self.apply(pc, x, y)
    def applyS(self, pc, x, y):
        # Symmetric application defaults to the plain application.
        self.apply(pc, x, y)
    def applySL(self, pc, x, y):
        # Symmetric-left application delegates to applyS.
        self.applyS(pc, x, y)
    def applySR(self, pc, x, y):
        # Symmetric-right application delegates to applyS.
        self.applyS(pc, x, y)
    def applyRich(self, pc, x, y, w, tols):
        # Richardson application ignores w/tols and delegates to apply.
        self.apply(pc, x, y)
class Direct(BaseMyPC):
    """Block preconditioner applying two independent inner GMRES solves:
    one on function spaces 0-1 (via NSprecond.PCDdirect) and one on
    spaces 2-3 (via MaxwellPrecond.Direct)."""
    def __init__(self, Fspace, P,Q,F,L):
        # Fspace: the four function spaces; P/Q/F/L: assembled operators
        # forwarded to the inner preconditioners.
        self.Fspace = Fspace
        self.P = P
        self.Q = Q
        self.F = F
        self.L = L
        # Index sets splitting the monolithic vector into the first-two-space
        # block (NS) and the last-two-space block (M).
        self.NS_is = PETSc.IS().createGeneral(range(Fspace[0].dim()+Fspace[1].dim()))
        self.M_is = PETSc.IS().createGeneral(range(Fspace[0].dim()+Fspace[1].dim(),Fspace[0].dim()+Fspace[1].dim()+Fspace[2].dim()+Fspace[3].dim()))
    def create(self, pc):
        """Build the two inner GMRES solvers with python-context PCs."""
        self.diag = None
        kspNS = PETSc.KSP()
        kspNS.create(comm=PETSc.COMM_WORLD)
        pcNS = kspNS.getPC()
        kspNS.setType('gmres')
        pcNS.setType('python')
        pcNS.setPythonContext(NSprecond.PCDdirect(MixedFunctionSpace([self.Fspace[0],self.Fspace[1]]), self.Q, self.F, self.L))
        kspNS.setTolerances(1e-3)
        kspNS.setFromOptions()
        self.kspNS = kspNS
        kspM = PETSc.KSP()
        kspM.create(comm=PETSc.COMM_WORLD)
        pcM = kspM.getPC()
        kspM.setType('gmres')
        pcM.setType('python')
        kspM.setTolerances(1e-3)
        pcM.setPythonContext(MP.Direct(MixedFunctionSpace([self.Fspace[2],self.Fspace[3]])))
        kspM.setFromOptions()
        self.kspM = kspM
    def setUp(self, pc):
        """Wire python-shell operator matrices and the sub-blocks of P
        into the two inner solvers."""
        Ans = PETSc.Mat().createPython([self.Fspace[0].dim()+self.Fspace[1].dim(), self.Fspace[0].dim()+self.Fspace[1].dim()])
        Ans.setType('python')
        Am = PETSc.Mat().createPython([self.Fspace[2].dim()+self.Fspace[3].dim(), self.Fspace[2].dim()+self.Fspace[3].dim()])
        Am.setType('python')
        NSp = PrecondMulti.NSP(self.Fspace,self.P,self.Q,self.L,self.F)
        Mp = PrecondMulti.MP(self.Fspace,self.P)
        Ans.setPythonContext(NSp)
        Ans.setUp()
        Am.setPythonContext(Mp)
        Am.setUp()
        # Operator is the python-shell matrix; the preconditioning matrix
        # is the matching diagonal sub-block of P.
        self.kspNS.setOperators(Ans,self.P.getSubMatrix(self.NS_is,self.NS_is))
        self.kspM.setOperators(Am,self.P.getSubMatrix(self.M_is,self.M_is))
        # print self.kspNS.view()
    def apply(self, pc, x, y):
        """Split x into the two blocks, solve each, concatenate into y."""
        # self.kspCurlCurl.setOperators(self.B)
        x1 = x.getSubVector(self.NS_is)
        y1 = x1.duplicate()
        x2 = x.getSubVector(self.M_is)
        y2 = x2.duplicate()
        reshist = {}
        def monitor(ksp, its, fgnorm):
            reshist[its] = fgnorm
        # NOTE: only the second (M) solve is monitored here.
        self.kspM.setMonitor(monitor)
        self.kspNS.solve(x1, y1)
        self.kspM.solve(x2, y2)
        # Python 2 print statements: debug output of residual history.
        print reshist
        for line in reshist.values():
            print line
        y.array = (np.concatenate([y1.array,y2.array]))
| mit |
mic4ael/indico | indico/core/settings/proxy_test.py | 1 | 7682 | # This file is part of Indico.
# Copyright (C) 2002 - 2020 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from datetime import datetime, timedelta
import pytest
import pytz
from indico.core.settings import PrefixSettingsProxy, SettingsProxy
from indico.core.settings.converters import DatetimeConverter, TimedeltaConverter
from indico.modules.events.settings import EventSettingsProxy
from indico.modules.users import User
def test_proxy_strict_nodefaults():
    """A strict proxy without any defaults is rejected outright."""
    pytest.raises(ValueError, SettingsProxy, 'test', {})
@pytest.mark.usefixtures('db')
def test_proxy_strict_off():
    """With strict mode disabled, unknown keys return None or the default."""
    proxy = SettingsProxy('test', {}, False)
    assert proxy.get('foo') is None
    # Bug fix: this line was a bare comparison with no `assert`,
    # so the default-value behaviour was never actually verified.
    assert proxy.get('foo', 'bar') == 'bar'
    proxy.set('foo', 'foobar')
    assert proxy.get('foo') == 'foobar'
@pytest.mark.usefixtures('db')
def test_proxy_strict():
    """Strict proxies reject get/set/delete on keys missing from defaults."""
    proxy = SettingsProxy('test', {'hello': 'world'})
    pytest.raises(ValueError, proxy.get, 'foo')
    pytest.raises(ValueError, proxy.get, 'foo', 'bar')
    pytest.raises(ValueError, proxy.set, 'foo', 'foobar')
    pytest.raises(ValueError, proxy.set_multi, {'hello': 'world', 'foo': 'foobar'})
    pytest.raises(ValueError, proxy.delete, 'hello', 'foo')
    # Known keys still behave normally.
    assert proxy.get('hello') == 'world'
@pytest.mark.usefixtures('db', 'request_context')  # use req ctx so the cache is active
def test_proxy_defaults():
    """Defaults are served until a value is set; get_all merges both."""
    proxy = SettingsProxy('test', {'hello': 'world', 'foo': None})
    assert proxy.get('hello') == 'world'
    assert proxy.get('foo') is None
    assert proxy.get('foo', 'bar') == 'bar'
    # get_all(True) returns only values actually stored in the db.
    assert not proxy.get_all(True)
    proxy.set('foo', 'bar')
    assert proxy.get_all(True) == {'foo': 'bar'}
    assert proxy.get_all() == {'hello': 'world', 'foo': 'bar'}
@pytest.mark.usefixtures('db')
def test_proxy_delete_all():
    """delete_all() drops stored values, reverting every key to its default."""
    defaults = {'hello': 'world', 'foo': None}
    proxy = SettingsProxy('test', defaults)
    assert proxy.get_all() == defaults
    proxy.set('hello', 'test')
    assert proxy.get_all() == {'hello': 'test', 'foo': None}
    proxy.delete_all()
    assert proxy.get_all() == defaults
@pytest.mark.usefixtures('db')
def test_proxy_converters_all():
    """Converters round-trip datetimes and timedeltas transparently."""
    epoch_dt = datetime(1970, 1, 1, tzinfo=pytz.utc)
    xmas_dt = datetime(2016, 12, 24, 20, tzinfo=pytz.utc)
    newyear_dt = datetime(2017, 1, 2, tzinfo=pytz.utc)
    duration = timedelta(days=2)
    defaults = {'epoch': epoch_dt, 'xmas': None, 'newyear': None, 'duration': None}
    # Every key uses the datetime converter except 'duration'.
    converters = {name: DatetimeConverter if name != 'duration' else TimedeltaConverter for name in defaults}
    proxy = SettingsProxy('test', defaults, converters=converters)
    proxy.set('xmas', xmas_dt)
    proxy.set_multi({'newyear': newyear_dt, 'duration': duration})
    assert proxy.get_all() == {'epoch': epoch_dt, 'xmas': xmas_dt, 'newyear': newyear_dt, 'duration': duration}
@pytest.mark.usefixtures('db', 'request_context')  # use req ctx so the cache is active
def test_proxy_preload(count_queries):
    """The first lookup preloads all settings; later lookups hit the cache."""
    defaults = {'hello': 'world', 'foo': None, 'bar': None}
    proxy = SettingsProxy('test', defaults)
    proxy.set('bar', 'test')
    with count_queries() as cnt:
        # this one preloads
        assert proxy.get('hello') == 'world'
    assert cnt() == 1
    with count_queries() as cnt:
        # this one has no value in the db
        assert proxy.get('foo') is None
        assert proxy.get('foo', 'bar') == 'bar'
    assert cnt() == 0
    with count_queries() as cnt:
        assert proxy.get('bar') == 'test'
    assert cnt() == 0
@pytest.mark.usefixtures('db', 'request_context')  # use req ctx so the cache is active
def test_proxy_cache_mutable():
    """Mutable defaults are copied per lookup so callers cannot corrupt them."""
    proxy = SettingsProxy('test', {'foo': []})
    foo = proxy.get('foo')
    # Returned value must be a copy of the default, not the default itself.
    assert foo is not proxy.defaults['foo']
    foo.append('test')
    # Mutating the copy must not leak into subsequent lookups.
    assert not proxy.get('foo')
@pytest.mark.usefixtures('db')
def test_acls_invalid():
    """Regular keys and ACL keys cannot be accessed through each other's API."""
    user = User()
    proxy = SettingsProxy('foo', {'reg': None}, acls={'acl'})
    # ACL keys are rejected by the plain get/set API...
    pytest.raises(ValueError, proxy.get, 'acl')
    pytest.raises(ValueError, proxy.set, 'acl', 'foo')
    # ...and regular keys by the ACL API.
    pytest.raises(ValueError, proxy.acls.get, 'reg')
    pytest.raises(ValueError, proxy.acls.set, 'reg', {user})
    pytest.raises(ValueError, proxy.acls.contains_user, 'reg', user)
    pytest.raises(ValueError, proxy.acls.add_principal, 'reg', user)
    pytest.raises(ValueError, proxy.acls.remove_principal, 'reg', user)
@pytest.mark.usefixtures('db')
def test_get_all_acls():
    """get_all() reports ACL keys as empty sets when nothing is stored."""
    proxy = SettingsProxy('foo', {'reg': None}, acls={'acl'})
    assert proxy.get_all() == {'reg': None, 'acl': set()}
@pytest.mark.usefixtures('db')
def test_acls(dummy_user, create_user):
    """ACL entries can be set wholesale and adjusted per principal."""
    other_user = create_user(123)
    proxy = SettingsProxy('foo', acls={'acl'})
    assert proxy.acls.get('acl') == set()
    proxy.acls.set('acl', {dummy_user})
    assert proxy.acls.get('acl') == {dummy_user}
    assert proxy.acls.contains_user('acl', dummy_user)
    assert not proxy.acls.contains_user('acl', other_user)
    proxy.acls.add_principal('acl', other_user)
    assert proxy.acls.contains_user('acl', other_user)
    assert proxy.acls.get('acl') == {dummy_user, other_user}
    proxy.acls.remove_principal('acl', dummy_user)
    assert proxy.acls.get('acl') == {other_user}
def test_delete_propagate(mocker):
    """delete() routes plain keys to Setting and ACL keys to SettingPrincipal."""
    Setting = mocker.patch('indico.core.settings.proxy.Setting')
    SettingPrincipal = mocker.patch('indico.core.settings.proxy.SettingPrincipal')
    proxy = SettingsProxy('foo', {'reg': None}, acls={'acl'})
    proxy.delete('reg', 'acl')
    Setting.delete.assert_called_once_with('foo', 'reg')
    SettingPrincipal.delete.assert_called_with('foo', 'acl')
def test_set_multi_propagate(mocker):
    """set_multi() splits the mapping between Setting and SettingPrincipal."""
    Setting = mocker.patch('indico.core.settings.proxy.Setting')
    SettingPrincipal = mocker.patch('indico.core.settings.proxy.SettingPrincipal')
    proxy = SettingsProxy('foo', {'reg': None}, acls={'acl'})
    proxy.set_multi({
        'reg': 'bar',
        'acl': {'u'}
    })
    Setting.set_multi.assert_called_once_with('foo', {'reg': 'bar'})
    SettingPrincipal.set_acl_multi.assert_called_with('foo', {'acl': {'u'}})
def test_prefix_settings_invalid():
    """Unprefixed or unknown-prefix keys are rejected by the prefix proxy."""
    foo_proxy = SettingsProxy('foo', {'a': 1, 'b': 2})
    bar_proxy = SettingsProxy('bar', {'x': 3, 'y': 4})
    proxy = PrefixSettingsProxy({'foo': foo_proxy, 'bar': bar_proxy})
    pytest.raises(ValueError, proxy.get, 'x')
    pytest.raises(ValueError, proxy.get, 'x_y')
    pytest.raises(ValueError, proxy.set, 'x', 'test')
    pytest.raises(ValueError, proxy.set, 'x_y', 'test')
@pytest.mark.parametrize('with_arg', (True, False))
@pytest.mark.usefixtures('db')
def test_prefix_settings(dummy_event, with_arg):
    """The prefix proxy forwards operations to the right child proxy."""
    # Event-scoped proxies need the event passed as `arg` on every call.
    kw = {'arg': dummy_event} if with_arg else {'arg': None}
    cls = EventSettingsProxy if with_arg else SettingsProxy
    foo_proxy = cls('foo', {'a': 1, 'b': 2})
    bar_proxy = cls('bar', {'x': None, 'y': 4})
    proxy = PrefixSettingsProxy({'foo': foo_proxy, 'bar': bar_proxy}, has_arg=with_arg)
    proxy.set('bar_x', 3, **kw)
    assert proxy.get_all(**kw) == {'foo_a': 1, 'foo_b': 2, 'bar_x': 3, 'bar_y': 4}
    assert proxy.get_all(no_defaults=True, **kw) == {'bar_x': 3}
    assert proxy.get('foo_a', **kw) == 1
    assert proxy.get('bar_y', 'test', **kw) == 'test'
    proxy.set_multi({'foo_a': 11, 'bar_x': 33}, **kw)
    assert proxy.get('foo_a', **kw) == 11
    assert proxy.get('bar_x', **kw) == 33
    proxy.delete('foo_a', 'bar_x', **kw)
    assert proxy.get('foo_a', **kw) == 1
    assert proxy.get('bar_x', **kw) is None
    proxy.set_multi({'foo_a': 11, 'bar_x': 33}, **kw)
    proxy.delete_all(**kw)
    assert proxy.get_all(no_defaults=True, **kw) == {}
| mit |
leasunhy/GalaxyOJ | server/views/post_view.py | 1 | 4119 | from . import post
from .. import db
from ..models import Post, Notification, Solution, Tutorial
from ..forms import EditNotificationForm, EditTutorialForm, EditSolutionForm
from ..tools import privilege_required, count_page
from flask import render_template, url_for, request, redirect, flash, jsonify, session
from flask.ext.login import current_user, login_required
@post.route('/notifications')
@post.route('/notifications/<int:page>')
def notifications(page = 1):
    """List notification posts, 20 per page."""
    per_page = 20
    items = Notification.query.paginate(page=page, per_page=per_page).items
    return render_template(
        'notifications.html', posts=items, page=page,
        page_count=count_page(Notification, per_page))
@post.route('/solutions')
@post.route('/solutions/<int:page>')
def solutions(page = 1):
    """List solution posts, 20 per page."""
    per_page = 20
    items = Solution.query.paginate(page=page, per_page=per_page).items
    return render_template(
        'solution_list.html', posts=items, page=page,
        page_count=count_page(Solution, per_page))
@post.route('/tutorials')
@post.route('/tutorials/<int:page>')
def tutorials(page = 1):
    """List tutorial posts, 20 per page."""
    per_page = 20
    items = Tutorial.query.paginate(page=page, per_page=per_page).items
    return render_template(
        'tutorial_list.html', posts=items, page=page,
        page_count=count_page(Tutorial, per_page))
@post.route('/notification/<int:id>')
def notification(id):
    """Show a single notification post, or 404 if it does not exist."""
    item = Notification.query.get_or_404(id)
    return render_template('show_post_base.html', post=item)
@post.route('/solution/<int:id>')
def solution(id):
    """Show a single solution post, or 404 if it does not exist."""
    item = Solution.query.get_or_404(id)
    return render_template('show_post_base.html', post=item)
@post.route('/tutorial/<int:id>')
def tutorial(id):
    """Show a single tutorial post, or 404 if it does not exist."""
    item = Tutorial.query.get_or_404(id)
    return render_template('show_post_base.html', post=item)
@post.route('/new_notification', methods=['GET', 'POST'])
@privilege_required(1)
def new_notification():
    """Create a new notification; reuses the edit view with id 0."""
    return edit_notification(0)
@post.route('/new_tutorial', methods=['GET', 'POST'])
@login_required
def new_tutorial():
    """Create a new tutorial; reuses the edit view with id 0."""
    return edit_tutorial(0)
@post.route('/new_solution', methods=['GET', 'POST'])
@login_required
def new_solution():
    """Create a new solution; reuses the edit view with id 0."""
    return edit_solution(0)
@post.route('/edit_notification/<int:id>', methods=['GET', 'POST'])
@privilege_required(1)
def edit_notification(id=0):
    """Create (id == 0) or edit an existing notification post."""
    p = Notification() if id == 0 else Notification.query.get_or_404(id)
    form = EditNotificationForm(obj = p)
    if form.validate_on_submit():
        form.populate_obj(p)
        # Record the author on first save only.
        if not p.owner:
            p.owner = current_user
        db.session.add(p)
        db.session.commit()
        return redirect(url_for('post.notifications'))
    return render_template('edit_notification.html', form=form, pid=id)
@post.route('/edit_tutorial/<int:id>', methods=['GET', 'POST'])
@login_required
def edit_tutorial(id=0):
    """Create (id == 0) or edit a tutorial post.

    Only the tutorial's owner or an admin may edit an existing post.
    Bug fix: ``abort`` was referenced here without being imported, so the
    401 path raised NameError instead of returning Unauthorized; the
    top-of-file flask import now provides it.
    """
    p = Tutorial() if id == 0 else Tutorial.query.get_or_404(id)
    if id != 0 and not current_user.is_admin and current_user != p.owner:
        abort(401)
    form = EditTutorialForm(obj = p)
    if form.validate_on_submit():
        form.populate_obj(p)
        # Record the author on first save only.
        if not p.owner:
            p.owner = current_user
        db.session.add(p)
        db.session.commit()
        return redirect(url_for('post.tutorials'))
    return render_template('edit_tutorial.html', form=form, pid=id)
@post.route('/edit_solution/<int:id>', methods=['GET', 'POST'])
@login_required
def edit_solution(id=0):
    """Create (id == 0) or edit a solution post.

    Only the solution's owner or an admin may edit an existing post.
    Bug fix: ``abort`` was referenced here without being imported, so the
    401 path raised NameError instead of returning Unauthorized; the
    top-of-file flask import now provides it.
    """
    p = Solution() if id == 0 else Solution.query.get_or_404(id)
    if id != 0 and not current_user.is_admin and current_user != p.owner:
        abort(401)
    form = EditSolutionForm(obj = p)
    if form.validate_on_submit():
        form.populate_obj(p)
        # Record the author on first save only.
        if not p.owner:
            p.owner = current_user
        db.session.add(p)
        db.session.commit()
        return redirect(url_for('post.solutions'))
    return render_template('edit_solution.html', form=form, pid=id)
@post.route('/delete_post/<int:id>')
@privilege_required(1)
def delete_post(id):
    """Delete any post by id; requires privilege level 1."""
    p = Post.query.get_or_404(id)
    db.session.delete(p)
    db.session.commit()
    flash('Post successfully deleted.')
    return redirect(url_for('main.index'))
| gpl-3.0 |
schumi2004/NOT_UPDATED_Sick-Beard-Dutch | lib/imdb/parser/sql/objectadapter.py | 57 | 7832 | """
parser.sql.objectadapter module (imdb.parser.sql package).
This module adapts the SQLObject ORM to the internal mechanism.
Copyright 2008-2010 Davide Alberani <da@erlug.linux.it>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import sys
import logging
from sqlobject import *
from sqlobject.sqlbuilder import ISNULL, ISNOTNULL, AND, OR, IN, CONTAINSSTRING
from dbschema import *
_object_logger = logging.getLogger('imdbpy.parser.sql.object')
# Maps our placeholders to SQLObject's column classes (IntCol etc. come
# from the sqlobject package imported above; the old comment mentioning
# SQLAlchemy was misleading - this module adapts SQLObject).
MAP_COLS = {
    INTCOL: IntCol,
    UNICODECOL: UnicodeCol,
    STRINGCOL: StringCol
}
# Exception raised when Table.get(id) returns no value.
NotFoundError = SQLObjectNotFound
# class method to be added to the SQLObject class.
def addIndexes(cls, ifNotExists=True):
    """Create every index declared in the table's _imdbpySchema.

    Indexes already registered on sqlmeta are skipped; creation errors
    are logged and ignored so a partially-indexed db keeps working."""
    for col in cls._imdbpySchema.cols:
        if col.index:
            idxName = col.index
            colToIdx = col.name
            if col.indexLen:
                # Index only a prefix of the column (MySQL-style).
                colToIdx = {'column': col.name, 'length': col.indexLen}
            if idxName in [i.name for i in cls.sqlmeta.indexes]:
                # Check if the index is already present.
                continue
            idx = DatabaseIndex(colToIdx, name=idxName)
            cls.sqlmeta.addIndex(idx)
    try:
        cls.createIndexes(ifNotExists)
    except dberrors.OperationalError, e:
        # NOTE(review): Python 2-only except syntax.  Also `col` here is
        # whatever the loop last assigned, so the message may name the
        # wrong column.
        _object_logger.warn('Skipping creation of the %s.%s index: %s' %
                            (cls.sqlmeta.table, col.name, e))
addIndexes = classmethod(addIndexes)
# Global repository for "fake" tables with Foreign Keys - need to
# prevent troubles if addForeignKeys is called more than one time.
FAKE_TABLES_REPOSITORY = {}
def _buildFakeFKTable(cls, fakeTableName):
    """Return a "fake" table, with foreign keys where needed.

    The fake SQLObject subclass mirrors *cls* but declares real
    ForeignKey columns, so the connection can later generate the SQL for
    the reference constraints (see addForeignKeys)."""
    countCols = 0
    attrs = {}
    for col in cls._imdbpySchema.cols:
        countCols += 1
        if col.name == 'id':
            # SQLObject adds the `id` primary key by itself.
            continue
        if not col.foreignKey:
            # A non-foreign key column - add it as usual.
            attrs[col.name] = MAP_COLS[col.kind](**col.params)
            continue
        # XXX: Foreign Keys pointing to TableName.ColName not yet supported.
        thisColName = col.name
        if thisColName.endswith('ID'):
            # Drop the 'ID' suffix: SQLObject re-appends it for FK columns.
            thisColName = thisColName[:-2]
        fks = col.foreignKey.split('.', 1)
        foreignTableName = fks[0]
        if len(fks) == 2:
            foreignColName = fks[1]
        else:
            foreignColName = 'id'
        # Unused...
        #fkName = 'fk_%s_%s_%d' % (foreignTableName, foreignColName,
        #                           countCols)
        # Create a Foreign Key column, with the correct references.
        fk = ForeignKey(foreignTableName, name=thisColName, default=None)
        attrs[thisColName] = fk
    # Build a _NEW_ SQLObject subclass, with foreign keys, if needed.
    newcls = type(fakeTableName, (SQLObject,), attrs)
    return newcls
def addForeignKeys(cls, mapTables, ifNotExists=True):
    """Create all required foreign keys.

    Builds a throw-away "fake" class with real ForeignKey columns, asks
    the connection for the constraint SQL it would generate, rewrites the
    table names to the real ones, then executes that SQL."""
    # Do not even try, if there are no FK, in this table.
    # NOTE(review): `filter(...)` relies on Python 2 returning a list;
    # on Python 3 this expression would be an always-truthy iterator.
    if not filter(None, [col.foreignKey for col in cls._imdbpySchema.cols]):
        return
    fakeTableName = 'myfaketable%s' % cls.sqlmeta.table
    if fakeTableName in FAKE_TABLES_REPOSITORY:
        newcls = FAKE_TABLES_REPOSITORY[fakeTableName]
    else:
        newcls = _buildFakeFKTable(cls, fakeTableName)
        FAKE_TABLES_REPOSITORY[fakeTableName] = newcls
    # Connect the class with foreign keys.
    newcls.setConnection(cls._connection)
    for col in cls._imdbpySchema.cols:
        if col.name == 'id':
            continue
        if not col.foreignKey:
            continue
        # Get the SQL that _WOULD BE_ run, if we had to create
        # this "fake" table.
        fkQuery = newcls._connection.createReferenceConstraint(newcls,
                                newcls.sqlmeta.columns[col.name])
        if not fkQuery:
            # Probably the db doesn't support foreign keys (SQLite).
            continue
        # Remove "myfaketable" to get references to _real_ tables.
        fkQuery = fkQuery.replace('myfaketable', '')
        # Execute the query.
        newcls._connection.query(fkQuery)
    # Disconnect it.
    newcls._connection.close()
addForeignKeys = classmethod(addForeignKeys)
# Module-level "cache" for SQLObject classes, to prevent
# "class TheClass is already in the registry" errors, when
# two or more connections to the database are made.
# XXX: is this the best way to act?
TABLES_REPOSITORY = {}
def getDBTables(uri=None):
    """Return a list of classes to be used to access the database
    through the SQLObject ORM. The connection uri is optional, and
    can be used to tailor the db schema to specific needs.

    Classes are cached in TABLES_REPOSITORY so repeated calls (e.g. for
    multiple connections) reuse the same SQLObject subclasses instead of
    re-registering them."""
    DB_TABLES = []
    for table in DB_SCHEMA:
        if table.name in TABLES_REPOSITORY:
            DB_TABLES.append(TABLES_REPOSITORY[table.name])
            continue
        attrs = {'_imdbpyName': table.name, '_imdbpySchema': table,
                'addIndexes': addIndexes, 'addForeignKeys': addForeignKeys}
        for col in table.cols:
            if col.name == 'id':
                # SQLObject creates the `id` primary key automatically.
                continue
            attrs[col.name] = MAP_COLS[col.kind](**col.params)
        # Create a subclass of SQLObject.
        # XXX: use a metaclass? I can't see any advantage.
        cls = type(table.name, (SQLObject,), attrs)
        DB_TABLES.append(cls)
        TABLES_REPOSITORY[table.name] = cls
    return DB_TABLES
def toUTF8(s):
    """Encode *s* as a UTF-8 byte string (SQLObject sometimes requires
    encoded strings rather than unicode objects)."""
    return s.encode('utf-8')
def setConnection(uri, tables, encoding='utf8', debug=False):
    """Set connection for every table and return the connection object.

    Also disables SQLObject's per-table value cache (values are cached
    at insert time for performance, see imdbpy2sql.py)."""
    kw = {}
    # FIXME: it's absolutely unclear what we should do to correctly
    # support unicode in MySQL; with some versions of SQLObject,
    # it seems that setting use_unicode=1 is the _wrong_ thing to do.
    _uriLower = uri.lower()
    if _uriLower.startswith('mysql'):
        kw['use_unicode'] = 1
        #kw['sqlobject_encoding'] = encoding
        kw['charset'] = encoding
    conn = connectionForURI(uri, **kw)
    conn.debug = debug
    # XXX: doesn't work and a work-around was put in imdbpy2sql.py;
    # is there any way to modify the text_factory parameter of
    # a SQLite connection?
    #if uri.startswith('sqlite'):
    #    major = sys.version_info[0]
    #    minor = sys.version_info[1]
    #    if major > 2 or (major == 2 and minor > 5):
    #        sqliteConn = conn.getConnection()
    #        sqliteConn.text_factory = str
    for table in tables:
        table.setConnection(conn)
        #table.sqlmeta.cacheValues = False
        # FIXME: is it safe to set table._cacheValue to False? Looks like
        # we can't retrieve correct values after an update (I think
        # it's never needed, but...) Anyway, these are set to False
        # for performance reason at insert time (see imdbpy2sql.py).
        table._cacheValue = False
    # Required by imdbpy2sql.py.
    conn.paramstyle = conn.module.paramstyle
    return conn
| gpl-3.0 |
DJMuggs/ansible-modules-extras | web_infrastructure/jboss.py | 8 | 4925 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Jeroen Hoekx <jeroen.hoekx@dsquare.be>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = """
module: jboss
version_added: "1.4"
short_description: deploy applications to JBoss
description:
- Deploy applications to JBoss standalone using the filesystem
options:
deployment:
required: true
description:
- The name of the deployment
src:
required: false
description:
- The remote path of the application ear or war to deploy
deploy_path:
required: false
default: /var/lib/jbossas/standalone/deployments
description:
- The location in the filesystem where the deployment scanner listens
state:
required: false
choices: [ present, absent ]
default: "present"
description:
- Whether the application should be deployed or undeployed
notes:
- "The JBoss standalone deployment-scanner has to be enabled in standalone.xml"
- "Ensure no identically named application is deployed through the JBoss CLI"
author: '"Jeroen Hoekx (@jhoekx)" <jeroen.hoekx@dsquare.be>'
"""
EXAMPLES = """
# Deploy a hello world application
- jboss: src=/tmp/hello-1.0-SNAPSHOT.war deployment=hello.war state=present
# Update the hello world application
- jboss: src=/tmp/hello-1.1-SNAPSHOT.war deployment=hello.war state=present
# Undeploy the hello world application
- jboss: deployment=hello.war state=absent
"""
import os
import shutil
import time
def is_deployed(deploy_path, deployment):
    """Return True when the scanner left a .deployed marker for *deployment*."""
    marker = os.path.join(deploy_path, "%s.deployed"%(deployment))
    return os.path.exists(marker)
def is_undeployed(deploy_path, deployment):
    """Return True when the scanner left a .undeployed marker for *deployment*."""
    marker = os.path.join(deploy_path, "%s.undeployed"%(deployment))
    return os.path.exists(marker)
def is_failed(deploy_path, deployment):
    """Return True when the scanner left a .failed marker for *deployment*."""
    marker = os.path.join(deploy_path, "%s.failed"%(deployment))
    return os.path.exists(marker)
def main():
    # Entry point: deploy/undeploy an application by copying the artifact
    # into (or removing marker files from) the JBoss deployment-scanner
    # directory, then polling the scanner's marker files for the outcome.
    module = AnsibleModule(
        argument_spec = dict(
            src=dict(),
            deployment=dict(required=True),
            deploy_path=dict(default='/var/lib/jbossas/standalone/deployments'),
            state=dict(choices=['absent', 'present'], default='present'),
        ),
    )

    changed = False

    src = module.params['src']
    deployment = module.params['deployment']
    deploy_path = module.params['deploy_path']
    state = module.params['state']

    # 'src' is only meaningful (and therefore required) when deploying.
    if state == 'present' and not src:
        module.fail_json(msg="Argument 'src' required.")

    if not os.path.exists(deploy_path):
        module.fail_json(msg="deploy_path does not exist.")

    deployed = is_deployed(deploy_path, deployment)

    # Fresh deployment: copy the artifact, then wait for the scanner to
    # acknowledge it with a .deployed (success) or .failed marker.
    if state == 'present' and not deployed:
        if not os.path.exists(src):
            module.fail_json(msg='Source file %s does not exist.'%(src))
        if is_failed(deploy_path, deployment):
            ### Clean up old failed deployment
            os.remove(os.path.join(deploy_path, "%s.failed"%(deployment)))

        shutil.copyfile(src, os.path.join(deploy_path, deployment))
        # NOTE(review): this polling loop has no timeout; if the
        # deployment-scanner is disabled the task hangs forever -- confirm
        # this is acceptable.
        while not deployed:
            deployed = is_deployed(deploy_path, deployment)
            if is_failed(deploy_path, deployment):
                module.fail_json(msg='Deploying %s failed.'%(deployment))
            time.sleep(1)
        changed = True

    # Redeployment: only re-copy when the artifact content actually differs
    # (compared by sha1), then wait for redeploy confirmation.
    if state == 'present' and deployed:
        if module.sha1(src) != module.sha1(os.path.join(deploy_path, deployment)):
            os.remove(os.path.join(deploy_path, "%s.deployed"%(deployment)))
            shutil.copyfile(src, os.path.join(deploy_path, deployment))
            deployed = False
            # NOTE(review): unbounded wait, same caveat as above.
            while not deployed:
                deployed = is_deployed(deploy_path, deployment)
                if is_failed(deploy_path, deployment):
                    module.fail_json(msg='Deploying %s failed.'%(deployment))
                time.sleep(1)
            changed = True

    # Undeploy: removing the .deployed marker triggers the scanner to
    # undeploy; wait until the .undeployed marker appears.
    if state == 'absent' and deployed:
        os.remove(os.path.join(deploy_path, "%s.deployed"%(deployment)))
        while deployed:
            deployed = not is_undeployed(deploy_path, deployment)
            if is_failed(deploy_path, deployment):
                module.fail_json(msg='Undeploying %s failed.'%(deployment))
            time.sleep(1)
        changed = True

    module.exit_json(changed=changed)
# import module snippets
# Ansible's convention: the wildcard import pulls in AnsibleModule and
# helpers at runtime, then the module is executed unconditionally.
from ansible.module_utils.basic import *
main()
| gpl-3.0 |
oliverlee/pydy | examples/Kane1985/Chapter3/Ex6.6.py | 7 | 1180 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Exercise 6.6 from Kane 1985."""
from __future__ import division
from sympy.physics.mechanics import ReferenceFrame, Point
from sympy.physics.mechanics import inertia, inertia_of_point_mass
from sympy.physics.mechanics import dot
from sympy import symbols
from sympy import S
# Mass of body B, kept symbolic so the inertia entries scale with it;
# the numeric matrix entries below correspond to m = m_val (see the final
# substitution at the end of the script).
m = symbols('m')
m_val = 12

# Reference frame and points: O is the origin, B* the mass center of B.
N = ReferenceFrame('N')
pO = Point('O')
pBs = pO.locatenew('B*', -3*N.x + 2*N.y - 4*N.z)

# Inertia dyadic of B about O, numeric entries rescaled by m/m_val.
I_B_O = inertia(N, 260*m/m_val, 325*m/m_val, 169*m/m_val,
                72*m/m_val, 96*m/m_val, -144*m/m_val)
print("I_B_rel_O = {0}".format(I_B_O))

# Parallel-axis step: inertia of a point mass m located at B*, taken
# about O, is subtracted to obtain the central inertia of B.
I_Bs_O = inertia_of_point_mass(m, pBs.pos_from(pO), N)
print("\nI_B*_rel_O = {0}".format(I_Bs_O))

I_B_Bs = I_B_O - I_Bs_O
print("\nI_B_rel_B* = {0}".format(I_B_Bs))

# Shift the central inertia to point Q (parallel-axis theorem again).
pQ = pO.locatenew('Q', -4*N.z)
I_Bs_Q = inertia_of_point_mass(m, pBs.pos_from(pQ), N)
print("\nI_B*_rel_Q = {0}".format(I_Bs_Q))

I_B_Q = I_B_Bs + I_Bs_Q
print("\nI_B_rel_Q = {0}".format(I_B_Q))

# n_a is a vector parallel to line PQ
n_a = S(3)/5 * N.x - S(4)/5 * N.z

# Scalar moment of inertia of B about the line through Q along n_a:
# I = n_a . (I_B_Q) . n_a
I_a_a_B_Q = dot(dot(n_a, I_B_Q), n_a)
print("\nn_a = {0}".format(n_a))
print("\nI_a_a_B_Q = {0} = {1}".format(I_a_a_B_Q, I_a_a_B_Q.subs(m, m_val)))
| bsd-3-clause |
haxoza/django | django/utils/deprecation.py | 51 | 2897 | from __future__ import absolute_import
import inspect
import warnings
class RemovedInDjango20Warning(PendingDeprecationWarning):
    """Warning category for features scheduled for removal in Django 2.0."""
    pass
class RemovedInNextVersionWarning(DeprecationWarning):
    """Warning category for features scheduled for removal in the next release."""
    pass
class warn_about_renamed_method(object):
    """Decorator for a method that has been renamed.

    Instantiated with the rename metadata, then applied to the old-named
    method: every call emits *deprecation_warning* pointing callers at the
    new name before delegating to the wrapped function.
    """

    def __init__(self, class_name, old_method_name, new_method_name, deprecation_warning):
        self.class_name = class_name
        self.old_method_name = old_method_name
        self.new_method_name = new_method_name
        self.deprecation_warning = deprecation_warning

    def __call__(self, f):
        # functools.wraps preserves the wrapped function's __name__,
        # __doc__ etc., so introspection of the deprecated method still
        # shows the original metadata (the plain closure used to hide it).
        @wraps(f)
        def wrapped(*args, **kwargs):
            warnings.warn(
                "`%s.%s` is deprecated, use `%s` instead." %
                (self.class_name, self.old_method_name, self.new_method_name),
                self.deprecation_warning, 2)
            return f(*args, **kwargs)
        return wrapped
class RenameMethodsBase(type):
    """
    Handles the deprecation paths when renaming a method.

    It does the following:
        1) Define the new method if missing and complain about it.
        2) Define the old method if missing.
        3) Complain whenever an old method is called.

    See #15363 for more details.
    """

    # Tuples of (old_method_name, new_method_name, deprecation_warning),
    # declared by metaclass subclasses.
    renamed_methods = ()

    def __new__(cls, name, bases, attrs):
        new_class = super(RenameMethodsBase, cls).__new__(cls, name, bases, attrs)

        # Walk the full MRO so renames are honored on every ancestor that
        # defines either name, not just the class being created.
        for base in inspect.getmro(new_class):
            class_name = base.__name__
            for renamed_method in cls.renamed_methods:
                old_method_name = renamed_method[0]
                old_method = base.__dict__.get(old_method_name)
                new_method_name = renamed_method[1]
                new_method = base.__dict__.get(new_method_name)
                deprecation_warning = renamed_method[2]
                wrapper = warn_about_renamed_method(class_name, *renamed_method)

                # Define the new method if missing and complain about it
                if not new_method and old_method:
                    warnings.warn(
                        "`%s.%s` method should be renamed `%s`." %
                        (class_name, old_method_name, new_method_name),
                        deprecation_warning, 2)
                    # Make the new name work, and route the old name
                    # through the deprecation wrapper.
                    setattr(base, new_method_name, old_method)
                    setattr(base, old_method_name, wrapper(old_method))

                # Define the old method as a wrapped call to the new method.
                if not old_method and new_method:
                    setattr(base, old_method_name, wrapper(new_method))

        return new_class
class DeprecationInstanceCheck(type):
    """Metaclass making isinstance() checks against the class emit a
    deprecation warning that names the replacement given by the class's
    `alternative` attribute (category from `deprecation_warning`)."""

    def __instancecheck__(self, instance):
        message = "`%s` is deprecated, use `%s` instead." % (self.__name__, self.alternative)
        warnings.warn(message, self.deprecation_warning, 2)
        return super(DeprecationInstanceCheck, self).__instancecheck__(instance)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.