repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
enisoc/vitess | py/vtproto/vtworkerservice_pb2.py | 4 | 1956 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: vtworkerservice.proto
# NOTE: generated protobuf bindings — regenerating from vtworkerservice.proto
# will overwrite any manual change here.
import sys
# _b: identity on Python 2 (str literals are already bytes); latin1-encode on
# Python 3 so the serialized descriptor literal below yields bytes either way.
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)

_sym_db = _symbol_database.Default()

import vtworkerdata_pb2 as vtworkerdata__pb2

# File descriptor built from the serialized vtworkerservice.proto definition.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='vtworkerservice.proto',
  package='vtworkerservice',
  syntax='proto3',
  serialized_pb=_b('\n\x15vtworkerservice.proto\x12\x0fvtworkerservice\x1a\x12vtworkerdata.proto2\x83\x01\n\x08Vtworker\x12w\n\x16\x45xecuteVtworkerCommand\x12+.vtworkerdata.ExecuteVtworkerCommandRequest\x1a,.vtworkerdata.ExecuteVtworkerCommandResponse\"\x00\x30\x01\x42.Z,vitess.io/vitess/go/vt/proto/vtworkerserviceb\x06proto3')
  ,
  dependencies=[vtworkerdata__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)

# File-level options carry the Go package path for the generated Go bindings.
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('Z,vitess.io/vitess/go/vt/proto/vtworkerservice'))

# Service descriptor for the single-method Vtworker gRPC service.
_VTWORKER = _descriptor.ServiceDescriptor(
  name='Vtworker',
  full_name='vtworkerservice.Vtworker',
  file=DESCRIPTOR,
  index=0,
  options=None,
  serialized_start=63,
  serialized_end=194,
  methods=[
  # ExecuteVtworkerCommand is server-streaming (trailing \x30\x01 in the
  # serialized descriptor above).
  _descriptor.MethodDescriptor(
    name='ExecuteVtworkerCommand',
    full_name='vtworkerservice.Vtworker.ExecuteVtworkerCommand',
    index=0,
    containing_service=None,
    input_type=vtworkerdata__pb2._EXECUTEVTWORKERCOMMANDREQUEST,
    output_type=vtworkerdata__pb2._EXECUTEVTWORKERCOMMANDRESPONSE,
    options=None,
  ),
])
_sym_db.RegisterServiceDescriptor(_VTWORKER)

DESCRIPTOR.services_by_name['Vtworker'] = _VTWORKER

# @@protoc_insertion_point(module_scope)
| apache-2.0 |
ruohoruotsi/pyechonest | pyechonest/catalog.py | 23 | 12786 | #!/usr/bin/env python
# encoding: utf-8
"""
Copyright (c) 2010 The Echo Nest. All rights reserved.
Created by Scotty Vercoe on 2010-08-25.
The Catalog module loosely covers http://developer.echonest.com/docs/v4/catalog.html
Refer to the official api documentation if you are unsure about something.
"""
try:
import json
except ImportError:
import simplejson as json
import datetime
import warnings
import util
from proxies import CatalogProxy, ResultList
import artist, song
# deal with datetime in json
dthandler = lambda obj: obj.isoformat() if isinstance(obj, datetime.datetime) else None
def create_catalog_by_name(name, T="general"):
    """
    Create a new catalog with the given name and type.

    Does not check whether a catalog with this name already exists.
    Returns the freshly created Catalog object.
    """
    raw = util.callm("catalog/create", {}, POST=True,
                     data={"name": name, "type": T})
    response = raw['response']
    attrs = dict((key, response[key]) for key in ('name', 'type'))
    return Catalog(response['id'], **attrs)
class Catalog(CatalogProxy):
    """
    A Catalog object

    Attributes:
        id (str): Catalog ID
        name (str): Catalog Name
        read (list): A list of catalog items (objects if they are resolved, else dictionaries)
        feed (list): A list of dictionaries for news, blogs, reviews, audio, video for a catalog's artists

    Create a catalog object like so:

    >>> c = catalog.Catalog('CAGPXKK12BB06F9DE9') # get existing catalog
    >>> c = catalog.Catalog('test_song_catalog', 'song') # get existing or create new catalog
    """

    def __init__(self, id, type=None, **kwargs):
        """
        Create a catalog object (get a catalog by ID or get or create one given by name and type)

        Args:
            id (str): A catalog id or name

        Kwargs:
            type (str): 'song' or 'artist', specifying the catalog type

        Returns:
            A catalog object

        Example:

        >>> c = catalog.Catalog('my_songs', type='song')
        >>> c.id
        u'CAVKUPC12BCA792120'
        >>> c.name
        u'my_songs'
        >>>
        """
        super(Catalog, self).__init__(id, type, **kwargs)

    def __repr__(self):
        return "<%s - %s>" % (self._object_type.encode('utf-8'), self.name.encode('utf-8'))

    def __str__(self):
        return self.name.encode('utf-8')

    def _annotate_bounds(self, rval, response, item_ids):
        """Attach 'start'/'total' paging attributes to a ResultList.

        When explicit item ids were requested the API response carries no
        paging info, so synthesize it from the items actually returned.
        """
        if item_ids:
            rval.start = 0
            rval.total = len(response['catalog']['items'])
        else:
            rval.start = response['catalog']['start']
            rval.total = response['catalog']['total']

    def update(self, items):
        """
        Update a catalog object

        Args:
            items (list): A list of dicts describing update data and action codes (see api docs)

        Returns:
            A ticket id (str) that can be passed to status()

        Example:

        >>> c = catalog.Catalog('my_songs', type='song')
        >>> ticket = c.update(items)
        >>> ticket
        u'7dcad583f2a38e6689d48a792b2e4c96'
        >>> c.status(ticket)
        {u'ticket_status': u'complete', u'update_info': []}
        >>>
        """
        # dthandler serializes any datetime values items may contain
        post_data = {'data': json.dumps(items, default=dthandler)}
        response = self.post_attribute("update", data=post_data)
        return response['ticket']

    def status(self, ticket):
        """
        Check the status of a catalog update

        Args:
            ticket (str): A string representing a ticket ID

        Returns:
            A dictionary representing ticket status

        Example:

        >>> c.status(ticket)
        {u'ticket_status': u'complete', u'update_info': []}
        >>>
        """
        return self.get_attribute_simple("status", ticket=ticket)

    def get_profile(self):
        """
        Get basic profile information about this catalog

        Returns:
            A dictionary describing the catalog (id, name, type, totals, pending tickets)

        Example:

        >>> c
        <catalog - test_song_catalog>
        >>> c.profile()
        {u'id': u'CAGPXKK12BB06F9DE9',
         u'name': u'test_song_catalog',
         u'pending_tickets': [],
         u'resolved': 2,
         u'total': 4,
         u'type': u'song'}
        >>>
        """
        result = self.get_attribute("profile")
        return result['catalog']

    profile = property(get_profile)

    def read_items(self, buckets=None, results=15, start=0, item_ids=None):
        """
        Returns data from the catalog; also expanded for the requested buckets.

        This method is provided for backwards-compatibility; prefer
        get_item_dicts().

        Kwargs:
            buckets (list): A list of strings specifying which buckets to retrieve
            results (int): An integer number of results to return
            start (int): An integer starting value for the result set
            item_ids (list): Optional list of specific item ids to fetch

        Returns:
            A list of objects in the catalog; list contains additional attributes 'start' and 'total'

        Example:

        >>> c
        <catalog - my_songs>
        >>> c.read_items(results=1)
        [<song - Harmonice Mundi II>]
        >>>
        """
        warnings.warn("catalog.read_items() is deprecated. Please use catalog.get_item_dicts() instead.")
        kwargs = {}
        kwargs['bucket'] = buckets or []
        kwargs['item_id'] = item_ids or []
        response = self.get_attribute("read", results=results, start=start, **kwargs)
        rval = ResultList([])
        self._annotate_bounds(rval, response, item_ids)
        for item in response['catalog']['items']:
            new_item = None
            # song items
            if 'song_id' in item:
                item['id'] = item.pop('song_id')
                item['title'] = item.pop('song_name')
                request = item['request']
                new_item = song.Song(**util.fix(item))
                new_item.request = request
            # artist item
            elif 'artist_id' in item:
                item['id'] = item.pop('artist_id')
                item['name'] = item.pop('artist_name')
                request = item['request']
                new_item = artist.Artist(**util.fix(item))
                new_item.request = request
            # unresolved item: leave the raw dictionary as-is
            else:
                new_item = item
            rval.append(new_item)
        return rval

    read = property(read_items)

    def get_item_dicts(self, buckets=None, results=15, start=0, item_ids=None):
        """
        Returns data from the catalog; also expanded for the requested buckets

        Kwargs:
            buckets (list): A list of strings specifying which buckets to retrieve
            results (int): An integer number of results to return
            start (int): An integer starting value for the result set
            item_ids (list): Optional list of specific item ids to fetch

        Returns:
            A list of dicts representing objects in the catalog; list has additional attributes 'start' and 'total'

        Example:

        >>> c
        <catalog - my_songs>
        >>> c.get_item_dicts(results=1)
        [
            {
                "artist_id": "AR78KRI1187B98E6F2",
                "artist_name": "Art of Noise",
                "date_added": "2012-04-02T16:50:02",
                "foreign_id": "CAHLYLR13674D1CF83:song:1000",
                "request": {
                    "artist_name": "The Art Of Noise",
                    "item_id": "1000",
                    "song_name": "Love"
                },
                "song_id": "SOSBCTO1311AFE7AE0",
                "song_name": "Love"
            }
        ]
        """
        kwargs = {}
        kwargs['bucket'] = buckets or []
        kwargs['item_id'] = item_ids or []
        response = self.get_attribute("read", results=results, start=start, **kwargs)
        rval = ResultList(response['catalog']['items'])
        self._annotate_bounds(rval, response, item_ids)
        return rval

    item_dicts = property(get_item_dicts)

    def get_feed(self, buckets=None, since=None, results=15, start=0):
        """
        Returns feed (news, blogs, reviews, audio, video) for the catalog artists; response depends on requested buckets

        Kwargs:
            buckets (list): A list of strings specifying which feed items to retrieve
            since (str): Optional date string limiting the feed to newer items
            results (int): An integer number of results to return
            start (int): An integer starting value for the result set

        Returns:
            A list of news, blogs, reviews, audio or video document dicts

        Example:

        >>> c
        <catalog - my_artists>
        >>> c.get_feed(results=15)
        {u'date_found': u'2011-02-06T07:50:25',
         u'id': u'caec686c0dff361e4c53dceb58fb9d2f',
         u'type': u'blogs',
         ...}
        >>>
        """
        kwargs = {}
        kwargs['bucket'] = buckets or []
        if since:
            kwargs['since'] = since
        response = self.get_attribute("feed", results=results, start=start, **kwargs)
        return ResultList(response['feed'])

    feed = property(get_feed)

    def delete(self):
        """
        Deletes the entire catalog

        Returns:
            The deleted catalog's id.

        Example:

        >>> c
        <catalog - test_song_catalog>
        >>> c.delete()
        {u'id': u'CAXGUPY12BB087A21D'}
        >>>
        """
        return self.post_attribute("delete")

    def play(self, items, plays=None):
        """Report play counts for the given items."""
        return self.get_attribute("play", item=items, plays=plays)

    def skip(self, items, skips=None):
        """Report skip counts for the given items."""
        return self.get_attribute("skip", item=items, skips=skips)

    def keyvalues(self):
        """Return the catalog's key/value metadata."""
        return self.get_attribute("keyvalues")['keyvalues']

    def favorite(self, items, favorite=None):
        """Mark or clear favorite status for the given items."""
        if favorite is not None:
            # the API expects the literal strings 'true'/'false'
            favorite = str(favorite).lower()
        return self.get_attribute("favorite", item=items, favorite=favorite)

    def ban(self, items, ban=None):
        """Mark or clear banned status for the given items."""
        if ban is not None:
            # the API expects the literal strings 'true'/'false'
            ban = str(ban).lower()
        return self.get_attribute("ban", item=items, ban=ban)

    def rate(self, items, rating=None):
        """Set a rating for the given items."""
        return self.get_attribute("rate", item=items, rating=rating)
def get_catalog_by_name(name):
    """
    Look up an existing catalog by name on the current API key.

    Mirrors the API: an error is raised if no catalog with this
    name exists.
    """
    params = {'name': name}
    result = util.callm("%s/%s" % ('catalog', 'profile'), params)
    catalog_data = util.fix(result['response']['catalog'])
    return Catalog(**catalog_data)
def list_catalogs(results=30, start=0):
    """
    Return all catalogs created on this API key.

    Kwargs:
        results (int): An integer number of results to return
        start (int): An integer starting value for the result set

    Returns:
        A ResultList of Catalog objects carrying 'start' and 'total'.

    Example:

    >>> catalog.list_catalogs()
    [<catalog - test_artist_catalog>, <catalog - test_song_catalog>, <catalog - my_songs>]
    >>>
    """
    raw = util.callm("%s/%s" % ('catalog', 'list'),
                     {'results': results, 'start': start})
    response = raw['response']
    catalogs = [Catalog(**util.fix(entry)) for entry in response['catalogs']]
    return ResultList(catalogs, response['start'], response['total'])
| bsd-3-clause |
cosenal/osf.io | scripts/googledrive/migrate_to_external_account.py | 30 | 3090 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Script to migrate GoogleDrive credentials from user settings object to external
account objects.
Changes:
- Create external account for authorized user settings
- Attach external account to user settings
- Attach external account to all node settings
"""
import sys
import logging
from modularodm import Q
from modularodm.storage.base import KeyExistsException
from website.app import init_app
from scripts import utils as script_utils
from framework.transactions.context import TokuTransaction
from framework.mongo import database
from website.addons.googledrive.model import GoogleDriveUserSettings
from website.addons.googledrive.model import GoogleDriveNodeSettings
from website.oauth.models import ExternalAccount
logger = logging.getLogger(__name__)
def do_migration(records):
    """Migrate legacy GoogleDrive oauth credentials onto ExternalAccount objects.

    For each user-settings record: create (or load, if it already exists)
    the matching ExternalAccount, attach it to the user, and clear the
    legacy oauth_settings field.  Finally, re-point every authorized node
    settings document at its user settings via the renamed field.
    """
    # Bulk-rename the legacy string field 'user_settings' to
    # 'foreign_user_settings' ($type 2 restricts the match to string values).
    database['googledrivenodesettings'].update({'user_settings': {'$type': 2}}, {'$rename': { 'user_settings': 'foreign_user_settings'}}, multi=True)
    for user_addon in records:
        user = user_addon.owner
        old_account = user_addon.oauth_settings
        logger.info('Record found for user {}'.format(user._id))
        # Create/load external account and append to user
        try:
            account = ExternalAccount(
                provider='googledrive',
                provider_name='Google Drive',
                display_name=old_account.username,
                oauth_key=old_account.access_token,
                refresh_token=old_account.refresh_token,
                provider_id=old_account.user_id,
                expires_at=old_account.expires_at,
            )
            account.save()
        except KeyExistsException:
            # ... or get the old one (provider + provider_id is unique)
            account = ExternalAccount.find_one(
                Q('provider', 'eq', 'googledrive') &
                Q('provider_id', 'eq', old_account.user_id)
            )
            assert account is not None
        user.external_accounts.append(account)
        user.save()
        # Remove oauth_settings from user settings object
        user_addon.oauth_settings = None
        user_addon.save()
        logger.info('Added external account {0} to user {1}'.format(
            account._id, user._id,
        ))
    # Add external account to authorized nodes
    for node in GoogleDriveNodeSettings.find():
        if node.foreign_user_settings is None:
            continue
        logger.info('Migrating user_settings for googledrive {}'.format(node._id))
        node.user_settings = node.foreign_user_settings
        node.save()
def get_targets():
    """Return every GoogleDriveUserSettings record still holding legacy oauth_settings."""
    has_legacy_credentials = Q('oauth_settings', 'ne', None)
    return GoogleDriveUserSettings.find(has_legacy_credentials)
def main(dry=True):
    """Run the migration inside a transaction; roll everything back when dry."""
    init_app(set_backends=True, routes=False)  # Sets the storage backends on all models
    with TokuTransaction():
        do_migration(get_targets())
        if dry:
            # Raising inside the transaction aborts it, so a dry run
            # leaves the database untouched.
            raise Exception('Abort Transaction - Dry Run')
if __name__ == '__main__':
    # Pass 'dry' on the command line to preview the migration without
    # committing; a real run additionally logs to a script-specific file.
    dry = 'dry' in sys.argv
    if not dry:
        script_utils.add_file_logger(logger, __file__)
    main(dry=dry)
| apache-2.0 |
mavenlin/tensorflow | tensorflow/contrib/seq2seq/python/ops/loss.py | 120 | 5411 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Seq2seq loss operations for use in sequence models.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
__all__ = ["sequence_loss"]
def sequence_loss(logits,
                  targets,
                  weights,
                  average_across_timesteps=True,
                  average_across_batch=True,
                  softmax_loss_function=None,
                  name=None):
  """Weighted cross-entropy loss for a sequence of logits.

  The cross-entropy is computed per target position, giving a
  `[batch_size, sequence_length]` tensor, and then optionally averaged over
  the time and/or batch dimensions according to `average_across_timesteps`
  and `average_across_batch`; the result therefore has rank 0, 1, or 2.
  For example, with `average_across_timesteps=True` and
  `average_across_batch=False` the result has shape `[batch_size]`.

  Args:
    logits: `[batch_size, sequence_length, num_decoder_symbols]` float Tensor
      of unnormalized class scores for each timestep.
    targets: `[batch_size, sequence_length]` int Tensor of true class ids.
    weights: `[batch_size, sequence_length]` float Tensor weighting each
      prediction; use a 0/1 mask (e.g. from `tf.sequence_mask`) to ignore
      padded timesteps.
    average_across_timesteps: If set, sum the cost across the sequence
      dimension and divide by the total label weight across timesteps.
    average_across_batch: If set, sum the cost across the batch dimension
      and divide by the batch size.
    softmax_loss_function: Optional function `(labels, logits) -> loss-batch`
      used instead of the standard sparse softmax. **Note that to avoid
      confusion, it is required for the function to accept named arguments.**
    name: Optional name for this operation, defaults to "sequence_loss".

  Returns:
    A float Tensor of rank 0, 1, or 2 depending on the averaging flags. By
    default it is a scalar: the weighted average cross-entropy
    (log-perplexity) per symbol.

  Raises:
    ValueError: logits does not have 3 dimensions or targets does not have 2
      dimensions or weights does not have 2 dimensions.
  """
  if len(logits.get_shape()) != 3:
    raise ValueError("Logits must be a "
                     "[batch_size x sequence_length x logits] tensor")
  if len(targets.get_shape()) != 2:
    raise ValueError("Targets must be a [batch_size x sequence_length] "
                     "tensor")
  if len(weights.get_shape()) != 2:
    raise ValueError("Weights must be a [batch_size x sequence_length] "
                     "tensor")
  with ops.name_scope(name, "sequence_loss", [logits, targets, weights]):
    vocab_size = array_ops.shape(logits)[2]
    flat_logits = array_ops.reshape(logits, [-1, vocab_size])
    flat_targets = array_ops.reshape(targets, [-1])
    if softmax_loss_function is None:
      losses = nn_ops.sparse_softmax_cross_entropy_with_logits(
          labels=flat_targets, logits=flat_logits)
    else:
      losses = softmax_loss_function(labels=flat_targets, logits=flat_logits)
    # Per-position losses, masked/scaled by the caller-supplied weights.
    losses = losses * array_ops.reshape(weights, [-1])
    if average_across_timesteps and average_across_batch:
      # Single scalar: total weighted loss over total weight.
      # 1e-12 guards against division by zero for all-zero weights.
      denom = math_ops.reduce_sum(weights) + 1e-12
      losses = math_ops.reduce_sum(losses) / denom
    else:
      batch_size = array_ops.shape(logits)[0]
      sequence_length = array_ops.shape(logits)[1]
      losses = array_ops.reshape(losses, [batch_size, sequence_length])
      # Exactly one (or neither) of the flags is set here; the branches are
      # mutually exclusive with the both-set case handled above.
      if average_across_timesteps:
        denom = math_ops.reduce_sum(weights, axis=[1]) + 1e-12
        losses = math_ops.reduce_sum(losses, axis=[1]) / denom
      elif average_across_batch:
        denom = math_ops.reduce_sum(weights, axis=[0]) + 1e-12
        losses = math_ops.reduce_sum(losses, axis=[0]) / denom
    return losses
| apache-2.0 |
Daniel-UCAS/dgCFD.jl | doc/source/conf.py | 1 | 11646 | # -*- coding: utf-8 -*-
#
# dgCFD.jl documentation build configuration file, created by
# sphinx-quickstart on Thu Dec 01 08:43:04 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# Sphinx extension modules enabled for this build.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.intersphinx',
    'sphinx.ext.mathjax',
    'sphinx.ext.ifconfig',
    'sphinx.ext.viewcode',
    'sphinx.ext.githubpages',
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'dgCFD.jl'
copyright = u'2016, Yu CHENG'
author = u'Yu CHENG'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.0.0'
# The full version, including alpha/beta/rc tags.
release = u'0.0.0'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []

# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'

# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#html_title = u'dgCFD.jl v0.0.0'

# A shorter title for the navigation bar.  Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (relative to this directory) to use as a favicon of
# the docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'dgCFDjldoc'
# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #'preamble': '',

    # Latex figure (float) alignment
    #'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'dgCFDjl.tex', u'dgCFD.jl Documentation',
     u'Yu CHENG', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'dgcfdjl', u'dgCFD.jl Documentation',
     [author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'dgCFDjl', u'dgCFD.jl Documentation',
     author, 'dgCFDjl', 'One line description of project.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------

# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
#epub_basename = project
# The HTML theme for the epub output. Since the default themes are not
# optimized for small screen space, using the same theme for HTML and epub
# output is usually not wise. This defaults to 'epub', a theme designed to save
# visual space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the Pillow.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
| mit |
charlesvdv/servo | tests/wpt/css-tests/tools/py/testing/path/test_local.py | 160 | 29652 | # -*- coding: utf-8 -*-
from __future__ import with_statement
import py
import pytest
import os, sys
from py.path import local
import common
# Expected-failure / skip markers for platform-specific path behavior;
# the condition strings are evaluated lazily by pytest.
failsonjython = py.test.mark.xfail("sys.platform.startswith('java')")
failsonjywin32 = py.test.mark.xfail("sys.platform.startswith('java') "
                                    "and getattr(os, '_name', None) == 'nt'")
win32only = py.test.mark.skipif(
    "not (sys.platform == 'win32' or getattr(os, '_name', None) == 'nt')")
skiponwin32 = py.test.mark.skipif(
    "sys.platform == 'win32' or getattr(os, '_name', None) == 'nt'")
def pytest_funcarg__path1(request):
    """Session-scoped funcarg: a tmpdir populated by common.setuptestfs()."""
    def make_fixture():
        fixture_dir = request.getfuncargvalue("tmpdir")
        common.setuptestfs(fixture_dir)
        return fixture_dir

    def check_fixture(fixture_dir):
        # post check: the sample file created by setuptestfs must survive
        assert fixture_dir.join("samplefile").check()

    return request.cached_setup(make_fixture, check_fixture, scope="session")
class TestLocalPath(common.CommonFSTests):
def test_join_normpath(self, tmpdir):
assert tmpdir.join(".") == tmpdir
p = tmpdir.join("../%s" % tmpdir.basename)
assert p == tmpdir
p = tmpdir.join("..//%s/" % tmpdir.basename)
assert p == tmpdir
@skiponwin32
def test_dirpath_abs_no_abs(self, tmpdir):
p = tmpdir.join('foo')
assert p.dirpath('/bar') == tmpdir.join('bar')
assert tmpdir.dirpath('/bar', abs=True) == py.path.local('/bar')
def test_gethash(self, tmpdir):
md5 = py.builtin._tryimport('md5', 'hashlib').md5
lib = py.builtin._tryimport('sha', 'hashlib')
sha = getattr(lib, 'sha1', getattr(lib, 'sha', None))
fn = tmpdir.join("testhashfile")
data = 'hello'.encode('ascii')
fn.write(data, mode="wb")
assert fn.computehash("md5") == md5(data).hexdigest()
assert fn.computehash("sha1") == sha(data).hexdigest()
py.test.raises(ValueError, fn.computehash, "asdasd")
def test_remove_removes_readonly_file(self, tmpdir):
readonly_file = tmpdir.join('readonly').ensure()
readonly_file.chmod(0)
readonly_file.remove()
assert not readonly_file.check(exists=1)
def test_remove_removes_readonly_dir(self, tmpdir):
readonly_dir = tmpdir.join('readonlydir').ensure(dir=1)
readonly_dir.chmod(int("500", 8))
readonly_dir.remove()
assert not readonly_dir.check(exists=1)
def test_remove_removes_dir_and_readonly_file(self, tmpdir):
readonly_dir = tmpdir.join('readonlydir').ensure(dir=1)
readonly_file = readonly_dir.join('readonlyfile').ensure()
readonly_file.chmod(0)
readonly_dir.remove()
assert not readonly_dir.check(exists=1)
def test_remove_routes_ignore_errors(self, tmpdir, monkeypatch):
l = []
monkeypatch.setattr(py.std.shutil, 'rmtree',
lambda *args, **kwargs: l.append(kwargs))
tmpdir.remove()
assert not l[0]['ignore_errors']
for val in (True, False):
l[:] = []
tmpdir.remove(ignore_errors=val)
assert l[0]['ignore_errors'] == val
def test_initialize_curdir(self):
assert str(local()) == py.std.os.getcwd()
@skiponwin32
def test_chdir_gone(self, path1):
p = path1.ensure("dir_to_be_removed", dir=1)
p.chdir()
p.remove()
pytest.raises(py.error.ENOENT, py.path.local)
assert path1.chdir() is None
assert os.getcwd() == str(path1)
def test_as_cwd(self, path1):
dir = path1.ensure("subdir", dir=1)
old = py.path.local()
with dir.as_cwd() as x:
assert x == old
assert py.path.local() == dir
assert os.getcwd() == str(old)
def test_as_cwd_exception(self, path1):
old = py.path.local()
dir = path1.ensure("subdir", dir=1)
with pytest.raises(ValueError):
with dir.as_cwd():
raise ValueError()
assert old == py.path.local()
def test_initialize_reldir(self, path1):
with path1.as_cwd():
p = local('samplefile')
assert p.check()
@pytest.mark.xfail("sys.version_info < (2,6) and sys.platform == 'win32'")
def test_tilde_expansion(self, monkeypatch, tmpdir):
monkeypatch.setenv("HOME", str(tmpdir))
p = py.path.local("~", expanduser=True)
assert p == os.path.expanduser("~")
def test_eq_with_strings(self, path1):
path1 = path1.join('sampledir')
path2 = str(path1)
assert path1 == path2
assert path2 == path1
path3 = path1.join('samplefile')
assert path3 != path2
assert path2 != path3
def test_eq_with_none(self, path1):
assert path1 != None
def test_gt_with_strings(self, path1):
path2 = path1.join('sampledir')
path3 = str(path1.join("ttt"))
assert path3 > path2
assert path2 < path3
assert path2 < "ttt"
assert "ttt" > path2
path4 = path1.join("aaa")
l = [path2, path4,path3]
assert sorted(l) == [path4, path2, path3]
def test_open_and_ensure(self, path1):
p = path1.join("sub1", "sub2", "file")
with p.open("w", ensure=1) as f:
f.write("hello")
assert p.read() == "hello"
def test_write_and_ensure(self, path1):
p = path1.join("sub1", "sub2", "file")
p.write("hello", ensure=1)
assert p.read() == "hello"
@py.test.mark.multi(bin=(False, True))
def test_dump(self, tmpdir, bin):
path = tmpdir.join("dumpfile%s" % int(bin))
try:
d = {'answer' : 42}
path.dump(d, bin=bin)
f = path.open('rb+')
dnew = py.std.pickle.load(f)
assert d == dnew
finally:
f.close()
@failsonjywin32
def test_setmtime(self):
import tempfile
import time
try:
fd, name = tempfile.mkstemp()
py.std.os.close(fd)
except AttributeError:
name = tempfile.mktemp()
open(name, 'w').close()
try:
mtime = int(time.time())-100
path = local(name)
assert path.mtime() != mtime
path.setmtime(mtime)
assert path.mtime() == mtime
path.setmtime()
assert path.mtime() != mtime
finally:
py.std.os.remove(name)
def test_normpath(self, path1):
new1 = path1.join("/otherdir")
new2 = path1.join("otherdir")
assert str(new1) == str(new2)
def test_mkdtemp_creation(self):
d = local.mkdtemp()
try:
assert d.check(dir=1)
finally:
d.remove(rec=1)
def test_tmproot(self):
d = local.mkdtemp()
tmproot = local.get_temproot()
try:
assert d.check(dir=1)
assert d.dirpath() == tmproot
finally:
d.remove(rec=1)
def test_chdir(self, tmpdir):
old = local()
try:
res = tmpdir.chdir()
assert str(res) == str(old)
assert py.std.os.getcwd() == str(tmpdir)
finally:
old.chdir()
def test_ensure_filepath_withdir(self, tmpdir):
newfile = tmpdir.join('test1','test')
newfile.ensure()
assert newfile.check(file=1)
newfile.write("42")
newfile.ensure()
s = newfile.read()
assert s == "42"
def test_ensure_filepath_withoutdir(self, tmpdir):
newfile = tmpdir.join('test1file')
t = newfile.ensure()
assert t == newfile
assert newfile.check(file=1)
def test_ensure_dirpath(self, tmpdir):
newfile = tmpdir.join('test1','testfile')
t = newfile.ensure(dir=1)
assert t == newfile
assert newfile.check(dir=1)
def test_init_from_path(self, tmpdir):
l = local()
l2 = local(l)
assert l2 == l
wc = py.path.svnwc('.')
l3 = local(wc)
assert l3 is not wc
assert l3.strpath == wc.strpath
assert not hasattr(l3, 'commit')
@py.test.mark.xfail(run=False, reason="unreliable est for long filenames")
def test_long_filenames(self, tmpdir):
if sys.platform == "win32":
py.test.skip("win32: work around needed for path length limit")
# see http://codespeak.net/pipermail/py-dev/2008q2/000922.html
# testing paths > 260 chars (which is Windows' limitation, but
# depending on how the paths are used), but > 4096 (which is the
# Linux' limitation) - the behaviour of paths with names > 4096 chars
# is undetermined
newfilename = '/test' * 60
l = tmpdir.join(newfilename)
l.ensure(file=True)
l.write('foo')
l2 = tmpdir.join(newfilename)
assert l2.read() == 'foo'
def test_visit_depth_first(self, tmpdir):
p1 = tmpdir.ensure("a","1")
p2 = tmpdir.ensure("b","2")
p3 = tmpdir.ensure("breadth")
l = list(tmpdir.visit(lambda x: x.check(file=1)))
assert len(l) == 3
# check that breadth comes last
assert l[2] == p3
def test_visit_rec_fnmatch(self, tmpdir):
p1 = tmpdir.ensure("a","123")
p2 = tmpdir.ensure(".b","345")
l = list(tmpdir.visit("???", rec="[!.]*"))
assert len(l) == 1
# check that breadth comes last
assert l[0] == p1
def test_fnmatch_file_abspath(self, tmpdir):
b = tmpdir.join("a", "b")
assert b.fnmatch(os.sep.join("ab"))
pattern = os.sep.join([str(tmpdir), "*", "b"])
assert b.fnmatch(pattern)
def test_sysfind(self):
name = sys.platform == "win32" and "cmd" or "test"
x = py.path.local.sysfind(name)
assert x.check(file=1)
assert py.path.local.sysfind('jaksdkasldqwe') is None
assert py.path.local.sysfind(name, paths=[]) is None
x2 = py.path.local.sysfind(name, paths=[x.dirpath()])
assert x2 == x
class TestExecutionOnWindows:
    # Only meaningful on win32 (see win32only marker above).
    pytestmark = win32only

    def test_sysfind_bat_exe_before(self, tmpdir, monkeypatch):
        """sysfind must prefer an executable extension (.bat) over a bare file
        of the same basename earlier on PATH."""
        monkeypatch.setenv("PATH", str(tmpdir), prepend=os.pathsep)
        tmpdir.ensure("hello")
        h = tmpdir.ensure("hello.bat")
        x = py.path.local.sysfind("hello")
        assert x == h
class TestExecution:
pytestmark = skiponwin32
def test_sysfind_no_permisson_ignored(self, monkeypatch, tmpdir):
noperm = tmpdir.ensure('noperm', dir=True)
monkeypatch.setenv("PATH", noperm, prepend=":")
noperm.chmod(0)
assert py.path.local.sysfind('jaksdkasldqwe') is None
def test_sysfind_absolute(self):
x = py.path.local.sysfind('test')
assert x.check(file=1)
y = py.path.local.sysfind(str(x))
assert y.check(file=1)
assert y == x
def test_sysfind_multiple(self, tmpdir, monkeypatch):
monkeypatch.setenv('PATH',
"%s:%s" % (tmpdir.ensure('a'),
tmpdir.join('b')),
prepend=":")
tmpdir.ensure('b', 'a')
checker = lambda x: x.dirpath().basename == 'b'
x = py.path.local.sysfind('a', checker=checker)
assert x.basename == 'a'
assert x.dirpath().basename == 'b'
checker = lambda x: None
assert py.path.local.sysfind('a', checker=checker) is None
def test_sysexec(self):
x = py.path.local.sysfind('ls')
out = x.sysexec('-a')
for x in py.path.local().listdir():
assert out.find(x.basename) != -1
def test_sysexec_failing(self):
x = py.path.local.sysfind('false')
py.test.raises(py.process.cmdexec.Error, """
x.sysexec('aksjdkasjd')
""")
def test_make_numbered_dir(self, tmpdir):
tmpdir.ensure('base.not_an_int', dir=1)
for i in range(10):
numdir = local.make_numbered_dir(prefix='base.', rootdir=tmpdir,
keep=2, lock_timeout=0)
assert numdir.check()
assert numdir.basename == 'base.%d' %i
if i>=1:
assert numdir.new(ext=str(i-1)).check()
if i>=2:
assert numdir.new(ext=str(i-2)).check()
if i>=3:
assert not numdir.new(ext=str(i-3)).check()
def test_make_numbered_dir_NotImplemented_Error(self, tmpdir, monkeypatch):
def notimpl(x, y):
raise NotImplementedError(42)
monkeypatch.setattr(py.std.os, 'symlink', notimpl)
x = tmpdir.make_numbered_dir(rootdir=tmpdir, lock_timeout=0)
assert x.relto(tmpdir)
assert x.check()
def test_locked_make_numbered_dir(self, tmpdir):
for i in range(10):
numdir = local.make_numbered_dir(prefix='base2.', rootdir=tmpdir,
keep=2)
assert numdir.check()
assert numdir.basename == 'base2.%d' %i
for j in range(i):
assert numdir.new(ext=str(j)).check()
def test_error_preservation(self, path1):
py.test.raises (EnvironmentError, path1.join('qwoeqiwe').mtime)
py.test.raises (EnvironmentError, path1.join('qwoeqiwe').read)
#def test_parentdirmatch(self):
# local.parentdirmatch('std', startmodule=__name__)
#
class TestImport:
def test_pyimport(self, path1):
obj = path1.join('execfile.py').pyimport()
assert obj.x == 42
assert obj.__name__ == 'execfile'
def test_pyimport_renamed_dir_creates_mismatch(self, tmpdir):
p = tmpdir.ensure("a", "test_x123.py")
p.pyimport()
tmpdir.join("a").move(tmpdir.join("b"))
pytest.raises(tmpdir.ImportMismatchError,
lambda: tmpdir.join("b", "test_x123.py").pyimport())
def test_pyimport_messy_name(self, tmpdir):
# http://bitbucket.org/hpk42/py-trunk/issue/129
path = tmpdir.ensure('foo__init__.py')
obj = path.pyimport()
def test_pyimport_dir(self, tmpdir):
p = tmpdir.join("hello_123")
p_init = p.ensure("__init__.py")
m = p.pyimport()
assert m.__name__ == "hello_123"
m = p_init.pyimport()
assert m.__name__ == "hello_123"
def test_pyimport_execfile_different_name(self, path1):
obj = path1.join('execfile.py').pyimport(modname="0x.y.z")
assert obj.x == 42
assert obj.__name__ == '0x.y.z'
def test_pyimport_a(self, path1):
otherdir = path1.join('otherdir')
mod = otherdir.join('a.py').pyimport()
assert mod.result == "got it"
assert mod.__name__ == 'otherdir.a'
def test_pyimport_b(self, path1):
otherdir = path1.join('otherdir')
mod = otherdir.join('b.py').pyimport()
assert mod.stuff == "got it"
assert mod.__name__ == 'otherdir.b'
def test_pyimport_c(self, path1):
otherdir = path1.join('otherdir')
mod = otherdir.join('c.py').pyimport()
assert mod.value == "got it"
def test_pyimport_d(self, path1):
otherdir = path1.join('otherdir')
mod = otherdir.join('d.py').pyimport()
assert mod.value2 == "got it"
def test_pyimport_and_import(self, tmpdir):
tmpdir.ensure('xxxpackage', '__init__.py')
mod1path = tmpdir.ensure('xxxpackage', 'module1.py')
mod1 = mod1path.pyimport()
assert mod1.__name__ == 'xxxpackage.module1'
from xxxpackage import module1
assert module1 is mod1
def test_pyimport_check_filepath_consistency(self, monkeypatch, tmpdir):
name = 'pointsback123'
ModuleType = type(py.std.os)
p = tmpdir.ensure(name + '.py')
for ending in ('.pyc', '$py.class', '.pyo'):
mod = ModuleType(name)
pseudopath = tmpdir.ensure(name+ending)
mod.__file__ = str(pseudopath)
monkeypatch.setitem(sys.modules, name, mod)
newmod = p.pyimport()
assert mod == newmod
monkeypatch.undo()
mod = ModuleType(name)
pseudopath = tmpdir.ensure(name+"123.py")
mod.__file__ = str(pseudopath)
monkeypatch.setitem(sys.modules, name, mod)
excinfo = py.test.raises(pseudopath.ImportMismatchError,
"p.pyimport()")
modname, modfile, orig = excinfo.value.args
assert modname == name
assert modfile == pseudopath
assert orig == p
assert issubclass(pseudopath.ImportMismatchError, ImportError)
def test_issue131_pyimport_on__init__(self, tmpdir):
# __init__.py files may be namespace packages, and thus the
# __file__ of an imported module may not be ourselves
# see issue
p1 = tmpdir.ensure("proja", "__init__.py")
p2 = tmpdir.ensure("sub", "proja", "__init__.py")
m1 = p1.pyimport()
m2 = p2.pyimport()
assert m1 == m2
def test_ensuresyspath_append(self, tmpdir):
root1 = tmpdir.mkdir("root1")
file1 = root1.ensure("x123.py")
assert str(root1) not in sys.path
file1.pyimport(ensuresyspath="append")
assert str(root1) == sys.path[-1]
assert str(root1) not in sys.path[:-1]
def test_pypkgdir(tmpdir):
    """pypkgpath() walks up to the outermost dir that has an __init__.py."""
    root = tmpdir.ensure('pkg1', dir=1)
    root.ensure("__init__.py")
    root.ensure("subdir/__init__.py")
    assert root.pypkgpath() == root
    assert root.join('subdir', '__init__.py').pypkgpath() == root
def test_pypkgdir_unimportable(tmpdir):
    """A dir whose name is not a valid identifier cannot be a package root."""
    bad_pkg = tmpdir.ensure('pkg1-1', dir=1)  # '-' makes it unimportable
    bad_pkg.ensure("__init__.py")
    inner = bad_pkg.ensure("subdir/__init__.py").dirpath()
    assert inner.pypkgpath() == inner
    assert inner.ensure("xyz.py").pypkgpath() == inner
    assert not bad_pkg.pypkgpath()
def test_isimportable():
    """isimportable() accepts python-identifier-like names only."""
    from py._path.local import isimportable
    for good in ("x", "x1", "x_1", "_", "_1"):
        assert isimportable(good)
    for bad in ("", "x-1", "x:1"):
        assert not isimportable(bad)
def test_homedir_from_HOME(monkeypatch):
    """_gethomedir() honours the HOME environment variable."""
    cwd = os.getcwd()
    monkeypatch.setenv("HOME", cwd)
    assert py.path.local._gethomedir() == py.path.local(cwd)
def test_homedir_not_exists(monkeypatch):
    """Without HOME/HOMEDRIVE set, _gethomedir() reports None."""
    for var in ("HOME", "HOMEDRIVE"):
        monkeypatch.delenv(var, raising=False)
    assert py.path.local._gethomedir() is None
def test_samefile(tmpdir):
    """samefile() is reflexive, works for relative names, and — on windows —
    ignores case differences."""
    assert tmpdir.samefile(tmpdir)
    hello = tmpdir.ensure("hello")
    assert hello.samefile(hello)
    with hello.dirpath().as_cwd():
        # relative basename resolves against the cwd
        assert hello.samefile(hello.basename)
    if sys.platform == "win32":
        lowered = hello.__class__(str(hello).lower())
        uppered = hello.__class__(str(hello).upper())
        assert lowered.samefile(uppered)
def test_listdir_single_arg(tmpdir):
    """listdir() accepts a single fnmatch-style filter argument."""
    tmpdir.ensure("hello")
    matches = tmpdir.listdir("hello")
    assert matches[0].basename == "hello"
def test_mkdtemp_rootdir(tmpdir):
    """mkdtemp(rootdir=...) creates its directory below the given root."""
    created = local.mkdtemp(rootdir=tmpdir)
    assert tmpdir.listdir() == [created]
class TestWINLocalPath:
pytestmark = win32only
def test_owner_group_not_implemented(self, path1):
py.test.raises(NotImplementedError, "path1.stat().owner")
py.test.raises(NotImplementedError, "path1.stat().group")
def test_chmod_simple_int(self, path1):
py.builtin.print_("path1 is", path1)
mode = path1.stat().mode
# Ensure that we actually change the mode to something different.
path1.chmod(mode == 0 and 1 or 0)
try:
print(path1.stat().mode)
print(mode)
assert path1.stat().mode != mode
finally:
path1.chmod(mode)
assert path1.stat().mode == mode
def test_path_comparison_lowercase_mixed(self, path1):
t1 = path1.join("a_path")
t2 = path1.join("A_path")
assert t1 == t1
assert t1 == t2
def test_relto_with_mixed_case(self, path1):
t1 = path1.join("a_path", "fiLe")
t2 = path1.join("A_path")
assert t1.relto(t2) == "fiLe"
def test_allow_unix_style_paths(self, path1):
t1 = path1.join('a_path')
assert t1 == str(path1) + '\\a_path'
t1 = path1.join('a_path/')
assert t1 == str(path1) + '\\a_path'
t1 = path1.join('dir/a_path')
assert t1 == str(path1) + '\\dir\\a_path'
def test_sysfind_in_currentdir(self, path1):
cmd = py.path.local.sysfind('cmd')
root = cmd.new(dirname='', basename='') # c:\ in most installations
with root.as_cwd():
x = py.path.local.sysfind(cmd.relto(root))
assert x.check(file=1)
def test_fnmatch_file_abspath_posix_pattern_on_win32(self, tmpdir):
# path-matching patterns might contain a posix path separator '/'
# Test that we can match that pattern on windows.
import posixpath
b = tmpdir.join("a", "b")
assert b.fnmatch(posixpath.sep.join("ab"))
pattern = posixpath.sep.join([str(tmpdir), "*", "b"])
assert b.fnmatch(pattern)
class TestPOSIXLocalPath:
pytestmark = skiponwin32
def test_hardlink(self, tmpdir):
linkpath = tmpdir.join('test')
filepath = tmpdir.join('file')
filepath.write("Hello")
nlink = filepath.stat().nlink
linkpath.mklinkto(filepath)
assert filepath.stat().nlink == nlink + 1
def test_symlink_are_identical(self, tmpdir):
filepath = tmpdir.join('file')
filepath.write("Hello")
linkpath = tmpdir.join('test')
linkpath.mksymlinkto(filepath)
assert linkpath.readlink() == str(filepath)
def test_symlink_isfile(self, tmpdir):
linkpath = tmpdir.join('test')
filepath = tmpdir.join('file')
filepath.write("")
linkpath.mksymlinkto(filepath)
assert linkpath.check(file=1)
assert not linkpath.check(link=0, file=1)
assert linkpath.islink()
def test_symlink_relative(self, tmpdir):
linkpath = tmpdir.join('test')
filepath = tmpdir.join('file')
filepath.write("Hello")
linkpath.mksymlinkto(filepath, absolute=False)
assert linkpath.readlink() == "file"
assert filepath.read() == linkpath.read()
def test_symlink_not_existing(self, tmpdir):
linkpath = tmpdir.join('testnotexisting')
assert not linkpath.check(link=1)
assert linkpath.check(link=0)
def test_relto_with_root(self, path1, tmpdir):
y = path1.join('x').relto(py.path.local('/'))
assert y[0] == str(path1)[1]
def test_visit_recursive_symlink(self, tmpdir):
linkpath = tmpdir.join('test')
linkpath.mksymlinkto(tmpdir)
visitor = tmpdir.visit(None, lambda x: x.check(link=0))
assert list(visitor) == [linkpath]
def test_symlink_isdir(self, tmpdir):
linkpath = tmpdir.join('test')
linkpath.mksymlinkto(tmpdir)
assert linkpath.check(dir=1)
assert not linkpath.check(link=0, dir=1)
def test_symlink_remove(self, tmpdir):
linkpath = tmpdir.join('test')
linkpath.mksymlinkto(linkpath) # point to itself
assert linkpath.check(link=1)
linkpath.remove()
assert not linkpath.check()
def test_realpath_file(self, tmpdir):
linkpath = tmpdir.join('test')
filepath = tmpdir.join('file')
filepath.write("")
linkpath.mksymlinkto(filepath)
realpath = linkpath.realpath()
assert realpath.basename == 'file'
def test_owner(self, path1, tmpdir):
from pwd import getpwuid
from grp import getgrgid
stat = path1.stat()
assert stat.path == path1
uid = stat.uid
gid = stat.gid
owner = getpwuid(uid)[0]
group = getgrgid(gid)[0]
assert uid == stat.uid
assert owner == stat.owner
assert gid == stat.gid
assert group == stat.group
def test_stat_helpers(self, tmpdir, monkeypatch):
path1 = tmpdir.ensure("file")
stat1 = path1.stat()
stat2 = tmpdir.stat()
assert stat1.isfile()
assert stat2.isdir()
assert not stat1.islink()
assert not stat2.islink()
def test_stat_non_raising(self, tmpdir):
path1 = tmpdir.join("file")
pytest.raises(py.error.ENOENT, lambda: path1.stat())
res = path1.stat(raising=False)
assert res is None
def test_atime(self, tmpdir):
import time
path = tmpdir.ensure('samplefile')
now = time.time()
atime1 = path.atime()
# we could wait here but timer resolution is very
# system dependent
path.read()
time.sleep(0.01)
atime2 = path.atime()
time.sleep(0.01)
duration = time.time() - now
assert (atime2-atime1) <= duration
def test_commondir(self, path1):
# XXX This is here in local until we find a way to implement this
# using the subversion command line api.
p1 = path1.join('something')
p2 = path1.join('otherthing')
assert p1.common(p2) == path1
assert p2.common(p1) == path1
def test_commondir_nocommon(self, path1):
# XXX This is here in local until we find a way to implement this
# using the subversion command line api.
p1 = path1.join('something')
p2 = py.path.local(path1.sep+'blabla')
assert p1.common(p2) == '/'
def test_join_to_root(self, path1):
root = path1.parts()[0]
assert len(str(root)) == 1
assert str(root.join('a')) == '//a' # posix allows two slashes
def test_join_root_to_root_with_no_abs(self, path1):
nroot = path1.join('/')
assert str(path1) == str(nroot)
assert path1 == nroot
def test_chmod_simple_int(self, path1):
mode = path1.stat().mode
path1.chmod(int(mode/2))
try:
assert path1.stat().mode != mode
finally:
path1.chmod(mode)
assert path1.stat().mode == mode
def test_chmod_rec_int(self, path1):
# XXX fragile test
recfilter = lambda x: x.check(dotfile=0, link=0)
oldmodes = {}
for x in path1.visit(rec=recfilter):
oldmodes[x] = x.stat().mode
path1.chmod(int("772", 8), rec=recfilter)
try:
for x in path1.visit(rec=recfilter):
assert x.stat().mode & int("777", 8) == int("772", 8)
finally:
for x,y in oldmodes.items():
x.chmod(y)
def test_copy_archiving(self, tmpdir):
unicode_fn = u"something-\342\200\223.txt"
f = tmpdir.ensure("a", unicode_fn)
a = f.dirpath()
oldmode = f.stat().mode
newmode = oldmode ^ 1
f.chmod(newmode)
b = tmpdir.join("b")
a.copy(b, mode=True)
assert b.join(f.basename).stat().mode == newmode
@failsonjython
def test_chown_identity(self, path1):
owner = path1.stat().owner
group = path1.stat().group
path1.chown(owner, group)
@failsonjython
def test_chown_dangling_link(self, path1):
owner = path1.stat().owner
group = path1.stat().group
x = path1.join('hello')
x.mksymlinkto('qlwkejqwlek')
try:
path1.chown(owner, group, rec=1)
finally:
x.remove(rec=0)
@failsonjython
def test_chown_identity_rec_mayfail(self, path1):
owner = path1.stat().owner
group = path1.stat().group
path1.chown(owner, group)
class TestUnicodePy2Py3:
    # Non-ascii filename handling that must behave the same on py2 and py3.
    # On py3 a locale (LANG) is required for the filesystem encoding.

    def test_join_ensure(self, tmpdir, monkeypatch):
        """ensure() and join() agree on a non-ascii path component."""
        if sys.version_info >= (3,0) and "LANG" not in os.environ:
            pytest.skip("cannot run test without locale")
        x = py.path.local(tmpdir.strpath)
        part = "hällo"
        y = x.ensure(part)
        assert x.join(part) == y

    def test_listdir(self, tmpdir):
        """listdir() with a non-ascii filter matches the created entry."""
        if sys.version_info >= (3,0) and "LANG" not in os.environ:
            pytest.skip("cannot run test without locale")
        x = py.path.local(tmpdir.strpath)
        part = "hällo"
        y = x.ensure(part)
        assert x.listdir(part)[0] == y

    @pytest.mark.xfail(reason="changing read/write might break existing usages")
    def test_read_write(self, tmpdir):
        # documents desired (not current) round-trip behaviour of read/write
        x = tmpdir.join("hello")
        part = py.builtin._totext("hällo", "utf8")
        x.write(part)
        assert x.read() == part
        x.write(part.encode(sys.getdefaultencoding()))
        assert x.read() == part.encode(sys.getdefaultencoding())
class TestBinaryAndTextMethods:
    # Round-trip checks for the explicit binary/text IO helpers
    # (write_binary/read_binary and write_text/read_text).

    def test_read_binwrite(self, tmpdir):
        """Bytes written with write_binary decode back via read_text."""
        x = tmpdir.join("hello")
        part = py.builtin._totext("hällo", "utf8")
        part_utf8 = part.encode("utf8")
        x.write_binary(part_utf8)
        assert x.read_binary() == part_utf8
        s = x.read_text(encoding="utf8")
        assert s == part
        assert py.builtin._istext(s)

    def test_read_textwrite(self, tmpdir):
        """Text written with write_text is stored as its utf8 encoding."""
        x = tmpdir.join("hello")
        part = py.builtin._totext("hällo", "utf8")
        part_utf8 = part.encode("utf8")
        x.write_text(part, encoding="utf8")
        assert x.read_binary() == part_utf8
        assert x.read_text(encoding="utf8") == part

    def test_default_encoding(self, tmpdir):
        """Ascii round-trips and preserves the text type."""
        x = tmpdir.join("hello")
        # Can't use UTF8 as the default encoding (ASCII) doesn't support it
        part = py.builtin._totext("hello", "ascii")
        x.write_text(part, "ascii")
        s = x.read_text("ascii")
        assert s == part
        assert type(s) == type(part)
| mpl-2.0 |
TieWei/nova | nova/compute/flavors.py | 8 | 10001 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright 2011 Ken Pepple
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Built-in instance properties."""
import re
import uuid
from oslo.config import cfg
from nova import context
from nova import db
from nova import exception
from nova.openstack.common.db import exception as db_exc
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import strutils
from nova.pci import pci_request
from nova import utils
# Config options for flavor handling, registered on the global CONF object.
flavor_opts = [
    cfg.StrOpt('default_flavor',
               # Deprecated in Havana
               deprecated_name='default_instance_type',
               default='m1.small',
               help='default flavor to use for the EC2 API only. The Nova API '
                    'does not support a default flavor.'),
    ]

CONF = cfg.CONF
CONF.register_opts(flavor_opts)
LOG = logging.getLogger(__name__)

# Flavor names and ids may only contain word chars, dots, dashes and spaces
# (enforced by create() below).
VALID_NAME_OR_ID_REGEX = re.compile("^[\w\.\- ]*$")
def _int_or_none(val):
if val is not None:
return int(val)
# Maps each flavor property stored in an instance's system_metadata to the
# callable used to coerce it back from its string form (see extract_flavor
# and save_flavor_info below).  vcpu_weight may legitimately be None.
system_metadata_flavor_props = {
    'id': int,
    'name': str,
    'memory_mb': int,
    'vcpus': int,
    'root_gb': int,
    'ephemeral_gb': int,
    'flavorid': str,
    'swap': int,
    'rxtx_factor': float,
    'vcpu_weight': _int_or_none,
    }
def create(name, memory, vcpus, root_gb, ephemeral_gb=0, flavorid=None,
           swap=0, rxtx_factor=1.0, is_public=True):
    """Create a new flavor after validating every attribute.

    :param name: display name, 1-255 chars from [a-zA-Z0-9_.- ]
    :param memory: memory in MB, positive integer
    :param vcpus: number of vcpus, positive integer
    :param root_gb: root disk size in GB, non-negative integer
    :param ephemeral_gb: ephemeral disk size in GB, non-negative integer
    :param flavorid: external id; generated from uuid4() when not given
    :param swap: swap size in MB, non-negative integer
    :param rxtx_factor: RX/TX factor, positive float
    :param is_public: whether the flavor is visible to all projects
    :raises exception.InvalidInput: on any validation failure
    :raises exception.InstanceTypeCreateFailed: when the DB insert fails
    """
    if not flavorid:
        flavorid = uuid.uuid4()

    kwargs = {
        'memory_mb': memory,
        'vcpus': vcpus,
        'root_gb': root_gb,
        'ephemeral_gb': ephemeral_gb,
        'swap': swap,
        'rxtx_factor': rxtx_factor,
    }

    # ensure name do not exceed 255 characters
    utils.check_string_length(name, 'name', min_length=1, max_length=255)

    # ensure name does not contain any special characters
    valid_name = VALID_NAME_OR_ID_REGEX.search(name)
    if not valid_name:
        msg = _("names can only contain [a-zA-Z0-9_.- ]")
        raise exception.InvalidInput(reason=msg)

    # NOTE(vish): Internally, flavorid is stored as a string but it comes
    #             in through json as an integer, so we convert it here.
    flavorid = unicode(flavorid)

    # ensure leading/trailing whitespaces not present.
    if flavorid.strip() != flavorid:
        msg = _("id cannot contain leading and/or trailing whitespace(s)")
        raise exception.InvalidInput(reason=msg)

    # ensure flavor id does not exceed 255 characters
    utils.check_string_length(flavorid, 'id', min_length=1,
                              max_length=255)

    # ensure flavor id does not contain any special characters
    valid_flavor_id = VALID_NAME_OR_ID_REGEX.search(flavorid)
    if not valid_flavor_id:
        msg = _("id can only contain [a-zA-Z0-9_.- ]")
        raise exception.InvalidInput(reason=msg)

    # Some attributes are positive ( > 0) integers
    for option in ['memory_mb', 'vcpus']:
        try:
            if int(str(kwargs[option])) <= 0:
                raise ValueError()
            kwargs[option] = int(kwargs[option])
        except (ValueError, TypeError):
            msg = _("'%s' argument must be a positive integer") % option
            raise exception.InvalidInput(reason=msg)

    # Some attributes are non-negative ( >= 0) integers
    for option in ['root_gb', 'ephemeral_gb', 'swap']:
        try:
            if int(str(kwargs[option])) < 0:
                raise ValueError()
            kwargs[option] = int(kwargs[option])
        except (ValueError, TypeError):
            msg = _("'%s' argument must be an integer greater than or"
                    " equal to 0") % option
            raise exception.InvalidInput(reason=msg)

    # rxtx_factor should be a positive float
    try:
        kwargs['rxtx_factor'] = float(kwargs['rxtx_factor'])
        if kwargs['rxtx_factor'] <= 0:
            raise ValueError()
    except (ValueError, TypeError):
        # BUGFIX: float(None) (or any non-numeric object) raises TypeError,
        # not ValueError; catching both makes a bad rxtx_factor report
        # InvalidInput instead of escaping as an unhandled TypeError, and
        # matches the integer validations above.
        msg = _("'rxtx_factor' argument must be a positive float")
        raise exception.InvalidInput(reason=msg)

    kwargs['name'] = name
    kwargs['flavorid'] = flavorid

    # ensure is_public attribute is boolean
    try:
        kwargs['is_public'] = strutils.bool_from_string(
            is_public, strict=True)
    except ValueError:
        raise exception.InvalidInput(reason=_("is_public must be a boolean"))

    try:
        return db.flavor_create(context.get_admin_context(), kwargs)
    except db_exc.DBError as e:
        LOG.exception(_('DB error: %s') % e)
        raise exception.InstanceTypeCreateFailed()
def destroy(name):
    """Marks flavor as deleted.

    :param name: flavor name; an empty/None name is treated the same as a
                 missing flavor
    :raises exception.InstanceTypeNotFoundByName: when the flavor does not
            exist (or name is empty)
    """
    try:
        if not name:
            # normalize the empty-name case onto the not-found path below
            raise ValueError()
        db.flavor_destroy(context.get_admin_context(), name)
    except (ValueError, exception.NotFound):
        LOG.exception(_('Instance type %s not found for deletion') % name)
        raise exception.InstanceTypeNotFoundByName(instance_type_name=name)
def get_all_flavors(ctxt=None, inactive=False, filters=None):
    """Get all non-deleted flavors as a dict keyed by primary id.

    Pass true as argument if you want deleted flavors returned also.
    """
    if ctxt is None:
        ctxt = context.get_admin_context()
    flavors = db.flavor_get_all(
        ctxt, inactive=inactive, filters=filters)
    return dict((flavor['id'], flavor) for flavor in flavors)
def get_all_flavors_sorted_list(ctxt=None, inactive=False, filters=None,
                                sort_key='flavorid', sort_dir='asc',
                                limit=None, marker=None):
    """Get all non-deleted flavors as a sorted list.

    Pass true as argument if you want deleted flavors returned also.

    :param ctxt: request context; an admin context is created when omitted
    :param sort_key: column to sort by (default 'flavorid')
    :param sort_dir: 'asc' or 'desc'
    :param limit: maximum number of rows to return (pagination)
    :param marker: last-seen id for pagination
    """
    if ctxt is None:
        ctxt = context.get_admin_context()
    return db.flavor_get_all(ctxt, filters=filters, sort_key=sort_key,
                             sort_dir=sort_dir, limit=limit, marker=marker)
def get_default_flavor():
    """Return the flavor named by the ``default_flavor`` config option."""
    return get_flavor_by_name(CONF.default_flavor)
def get_flavor(instance_type_id, ctxt=None, inactive=False):
    """Retrieve a single flavor by its primary id.

    Falls back to the configured default flavor when no id is given.
    With inactive=True, soft-deleted flavors are visible as well.
    """
    if instance_type_id is None:
        return get_default_flavor()

    if ctxt is None:
        ctxt = context.get_admin_context()
    if inactive:
        # elevate the context so deleted rows become visible
        ctxt = ctxt.elevated(read_deleted="yes")

    return db.flavor_get(ctxt, instance_type_id)
def get_flavor_by_name(name, ctxt=None):
    """Retrieve a single flavor by name, or the default flavor for None."""
    if name is None:
        return get_default_flavor()
    if ctxt is None:
        ctxt = context.get_admin_context()
    return db.flavor_get_by_name(ctxt, name)
# TODO(termie): flavor-specific code should probably be in the API that uses
# flavors.
def get_flavor_by_flavor_id(flavorid, ctxt=None, read_deleted="yes"):
    """Retrieve flavor by flavorid.

    :param flavorid: external (user-visible) flavor id
    :param ctxt: request context; an admin context honouring *read_deleted*
                 is created when omitted
    :param read_deleted: 'yes' (default) also returns soft-deleted flavors
    :raises: FlavorNotFound
    """
    if ctxt is None:
        ctxt = context.get_admin_context(read_deleted=read_deleted)
    return db.flavor_get_by_flavor_id(ctxt, flavorid, read_deleted)
def get_flavor_access_by_flavor_id(flavorid, ctxt=None):
    """Retrieve flavor access list (projects) by flavor id."""
    if ctxt is None:
        ctxt = context.get_admin_context()
    return db.flavor_access_get_by_flavor_id(ctxt, flavorid)
def add_flavor_access(flavorid, projectid, ctxt=None):
    """Add flavor access for project (grants *projectid* use of the flavor)."""
    if ctxt is None:
        ctxt = context.get_admin_context()
    return db.flavor_access_add(ctxt, flavorid, projectid)
def remove_flavor_access(flavorid, projectid, ctxt=None):
    """Remove flavor access for project (revokes the grant added above)."""
    if ctxt is None:
        ctxt = context.get_admin_context()
    return db.flavor_access_remove(ctxt, flavorid, projectid)
def extract_flavor(instance, prefix=''):
    """Rebuild an InstanceType-like dict from an instance's system_metadata.

    Inverse of save_flavor_info(): reads every ``[prefix]instance_type_[key]``
    entry and coerces it back to its native type.
    """
    sys_meta = utils.instance_sys_meta(instance)
    extracted = {}
    for prop, coerce_fn in system_metadata_flavor_props.items():
        meta_key = '%sinstance_type_%s' % (prefix, prop)
        extracted[prop] = coerce_fn(sys_meta[meta_key])
    return extracted
def save_flavor_info(metadata, instance_type, prefix=''):
    """Save properties from instance_type into instance's system_metadata,
    in the format of:

      [prefix]instance_type_[key]

    This can be used to update system_metadata in place from a type, as well
    as stash information about another instance_type for later use (such as
    during resize).  Returns *metadata* for chaining.
    """
    for prop in system_metadata_flavor_props.keys():
        metadata['%sinstance_type_%s' % (prefix, prop)] = instance_type[prop]
    # PCI request info travels alongside the flavor properties
    pci_request.save_flavor_pci_info(metadata, instance_type, prefix)
    return metadata
def delete_flavor_info(metadata, *prefixes):
    """Drop every '[prefix]instance_type_[key]' entry from system_metadata
    for each given prefix.  Returns the (mutated) metadata mapping.

    Raises KeyError if an expected entry is missing, just as the direct
    ``del`` would.
    """
    for prop in system_metadata_flavor_props:
        for pfx in prefixes:
            del metadata['%sinstance_type_%s' % (pfx, prop)]
    pci_request.delete_flavor_pci_info(metadata, *prefixes)
    return metadata
| apache-2.0 |
Ichag/odoo | addons/account/wizard/account_journal_select.py | 385 | 2068 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
class account_journal_select(osv.osv_memory):
    """Wizard that opens the journal items view filtered on the
    journal/period pair of the active account.journal.period record."""
    _name = "account.journal.select"
    _description = "Account Journal Select"

    def action_open_window(self, cr, uid, ids, context=None):
        """Return the 'action_move_line_select' act_window, restricted to
        the journal and period of context['active_id']."""
        if context is None:
            context = {}
        data_pool = self.pool.get('ir.model.data')
        action_pool = self.pool.get('ir.actions.act_window')
        ref = data_pool.get_object_reference(cr, uid, 'account', 'action_move_line_select')
        action_id = ref and ref[1] or False
        action = action_pool.read(cr, uid, [action_id])[0]
        cr.execute('select journal_id, period_id from account_journal_period where id=%s', (context['active_id'],))
        row = cr.fetchone()
        if row:
            journal_id, period_id = row
            action['domain'] = str([('journal_id', '=', journal_id), ('period_id', '=', period_id)])
            action['context'] = str({'journal_id': journal_id, 'period_id': period_id})
        return action
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
melizeche/get-shorty | tests_shorty.py | 1 | 2998 | import os
import json
import getshorty
import unittest
import tempfile
URL = 'http://google.com'
URL_MOBILE = 'http://facebook.com'
URL_TABLET = 'https://yahoo.com'
BAD_URL = 'http//google.com'
UA = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36'
UA_MOBILE = 'Mozilla/5.0 (iPhone; CPU iPhone OS 6_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/6.0 Mobile/10A5376e Safari/8536.25'
UA_TABLET = 'Mozilla/5.0(iPad; U; CPU iPhone OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B314 Safari/531.21.10'
class GetShortyTestCase(unittest.TestCase):
    """Integration tests for the get-shorty URL shortener API.

    Every test runs against a fresh temporary database that is created in
    setUp and deleted again in tearDown, so tests are independent.
    """

    def setUp(self):
        # Point the app at a throw-away database file and enable Flask's
        # TESTING mode before grabbing a test client.
        self.db_fd, getshorty.app.config['DATABASE'] = tempfile.mkstemp()
        getshorty.app.config['TESTING'] = True
        self.app = getshorty.app.test_client()
        with getshorty.app.app_context():
            getshorty.init_db()

    def tearDown(self):
        # Close the fd kept from mkstemp and remove the database file.
        os.close(self.db_fd)
        os.unlink(getshorty.app.config['DATABASE'])

    def test_empty_db(self):
        # A fresh database must list no shortened URLs.
        resp = self.app.get('/api/1.0/list')
        assert b'[]' in resp.data

    def test_bad_methods(self):
        # Wrong HTTP verbs must yield 405 Method Not Allowed.
        resp = self.app.post('/api/1.0/list')
        resp2 = self.app.get('/api/1.0/create')
        assert resp.status_code == 405
        assert resp2.status_code == 405

    def test_create_empty(self):
        # Creating without a 'url' field is rejected with an error message.
        resp = self.app.post('/api/1.0/create', data='{}')
        assert b'"url parameter is mandatory' in resp.data

    def test_create_badurl(self):
        # A malformed URL (missing the ':' after the scheme) is rejected.
        resp = self.app.post(
            '/api/1.0/create', data=json.dumps(dict(url=BAD_URL)))
        assert b'"invalid url"' in resp.data

    def test_create_single(self):
        # Creating a plain short URL returns 201 and the shortened path
        # redirects (302) when fetched.
        resp = self.app.post(
            '/api/1.0/create', data=json.dumps(dict(url=URL)))
        assert resp.status_code == 201
        short_url = json.loads(resp.data.decode('utf-8'))['shorten']
        resp2 = self.app.get(short_url)
        assert resp2.status_code == 302

    def test_create_complete(self):
        # With per-device targets, the redirect destination must depend on
        # the client's User-Agent (desktop / mobile / tablet).
        resp = self.app.post(
            '/api/1.0/create', data='{"url":"%s","url-mobile":"%s","url-tablet":"%s"}' % (URL, URL_MOBILE, URL_TABLET))
        assert resp.status_code == 201
        short_url = json.loads(resp.data.decode('utf-8'))['shorten']
        resp_default = self.app.get(short_url, environ_base={
            'HTTP_USER_AGENT': UA})
        assert resp_default.status_code == 302
        assert resp_default.location == URL
        resp_mobile = self.app.get(short_url, environ_base={
            'HTTP_USER_AGENT': UA_MOBILE})
        assert resp_mobile.status_code == 302
        assert resp_mobile.location == URL_MOBILE
        resp_tablet = self.app.get(short_url, environ_base={
            'HTTP_USER_AGENT': UA_TABLET})
        assert resp_tablet.status_code == 302
        assert resp_tablet.location == URL_TABLET
if __name__ == '__main__':
unittest.main()
| mit |
macchina-io/macchina.io | platform/JS/V8/v8/third_party/jinja2/runtime.py | 335 | 22530 | # -*- coding: utf-8 -*-
"""
jinja2.runtime
~~~~~~~~~~~~~~
Runtime helpers.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD.
"""
import sys
from itertools import chain
from jinja2.nodes import EvalContext, _context_function_types
from jinja2.utils import Markup, soft_unicode, escape, missing, concat, \
internalcode, object_type_repr
from jinja2.exceptions import UndefinedError, TemplateRuntimeError, \
TemplateNotFound
from jinja2._compat import imap, text_type, iteritems, \
implements_iterator, implements_to_string, string_types, PY2
# these variables are exported to the template runtime
__all__ = ['LoopContext', 'TemplateReference', 'Macro', 'Markup',
'TemplateRuntimeError', 'missing', 'concat', 'escape',
'markup_join', 'unicode_join', 'to_string', 'identity',
'TemplateNotFound', 'make_logging_undefined']
#: the name of the function that is used to convert something into
#: a string. We can just use the text type here.
to_string = text_type
#: the identity function. Useful for certain things in the environment
identity = lambda x: x
_last_iteration = object()
def markup_join(seq):
    """Concatenation that escapes if necessary and converts to unicode."""
    collected = []
    converted = imap(soft_unicode, seq)
    for value in converted:
        collected.append(value)
        if hasattr(value, '__html__'):
            # markup detected: escape-join what we have so far together
            # with the remainder of the stream
            return Markup(u'').join(chain(collected, converted))
    return concat(collected)
def unicode_join(seq):
    """Simple args to unicode conversion and concatenation."""
    return concat([text_type(item) for item in seq])
def new_context(environment, template_name, blocks, vars=None,
                shared=None, globals=None, locals=None):
    """Internal helper for context creation.

    Builds the parent mapping from *globals* and *vars* (reusing *vars*
    directly when *shared* is true) and folds any ``l_``-prefixed locals
    from generated template code into it.
    """
    if vars is None:
        vars = {}
    if shared:
        parent = vars
    else:
        parent = dict(globals or (), **vars)
    if locals:
        # if the parent is shared a copy should be created because
        # we don't want to modify the dict passed
        if shared:
            parent = dict(parent)
        for key, value in iteritems(locals):
            # generated code prefixes template-local names with 'l_';
            # strip the prefix when exposing them on the context
            if key[:2] == 'l_' and value is not missing:
                parent[key[2:]] = value
    return environment.context_class(environment, parent, template_name,
                                     blocks)
class TemplateReference(object):
    """The `self` in templates."""

    def __init__(self, context):
        self.__context = context

    def __getitem__(self, name):
        block_stack = self.__context.blocks[name]
        return BlockReference(name, self.__context, block_stack, 0)

    def __repr__(self):
        return '<%s %r>' % (self.__class__.__name__, self.__context.name)
class Context(object):
    """The template context holds the variables of a template. It stores the
    values passed to the template and also the names the template exports.
    Creating instances is neither supported nor useful as it's created
    automatically at various stages of the template evaluation and should not
    be created by hand.

    The context is immutable. Modifications on :attr:`parent` **must not**
    happen and modifications on :attr:`vars` are allowed from generated
    template code only. Template filters and global functions marked as
    :func:`contextfunction`\s get the active context passed as first argument
    and are allowed to access the context read-only.

    The template context supports read only dict operations (`get`,
    `keys`, `values`, `items`, `iterkeys`, `itervalues`, `iteritems`,
    `__getitem__`, `__contains__`). Additionally there is a :meth:`resolve`
    method that doesn't fail with a `KeyError` but returns an
    :class:`Undefined` object for missing variables.
    """
    __slots__ = ('parent', 'vars', 'environment', 'eval_ctx', 'exported_vars',
                 'name', 'blocks', '__weakref__')

    def __init__(self, environment, parent, name, blocks):
        self.parent = parent
        # vars holds names set by the template itself; parent is read-only
        self.vars = {}
        self.environment = environment
        self.eval_ctx = EvalContext(self.environment, name)
        # names exported via {% set %} and friends; see get_exported()
        self.exported_vars = set()
        self.name = name

        # create the initial mapping of blocks. Whenever template inheritance
        # takes place the runtime will update this mapping with the new blocks
        # from the template.
        self.blocks = dict((k, [v]) for k, v in iteritems(blocks))

    def super(self, name, current):
        """Render a parent block."""
        try:
            blocks = self.blocks[name]
            index = blocks.index(current) + 1
            # probe access: IndexError (a LookupError) means no parent block
            blocks[index]
        except LookupError:
            return self.environment.undefined('there is no parent block '
                                              'called %r.' % name,
                                              name='super')
        return BlockReference(name, self, blocks, index)

    def get(self, key, default=None):
        """Returns an item from the template context, if it doesn't exist
        `default` is returned.
        """
        try:
            return self[key]
        except KeyError:
            return default

    def resolve(self, key):
        """Looks up a variable like `__getitem__` or `get` but returns an
        :class:`Undefined` object with the name of the name looked up.
        """
        # template-set variables shadow the parent scope
        if key in self.vars:
            return self.vars[key]
        if key in self.parent:
            return self.parent[key]
        return self.environment.undefined(name=key)

    def get_exported(self):
        """Get a new dict with the exported variables."""
        return dict((k, self.vars[k]) for k in self.exported_vars)

    def get_all(self):
        """Return a copy of the complete context as dict including the
        exported variables.
        """
        return dict(self.parent, **self.vars)

    @internalcode
    def call(__self, __obj, *args, **kwargs):
        """Call the callable with the arguments and keyword arguments
        provided but inject the active context or environment as first
        argument if the callable is a :func:`contextfunction` or
        :func:`environmentfunction`.

        The double-underscore parameter names keep ``self``/``obj`` free
        for use as template keyword arguments in **kwargs.
        """
        if __debug__:
            __traceback_hide__ = True  # noqa

        # Allow callable classes to take a context
        fn = __obj.__call__
        for fn_type in ('contextfunction',
                        'evalcontextfunction',
                        'environmentfunction'):
            if hasattr(fn, fn_type):
                __obj = fn
                break

        if isinstance(__obj, _context_function_types):
            # inject exactly one implicit first argument, by marker priority
            if getattr(__obj, 'contextfunction', 0):
                args = (__self,) + args
            elif getattr(__obj, 'evalcontextfunction', 0):
                args = (__self.eval_ctx,) + args
            elif getattr(__obj, 'environmentfunction', 0):
                args = (__self.environment,) + args
        try:
            return __obj(*args, **kwargs)
        except StopIteration:
            # a StopIteration leaking out of a callable would silently end
            # the surrounding template loop; surface it as undefined instead
            return __self.environment.undefined('value was undefined because '
                                                'a callable raised a '
                                                'StopIteration exception')

    def derived(self, locals=None):
        """Internal helper function to create a derived context."""
        context = new_context(self.environment, self.name, {},
                              self.parent, True, None, locals)
        context.vars.update(self.vars)
        context.eval_ctx = self.eval_ctx
        # copy the block stacks so the derived context can mutate them
        context.blocks.update((k, list(v)) for k, v in iteritems(self.blocks))
        return context

    # factory for dict-proxy methods that operate on the merged mapping
    def _all(meth):
        proxy = lambda self: getattr(self.get_all(), meth)()
        proxy.__doc__ = getattr(dict, meth).__doc__
        proxy.__name__ = meth
        return proxy

    keys = _all('keys')
    values = _all('values')
    items = _all('items')

    # not available on python 3
    if PY2:
        iterkeys = _all('iterkeys')
        itervalues = _all('itervalues')
        iteritems = _all('iteritems')
    del _all

    def __contains__(self, name):
        return name in self.vars or name in self.parent

    def __getitem__(self, key):
        """Lookup a variable or raise `KeyError` if the variable is
        undefined.
        """
        item = self.resolve(key)
        if isinstance(item, Undefined):
            raise KeyError(key)
        return item

    def __repr__(self):
        return '<%s %s of %r>' % (
            self.__class__.__name__,
            repr(self.get_all()),
            self.name
        )
# register the context as mapping if possible
try:
from collections import Mapping
Mapping.register(Context)
except ImportError:
pass
class BlockReference(object):
    """One block on a template reference."""

    def __init__(self, name, context, stack, depth):
        self.name = name
        self._context = context
        self._stack = stack
        self._depth = depth

    @property
    def super(self):
        """Super the block."""
        parent_depth = self._depth + 1
        if parent_depth >= len(self._stack):
            return self._context.environment. \
                undefined('there is no parent block called %r.' %
                          self.name, name='super')
        return BlockReference(self.name, self._context, self._stack,
                              parent_depth)

    @internalcode
    def __call__(self):
        rendered = concat(self._stack[self._depth](self._context))
        if self._context.eval_ctx.autoescape:
            rendered = Markup(rendered)
        return rendered
class LoopContext(object):
    """A loop context for dynamic iteration."""

    def __init__(self, iterable, recurse=None, depth0=0):
        self._iterator = iter(iterable)
        self._recurse = recurse
        # one-item lookahead so `loop.last` can be answered without
        # consuming an extra element at iteration time
        self._after = self._safe_next()
        self.index0 = -1
        self.depth0 = depth0

        # try to get the length of the iterable early. This must be done
        # here because there are some broken iterators around where there
        # __len__ is the number of iterations left (i'm looking at your
        # listreverseiterator!).
        try:
            self._length = len(iterable)
        except (TypeError, AttributeError):
            self._length = None

    def cycle(self, *args):
        """Cycles among the arguments with the current loop index."""
        if not args:
            raise TypeError('no items for cycling given')
        return args[self.index0 % len(args)]

    first = property(lambda x: x.index0 == 0)
    last = property(lambda x: x._after is _last_iteration)
    index = property(lambda x: x.index0 + 1)
    revindex = property(lambda x: x.length - x.index0)
    revindex0 = property(lambda x: x.length - x.index)
    depth = property(lambda x: x.depth0 + 1)

    def __len__(self):
        return self.length

    def __iter__(self):
        return LoopContextIterator(self)

    def _safe_next(self):
        # advance the underlying iterator, mapping exhaustion to the
        # _last_iteration sentinel instead of StopIteration
        try:
            return next(self._iterator)
        except StopIteration:
            return _last_iteration

    @internalcode
    def loop(self, iterable):
        if self._recurse is None:
            raise TypeError('Tried to call non recursive loop. Maybe you '
                            "forgot the 'recursive' modifier.")
        return self._recurse(iterable, self._recurse, self.depth0 + 1)

    # a nifty trick to enhance the error message if someone tried to call
    # the the loop without or with too many arguments.
    __call__ = loop
    del loop

    @property
    def length(self):
        if self._length is None:
            # if was not possible to get the length of the iterator when
            # the loop context was created (ie: iterating over a generator)
            # we have to convert the iterable into a sequence and use the
            # length of that + the number of iterations so far.
            iterable = tuple(self._iterator)
            self._iterator = iter(iterable)
            # NOTE(review): index0 + 2 = items already yielded plus the
            # one-item lookahead held in _after -- confirm this stays
            # correct when the iterator is already exhausted.
            iterations_done = self.index0 + 2
            self._length = len(iterable) + iterations_done
        return self._length

    def __repr__(self):
        return '<%s %r/%r>' % (
            self.__class__.__name__,
            self.index,
            self.length
        )
@implements_iterator
class LoopContextIterator(object):
    """The iterator for a loop context."""
    __slots__ = ('context',)

    def __init__(self, context):
        self.context = context

    def __iter__(self):
        return self

    def __next__(self):
        ctx = self.context
        ctx.index0 += 1
        if ctx._after is _last_iteration:
            raise StopIteration()
        # hand out the prefetched element and refill the lookahead slot
        current = ctx._after
        ctx._after = ctx._safe_next()
        return current, ctx
class Macro(object):
    """Wraps a macro function.

    Stores the macro's signature metadata (argument names, defaults,
    whether it accepts **kwargs / *varargs and whether it takes a caller)
    and resolves call arguments accordingly in __call__.
    """

    def __init__(self, environment, func, name, arguments, defaults,
                 catch_kwargs, catch_varargs, caller):
        self._environment = environment
        self._func = func
        self._argument_count = len(arguments)
        self.name = name
        self.arguments = arguments
        self.defaults = defaults
        self.catch_kwargs = catch_kwargs
        self.catch_varargs = catch_varargs
        self.caller = caller

    @internalcode
    def __call__(self, *args, **kwargs):
        # try to consume the positional arguments
        arguments = list(args[:self._argument_count])
        off = len(arguments)

        # if the number of arguments consumed is not the number of
        # arguments expected we start filling in keyword arguments
        # and defaults.
        if off != self._argument_count:
            for idx, name in enumerate(self.arguments[len(arguments):]):
                try:
                    value = kwargs.pop(name)
                except KeyError:
                    try:
                        # defaults align with the *trailing* arguments,
                        # hence the negative-offset indexing
                        value = self.defaults[idx - self._argument_count + off]
                    except IndexError:
                        value = self._environment.undefined(
                            'parameter %r was not provided' % name, name=name)
                arguments.append(value)

        # it's important that the order of these arguments does not change
        # if not also changed in the compiler's `function_scoping` method.
        # the order is caller, keyword arguments, positional arguments!
        if self.caller:
            caller = kwargs.pop('caller', None)
            if caller is None:
                caller = self._environment.undefined('No caller defined',
                                                     name='caller')
            arguments.append(caller)
        if self.catch_kwargs:
            arguments.append(kwargs)
        elif kwargs:
            raise TypeError('macro %r takes no keyword argument %r' %
                            (self.name, next(iter(kwargs))))
        if self.catch_varargs:
            arguments.append(args[self._argument_count:])
        elif len(args) > self._argument_count:
            raise TypeError('macro %r takes not more than %d argument(s)' %
                            (self.name, len(self.arguments)))
        return self._func(*arguments)

    def __repr__(self):
        return '<%s %s>' % (
            self.__class__.__name__,
            self.name is None and 'anonymous' or repr(self.name)
        )
@implements_to_string
class Undefined(object):
    """The default undefined type. This undefined type can be printed and
    iterated over, but every other access will raise an :exc:`jinja2.exceptions.UndefinedError`:

    >>> foo = Undefined(name='foo')
    >>> str(foo)
    ''
    >>> not foo
    True
    >>> foo + 42
    Traceback (most recent call last):
      ...
    jinja2.exceptions.UndefinedError: 'foo' is undefined
    """
    __slots__ = ('_undefined_hint', '_undefined_obj', '_undefined_name',
                 '_undefined_exception')

    def __init__(self, hint=None, obj=missing, name=None, exc=UndefinedError):
        self._undefined_hint = hint
        self._undefined_obj = obj
        self._undefined_name = name
        self._undefined_exception = exc

    @internalcode
    def _fail_with_undefined_error(self, *args, **kwargs):
        """Regular callback function for undefined objects that raises an
        `jinja2.exceptions.UndefinedError` on call.
        """
        if self._undefined_hint is None:
            # build the most specific message we can from what we know
            if self._undefined_obj is missing:
                hint = '%r is undefined' % self._undefined_name
            elif not isinstance(self._undefined_name, string_types):
                hint = '%s has no element %r' % (
                    object_type_repr(self._undefined_obj),
                    self._undefined_name
                )
            else:
                hint = '%r has no attribute %r' % (
                    object_type_repr(self._undefined_obj),
                    self._undefined_name
                )
        else:
            hint = self._undefined_hint
        raise self._undefined_exception(hint)

    @internalcode
    def __getattr__(self, name):
        # dunder lookups (e.g. by copy/pickle machinery) must fail with a
        # normal AttributeError rather than an UndefinedError
        if name[:2] == '__':
            raise AttributeError(name)
        return self._fail_with_undefined_error()

    # every other operator access raises the undefined error
    __add__ = __radd__ = __mul__ = __rmul__ = __div__ = __rdiv__ = \
        __truediv__ = __rtruediv__ = __floordiv__ = __rfloordiv__ = \
        __mod__ = __rmod__ = __pos__ = __neg__ = __call__ = \
        __getitem__ = __lt__ = __le__ = __gt__ = __ge__ = __int__ = \
        __float__ = __complex__ = __pow__ = __rpow__ = \
        _fail_with_undefined_error

    def __eq__(self, other):
        # all undefineds of the same class compare equal
        return type(self) is type(other)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        return id(type(self))

    def __str__(self):
        return u''

    def __len__(self):
        return 0

    def __iter__(self):
        # empty generator: iterating an undefined yields nothing
        if 0:
            yield None

    def __nonzero__(self):
        return False
    __bool__ = __nonzero__

    def __repr__(self):
        return 'Undefined'
def make_logging_undefined(logger=None, base=None):
    """Given a logger object this returns a new undefined class that will
    log certain failures. It will log iterations and printing. If no
    logger is given a default logger is created.

    Example::

        logger = logging.getLogger(__name__)
        LoggingUndefined = make_logging_undefined(
            logger=logger,
            base=Undefined
        )

    .. versionadded:: 2.8

    :param logger: the logger to use. If not provided, a default logger
                   is created.
    :param base: the base class to add logging functionality to. This
                 defaults to :class:`Undefined`.
    """
    if logger is None:
        import logging
        logger = logging.getLogger(__name__)
        logger.addHandler(logging.StreamHandler(sys.stderr))
    if base is None:
        base = Undefined

    def _log_message(undef):
        # mirror Undefined._fail_with_undefined_error's message building,
        # but emit a warning instead of raising
        if undef._undefined_hint is None:
            if undef._undefined_obj is missing:
                hint = '%s is undefined' % undef._undefined_name
            elif not isinstance(undef._undefined_name, string_types):
                hint = '%s has no element %s' % (
                    object_type_repr(undef._undefined_obj),
                    undef._undefined_name)
            else:
                hint = '%s has no attribute %s' % (
                    object_type_repr(undef._undefined_obj),
                    undef._undefined_name)
        else:
            hint = undef._undefined_hint
        logger.warning('Template variable warning: %s', hint)

    class LoggingUndefined(base):
        # log-and-reraise wrapper around the base failure callback
        def _fail_with_undefined_error(self, *args, **kwargs):
            try:
                return base._fail_with_undefined_error(self, *args, **kwargs)
            except self._undefined_exception as e:
                logger.error('Template variable error: %s', str(e))
                raise e

        def __str__(self):
            rv = base.__str__(self)
            _log_message(self)
            return rv

        def __iter__(self):
            rv = base.__iter__(self)
            _log_message(self)
            return rv

        # boolean/text protocols differ between python 2 and 3
        if PY2:
            def __nonzero__(self):
                rv = base.__nonzero__(self)
                _log_message(self)
                return rv

            def __unicode__(self):
                rv = base.__unicode__(self)
                _log_message(self)
                return rv
        else:
            def __bool__(self):
                rv = base.__bool__(self)
                _log_message(self)
                return rv

    return LoggingUndefined
@implements_to_string
class DebugUndefined(Undefined):
    """An undefined that returns the debug info when printed.

    >>> foo = DebugUndefined(name='foo')
    >>> str(foo)
    '{{ foo }}'
    >>> not foo
    True
    >>> foo + 42
    Traceback (most recent call last):
      ...
    jinja2.exceptions.UndefinedError: 'foo' is undefined
    """
    __slots__ = ()

    def __str__(self):
        # an explicit hint wins over any name/element based rendering
        if self._undefined_hint is not None:
            return u'{{ undefined value printed: %s }}' % self._undefined_hint
        if self._undefined_obj is missing:
            return u'{{ %s }}' % self._undefined_name
        return '{{ no such element: %s[%r] }}' % (
            object_type_repr(self._undefined_obj),
            self._undefined_name
        )
@implements_to_string
class StrictUndefined(Undefined):
    """An undefined that barks on print and iteration as well as boolean
    tests and all kinds of comparisons. In other words: you can do nothing
    with it except checking if it's defined using the `defined` test.

    >>> foo = StrictUndefined(name='foo')
    >>> str(foo)
    Traceback (most recent call last):
      ...
    jinja2.exceptions.UndefinedError: 'foo' is undefined
    >>> not foo
    Traceback (most recent call last):
      ...
    jinja2.exceptions.UndefinedError: 'foo' is undefined
    >>> foo + 42
    Traceback (most recent call last):
      ...
    jinja2.exceptions.UndefinedError: 'foo' is undefined
    """
    __slots__ = ()
    # unlike the base class, even stringification, iteration, truth
    # testing, comparison and hashing raise the undefined error
    __iter__ = __str__ = __len__ = __nonzero__ = __eq__ = \
        __ne__ = __bool__ = __hash__ = \
        Undefined._fail_with_undefined_error
# remove remaining slots attributes, after the metaclass did the magic they
# are unneeded and irritating as they contain wrong data for the subclasses.
del Undefined.__slots__, DebugUndefined.__slots__, StrictUndefined.__slots__
| apache-2.0 |
zenna/dreal3 | benchmarks/network/battery/gen.py | 55 | 1248 |
flow_var = {}
flow_dec = {}
state_dec = {}
state_val = {}
cont_cond = {}
jump_cond = {}
def getHdr(n):
    """Return the next *n* consecutive 1-based indices.

    The allocation state lives in the function attribute
    ``getHdr.counter``, which records the highest index handed out so far.
    """
    start = getHdr.counter + 1
    getHdr.counter += n
    return list(range(start, getHdr.counter + 1))
getHdr.counter = 0
######################
# Formula generation #
######################
def print_loop(bound, steps, keys, holder):
    """Print the continuous conditions for every unrolling step, with the
    jump conditions interleaved between consecutive steps, up to *bound*."""
    step = 0
    while True:
        for phase in range(steps):
            hdr = getHdr(holder)
            for key in keys:
                print(cont_cond[key][phase].format(step, *hdr).strip())
        if step >= bound:
            return
        # NOTE: 'phase' deliberately carries its final value (steps - 1)
        # out of the loop above, matching the original behaviour.
        for key in keys:
            print(jump_cond[key][phase].format(step, step + 1).strip())
        step += 1
def generate(bound, steps, keys, holder, init, goal):
    """Print a complete SMT2/dReal encoding of the unrolled system.

    Emits the logic header, per-key flow variables and declarations,
    state declarations and constraints for depths 0..bound, the initial
    condition, the interleaved continuous/jump conditions (print_loop),
    the goal at depth *bound*, and the closing check-sat/exit.
    """
    print("(set-logic QF_NRA_ODE)")
    for i in keys:
        print(flow_var[i].strip())
    for i in keys:
        print(flow_dec[i].strip())
    for b in range(bound + 1):
        for i in keys:
            print(state_dec[i].format(b).strip())
    for b in range(bound + 1):
        for i in keys:
            print(state_val[i].format(b).strip())
    print(init.format(0).strip())
    print_loop(bound, steps, keys, holder)
    print(goal.format(bound).strip())
    print("(check-sat)\n(exit)")
| gpl-3.0 |
HBehrens/feedsanitizer | djangotoolbox/middleware.py | 85 | 2801 | from django.conf import settings
from django.http import HttpResponseRedirect
from django.utils.cache import patch_cache_control
LOGIN_REQUIRED_PREFIXES = getattr(settings, 'LOGIN_REQUIRED_PREFIXES', ())
NO_LOGIN_REQUIRED_PREFIXES = getattr(settings, 'NO_LOGIN_REQUIRED_PREFIXES', ())
ALLOWED_DOMAINS = getattr(settings, 'ALLOWED_DOMAINS', None)
NON_REDIRECTED_PATHS = getattr(settings, 'NON_REDIRECTED_PATHS', ())
NON_REDIRECTED_BASE_PATHS = tuple(path.rstrip('/') + '/'
for path in NON_REDIRECTED_PATHS)
class LoginRequiredMiddleware(object):
    """
    Redirects to the login page when the request path starts with one of
    the LOGIN_REQUIRED_PREFIXES. Prefixes listed in
    NO_LOGIN_REQUIRED_PREFIXES take precedence and are always allowed.
    """

    def process_request(self, request):
        path = request.path
        if any(path.startswith(p) for p in NO_LOGIN_REQUIRED_PREFIXES):
            return None
        for prefix in LOGIN_REQUIRED_PREFIXES:
            if path.startswith(prefix) and not request.user.is_authenticated():
                from django.contrib.auth.views import redirect_to_login
                return redirect_to_login(request.get_full_path())
        return None
class RedirectMiddleware(object):
    """
    A static redirect middleware. Mostly useful for hosting providers that
    automatically setup an alternative domain for your website. You might
    not want anyone to access the site via those possibly well-known URLs.
    """

    def process_request(self, request):
        """Redirect requests for non-canonical hosts to the first allowed
        domain; return None (continue) for exempted requests."""
        host = request.get_host().split(':')[0]
        # Turn off redirects when in debug mode, running unit tests, or
        # when handling an App Engine cron job.
        if (settings.DEBUG or host == 'testserver' or
                not ALLOWED_DOMAINS or
                request.META.get('HTTP_X_APPENGINE_CRON') == 'true' or
                request.path.startswith('/_ah/') or
                request.path in NON_REDIRECTED_PATHS or
                request.path.startswith(NON_REDIRECTED_BASE_PATHS)):
            return
        # Consistency fix: use the module-level ALLOWED_DOMAINS constant
        # (as the guard above does) instead of reaching back into
        # settings.ALLOWED_DOMAINS; they hold the same value, but the
        # mixed access was error-prone.
        if host not in ALLOWED_DOMAINS:
            return HttpResponseRedirect('http://' + ALLOWED_DOMAINS[0]
                                        + request.path)
class NoHistoryCacheMiddleware(object):
    """
    If user is authenticated we disable browser caching of pages in history.
    """

    def process_response(self, request, response):
        has_cache_headers = ('Expires' in response or
                             'Cache-Control' in response)
        if (not has_cache_headers and hasattr(request, 'session')
                and request.user.is_authenticated()):
            patch_cache_control(response, no_store=True, no_cache=True,
                                must_revalidate=True, max_age=0)
        return response
| mit |
kimiyoung/transformer-xl | pytorch/utils/proj_adaptive_softmax.py | 1 | 5692 | from collections import defaultdict
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
CUDA_MAJOR = int(torch.version.cuda.split('.')[0])
CUDA_MINOR = int(torch.version.cuda.split('.')[1])
class ProjectedAdaptiveLogSoftmax(nn.Module):
    """Adaptive softmax with optional per-cluster low-rank projections.

    The vocabulary is partitioned by ``cutoffs`` into a frequent-token
    shortlist ("head") plus one or more tail clusters.  The head softmax
    has ``shortlist_size + n_clusters`` outputs: one logit per shortlist
    token and one logit per tail cluster.  The log-probability of a tail
    token is the sum of its cluster's log-prior (from the head) and its
    log-probability inside that cluster's softmax.

    Args:
        n_token: vocabulary size.
        d_embed: base embedding size (tail i uses d_embed // div_val**i).
        d_proj: size of the incoming hidden states; projected down to the
            per-cluster embedding size when they differ.
        cutoffs: ascending split points (exclusive of n_token).
        div_val: divisor applied to d_embed for successive clusters.
        keep_order: if True, forward() returns NLL values aligned with
            the input order instead of grouped by cluster.
    """

    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1,
                 keep_order=False):
        super(ProjectedAdaptiveLogSoftmax, self).__init__()

        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters

        if self.n_clusters > 0:
            # one extra head row/bias per tail cluster, appended after the
            # shortlist rows when the head weight matrix is assembled
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))

        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()

        if div_val == 1:
            # a single full-vocabulary output layer, sliced per cluster
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(
                        nn.Parameter(torch.Tensor(d_proj, d_embed))
                    )
                else:
                    self.out_projs.append(None)

            self.out_layers.append(nn.Linear(d_embed, n_token))
        else:
            # separate, progressively smaller output layers per cluster
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i+1]
                d_emb_i = d_embed // (div_val ** i)

                self.out_projs.append(
                    nn.Parameter(torch.Tensor(d_proj, d_emb_i))
                )

                self.out_layers.append(nn.Linear(d_emb_i, r_idx-l_idx))

        self.keep_order = keep_order

    def _compute_logit(self, hidden, weight, bias, proj):
        """Compute logits for `hidden`, optionally going through the
        low-rank projection `proj` (d_proj -> d_emb) first."""
        if proj is None:
            logit = F.linear(hidden, weight, bias=bias)
        else:
            proj_hid = F.linear(hidden, proj.t().contiguous())
            logit = F.linear(proj_hid, weight, bias=bias)

        return logit

    def forward(self, hidden, target, keep_order=False):
        '''
            hidden :: [len*bsz x d_proj]
            target :: [len*bsz]

        Returns the per-position negative log-likelihood.  Unless
        keep_order (argument or constructor flag) is set, the values are
        grouped by cluster rather than input order.
        '''
        if hidden.size(0) != target.size(0):
            raise RuntimeError('Input and target should have the same size '
                               'in the batch dimension.')

        if self.n_clusters == 0:
            # plain softmax over the whole vocabulary
            logit = self._compute_logit(hidden, self.out_layers[0].weight,
                                        self.out_layers[0].bias, self.out_projs[0])
            nll = -F.log_softmax(logit, dim=-1) \
                    .gather(1, target.unsqueeze(1)).squeeze(1)
        else:
            # construct per-cluster weights and biases; the head (i == 0)
            # additionally carries the cluster prior rows
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat(
                        [weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat(
                        [bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]

            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            head_logprob = F.log_softmax(head_logit, dim=1)

            nll = torch.zeros_like(target,
                                   dtype=hidden.dtype, device=hidden.device)

            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]

                mask_i = (target >= l_idx) & (target < r_idx)
                # squeeze(1), not squeeze(): a single match must stay 1-D
                # for the index_select/gather calls below
                indices_i = mask_i.nonzero().squeeze(1)

                if indices_i.numel() == 0:
                    continue

                target_i = target.index_select(0, indices_i) - l_idx
                head_logprob_i = head_logprob.index_select(0, indices_i)

                if i == 0:
                    logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]

                    hidden_i = hidden.index_select(0, indices_i)

                    tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
                    tail_logprob_i = F.log_softmax(tail_logit_i, dim=1)

                    # BUG FIX: the prior for tail cluster i lives at head
                    # column shortlist_size + i - 1.  The previous index
                    # [:, -i] walked the cluster columns in *reverse*,
                    # pairing tail 1 with the last cluster logit whenever
                    # n_clusters > 1.
                    cluster_col = self.shortlist_size + i - 1
                    logprob_i = head_logprob_i[:, cluster_col] \
                        + tail_logprob_i.gather(1, target_i[:, None]).squeeze(1)

                if (hasattr(self, 'keep_order') and self.keep_order) or keep_order:
                    nll.index_copy_(0, indices_i, -logprob_i)
                else:
                    nll[offset:offset+logprob_i.size(0)].copy_(-logprob_i)

                offset += logprob_i.size(0)

        return nll
| apache-2.0 |
xianggong/m2c_unit_test | test/operator/remainder_ulongulong/compile.py | 1861 | 4430 | #!/usr/bin/python
import os
import re
import shutil
import subprocess
def runCommand(command):
    """Run *command* and return an iterator over its output lines (bytes).

    stderr is folded into stdout.  The output is drained with
    communicate() before the process is reaped; the previous
    wait()-then-read order could deadlock once a child produced more
    output than the OS pipe buffer holds.
    """
    p = subprocess.Popen(command,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT)
    out, _ = p.communicate()
    # splitlines(True) keeps the line endings, matching readline()
    return iter(out.splitlines(True))
def dumpRunCommand(command, dump_file_name, postfix):
    """Run *command* (split on whitespace) and dump the command line plus
    all of its output to ``dump_file_name + postfix``.

    Fix: the dump file is now closed deterministically via ``with``;
    previously the handle was leaked and output could stay unflushed.
    """
    with open(dump_file_name + postfix, "w+") as dump_file:
        dump_file.write(command + "\n")
        for line in runCommand(command.split()):
            dump_file.write(line)
def rmFile(file_name):
    """Remove a file or directory tree, silently ignoring missing paths.

    Fix: the previous implementation shelled out to ``rm -rf`` with the
    command string split on spaces, which broke for paths containing
    whitespace.  shutil/os give the same rm -rf semantics in-process.
    """
    if os.path.isdir(file_name):
        shutil.rmtree(file_name, ignore_errors=True)
    else:
        try:
            os.remove(file_name)
        except OSError:
            pass
def rnm_ir(file_name):
    """Post-process ``<file_name>.ll``: insert an 'entry:' label after the
    function definition line and prefix all unnamed SSA values/labels
    with 'tmp_'.  Does nothing if the file is missing.

    Fix: the file was previously opened with the invalid mode "rw+"
    (rejected outright by Python 3) and never closed; it is now read and
    rewritten through two properly scoped handles.
    """
    ir_file_name = file_name + ".ll"
    if not os.path.isfile(ir_file_name):
        return
    with open(ir_file_name, "r") as fo:
        lines = fo.readlines()
    with open(ir_file_name, "w") as fo:
        for line in lines:
            # Add entry block identifier
            if "define" in line:
                line += "entry:\n"
            # Rename all unnamed variables (%3 -> %tmp_3)
            line = re.sub(r'\%([0-9]+)',
                          r'%tmp_\1',
                          line.rstrip())
            # Also rename branch labels ("; <label>:3" -> "tmp_3:")
            line = re.sub(r'(\;\ \<label\>\:)([0-9]+)',
                          r'tmp_\2:',
                          line.rstrip())
            fo.write(line + '\n')
def gen_ir(file_name):
    """Compile <file_name>.cl to LLVM IR with clang, logging the run to
    <file_name>.clang.log."""
    # Directories
    root_dir = '../../../'
    header_dir = root_dir + "inc/"

    # Headers
    header = (" -I " + header_dir +
              " -include " + header_dir + "m2c_buildin_fix.h " +
              " -include " + header_dir + "clc/clc.h " +
              " -D cl_clang_storage_class_specifiers ")

    compile_cmd = ("clang -S -emit-llvm -O0 -target r600-- -mcpu=verde " +
                   header + file_name + ".cl")
    dumpRunCommand(compile_cmd, file_name, ".clang.log")
def asm_ir(file_name):
    """Assemble ``file_name + ".ll"`` into bitcode ``file_name + ".bc"``
    with llvm-as; no-op when the IR file is missing."""
    src = file_name + ".ll"
    if not os.path.isfile(src):
        return
    dst = file_name + ".bc"
    runCommand(("llvm-as " + src + " -o " + dst).split())
def opt_bc(file_name):
    """Run the mem2reg pass on ``file_name + ".bc"``, producing
    ``file_name + ".opt.bc"``; no-op when the bitcode is missing."""
    src = file_name + ".bc"
    if not os.path.isfile(src):
        return
    dst = file_name + ".opt.bc"
    runCommand(("opt --mem2reg " + src + " -o " + dst).split())
def dis_bc(file_name):
    """Disassemble the optimized bitcode to readable IR with llvm-dis.

    NOTE(review): the guard tests for ``file_name + ".bc"`` while the
    input is ``file_name + ".opt.bc"`` — kept as-is to preserve the
    original behavior.
    """
    if not os.path.isfile(file_name + ".bc"):
        return
    src = file_name + ".opt.bc"
    dst = file_name + ".opt.ll"
    runCommand(("llvm-dis " + src + " -o " + dst).split())
def m2c_gen(file_name):
    """Run ``m2c --llvm2si`` on the optimized bitcode, logging to
    ``file_name + ".m2c.llvm2si.log"``; delete empty assembly output."""
    if not os.path.isfile(file_name + ".opt.bc"):
        return
    dumpRunCommand("m2c --llvm2si " + file_name + ".opt.bc",
                   file_name, ".m2c.llvm2si.log")
    # Remove the generated assembly if it came out empty.
    asm_path = file_name + ".opt.s"
    if os.path.isfile(asm_path) and os.path.getsize(asm_path) == 0:
        rmFile(asm_path)
def m2c_bin(file_name):
    """Assemble the Southern Islands assembly with ``m2c --si2bin``,
    logging to ``file_name + ".m2c.si2bin.log"``."""
    asm_path = file_name + ".opt.s"
    if os.path.isfile(asm_path):
        dumpRunCommand("m2c --si2bin " + asm_path,
                       file_name, ".m2c.si2bin.log")
def main():
    """Run the full compile pipeline for every .cl file in the current
    directory: clang IR, rename, assemble, optimize, disassemble, m2c."""
    pipeline = (gen_ir, rnm_ir, asm_ir, opt_bc, dis_bc, m2c_gen, m2c_bin)
    for entry in os.listdir("./"):
        if not entry.endswith(".cl"):
            continue
        base = os.path.splitext(entry)[0]
        for stage in pipeline:
            stage(base)
# Run the compile pipeline only when executed as a script.
if __name__ == "__main__":
    main()
| gpl-2.0 |
EchO-KID/google-breakpad | src/tools/gyp/test/mac/gyptest-strip.py | 80 | 1538 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that stripping works.
"""
import TestGyp
import re
import subprocess
import sys
import time
# This test only makes sense on macOS: it inspects Mach-O symbol tables
# with otool. (Python 2 script — note the print statement below.)
if sys.platform == 'darwin':
  test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])

  test.run_gyp('test.gyp', chdir='strip')
  test.build('test.gyp', test.ALL, chdir='strip')

  # Lightweight check if stripping was done.
  def OutPath(s):
    # Path of the built shared library named `s` inside the 'strip' dir.
    return test.built_file_path(s, type=test.SHARED_LIB, chdir='strip')

  def CheckNsyms(p, n_expected):
    # Parse `otool -l` output for the symbol-table entry count (nsyms)
    # of the Mach-O binary at `p`; fail unless it equals n_expected.
    r = re.compile(r'nsyms\s+(\d+)')
    proc = subprocess.Popen(['otool', '-l', p], stdout=subprocess.PIPE)
    o = proc.communicate()[0]
    assert not proc.returncode
    m = r.search(o)
    n = int(m.group(1))
    if n != n_expected:
      print 'Stripping: Expected %d symbols, got %d' % (n_expected, n)
      test.fail_test()

  # The actual numbers here are not interesting, they just need to be the same
  # in both the xcode and the make build.
  CheckNsyms(OutPath('no_postprocess'), 11)
  CheckNsyms(OutPath('no_strip'), 11)
  CheckNsyms(OutPath('strip_all'), 0)
  CheckNsyms(OutPath('strip_nonglobal'), 2)
  CheckNsyms(OutPath('strip_debugging'), 3)
  CheckNsyms(OutPath('strip_all_custom_flags'), 0)
  # Bundles are not shared libs, so build the path explicitly.
  CheckNsyms(test.built_file_path(
      'strip_all_bundle.framework/Versions/A/strip_all_bundle', chdir='strip'),
      0)
  CheckNsyms(OutPath('strip_save'), 3)

  test.pass_test()
| bsd-3-clause |
hirofumi0810/tensorflow_end2end_speech_recognition | utils/io/labels/character.py | 1 | 3892 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
class Char2idx(object):
    """Convert from character to index.

    Args:
        map_file_path (string): path to the mapping file; each line is
            "<character(s)> <integer index>"
        double_letter (bool, optional): if True, prefer two-character
            entries from the mapping file (grouping repeated letters)
    """

    def __init__(self, map_file_path, double_letter=False):
        self.double_letter = double_letter

        # Read the mapping file (character -> integer index)
        self.map_dict = {}
        with open(map_file_path, 'r') as f:
            for line in f:
                line = line.strip().split()
                self.map_dict[line[0]] = int(line[1])

    def __call__(self, str_char):
        """
        Args:
            str_char (string): string of characters
        Returns:
            char_list (list): character indices
        """
        char_list = list(str_char)

        # Robustness fix: an empty string maps to an empty index list.
        # (The original raised IndexError on char_list[-1] when
        # double_letter was enabled.)
        if not char_list:
            return []

        # Convert from character to index
        if self.double_letter:
            skip_flag = False
            for i in range(len(char_list) - 1):
                if skip_flag:
                    # Second half of an already-consumed pair.
                    char_list[i] = ''
                    skip_flag = False
                    continue

                # Prefer the two-character mapping when it exists.
                if char_list[i] + char_list[i + 1] in self.map_dict.keys():
                    char_list[i] = self.map_dict[char_list[i] +
                                                 char_list[i + 1]]
                    skip_flag = True
                else:
                    char_list[i] = self.map_dict[char_list[i]]

            # Final character
            if skip_flag:
                char_list[-1] = ''
            else:
                char_list[-1] = self.map_dict[char_list[-1]]

            # Remove skipped characters
            while '' in char_list:
                char_list.remove('')
        else:
            for i in range(len(char_list)):
                char_list[i] = self.map_dict[char_list[i]]

        return char_list
class Idx2char(object):
    """Convert from index to character.

    Args:
        map_file_path (string): path to the mapping file
        capital_divide (bool, optional): set True when using
            capital-divided character sequences
        space_mark (string): the space mark to divide a sequence into words
    """

    def __init__(self, map_file_path, capital_divide=False, space_mark=' '):
        self.capital_divide = capital_divide
        self.space_mark = space_mark

        # Read the mapping file (integer index -> character)
        self.map_dict = {}
        with open(map_file_path, 'r') as f:
            for line in f:
                fields = line.strip().split()
                self.map_dict[int(fields[1])] = fields[0]

    def __call__(self, index_list, padded_value=-1):
        """
        Args:
            index_list (np.ndarray): list of character indices.
                Batch size 1 is expected.
            padded_value (int): the value used for padding
        Returns:
            str_char (string): a sequence of characters
        """
        assert type(
            index_list) == np.ndarray, 'index_list should be np.ndarray.'

        # Strip padding entries before the lookup.
        index_list = np.delete(
            index_list, np.where(index_list == padded_value), axis=0)

        # Map indices back to their characters.
        chars = [self.map_dict[idx] for idx in index_list]

        if not self.capital_divide:
            return ''.join(chars)

        # Capital letters mark word starts: emit a space before each
        # capital (except at position 0) and lower-case everything.
        pieces = []
        for pos, ch in enumerate(chars):
            if pos != 0 and 'A' <= ch <= 'Z':
                pieces.append(self.space_mark)
            pieces.append(ch.lower())
        return ''.join(pieces)

    # TODO: change to batch version
# TODO: change to batch version
| mit |
mishravikas/geonode-permissions | geonode/people/forms.py | 35 | 2900 | # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2012 OpenPlans
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
import taggit
from django import forms
from django.contrib.auth.forms import UserCreationForm, UserChangeForm
from django.utils.translation import ugettext_lazy as _
from geonode.people.models import Profile
from geonode.base.models import ContactRole
# Ported in from django-registration
# Default widget attributes: flags form inputs as required via CSS class.
attrs_dict = {'class': 'required'}
class ProfileCreationForm(UserCreationForm):
    """User-creation form bound to the Profile model (username only)."""

    class Meta:
        model = Profile
        fields = ("username",)

    def clean_username(self):
        # Since User.username is unique, this check is redundant,
        # but it sets a nicer error message than the ORM. See #13147.
        username = self.cleaned_data["username"]
        try:
            Profile.objects.get(username=username)
        except Profile.DoesNotExist:
            # No clash: the requested username is free.
            return username
        else:
            # A profile with this username already exists.
            raise forms.ValidationError(
                self.error_messages['duplicate_username'],
                code='duplicate_username',
            )
class ProfileChangeForm(UserChangeForm):
    """Admin change form for Profile; exposes every model field."""

    class Meta:
        model = Profile
        fields = '__all__'
class ForgotUsernameForm(forms.Form):
    """Single-field form asking for the e-mail address tied to a
    forgotten username."""

    email = forms.EmailField(
        widget=forms.TextInput(attrs=dict(attrs_dict, maxlength=75)),
        label=_(u'Email Address'))
class RoleForm(forms.ModelForm):
    """Edit a ContactRole; the contact and layer are set by the view."""

    class Meta:
        model = ContactRole
        exclude = ('contact', 'layer')
class PocForm(forms.Form):
    """Pick a new point of contact among all profiles."""

    contact = forms.ModelChoiceField(label="New point of contact",
                                     queryset=Profile.objects.all())
class ProfileForm(forms.ModelForm):
    """Self-service profile editor with free-form keyword tagging."""

    keywords = taggit.forms.TagField(
        required=False,
        help_text=_("A space or comma-separated list of keywords"))

    class Meta:
        model = Profile
        # Hide account/permission internals; users may edit only the
        # descriptive profile fields.
        exclude = (
            'user',
            'password',
            'last_login',
            'groups',
            'user_permissions',
            'username',
            'is_staff',
            'is_superuser',
            'is_active',
            'date_joined')
| gpl-3.0 |
oscarolar/odoo | addons/mail/mail_alias.py | 16 | 15252 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2012 OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import re
import unicodedata
from openerp.osv import fields, osv
from openerp.tools import ustr
from openerp.modules.registry import RegistryManager
from openerp import SUPERUSER_ID
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
# Inspired by http://stackoverflow.com/questions/517923
def remove_accents(input_str):
    """Suboptimal-but-better-than-nothing way to replace accented
    latin letters by an ASCII equivalent. Will obviously change the
    meaning of input_str and work only for some cases"""
    # NFKD decomposition splits accented letters into base character
    # plus combining marks; drop the marks and keep the base characters.
    decomposed = unicodedata.normalize('NFKD', ustr(input_str))
    return u''.join(c for c in decomposed if not unicodedata.combining(c))
class mail_alias(osv.Model):
    """A Mail Alias is a mapping of an email address with a given OpenERP Document
       model. It is used by OpenERP's mail gateway when processing incoming emails
       sent to the system. If the recipient address (To) of the message matches
       a Mail Alias, the message will be either processed following the rules
       of that alias. If the message is a reply it will be attached to the
       existing discussion on the corresponding record, otherwise a new
       record of the corresponding model will be created.

       This is meant to be used in combination with a catch-all email configuration
       on the company's mail server, so that as soon as a new mail.alias is
       created, it becomes immediately usable and OpenERP will accept email for it.
       """
    _name = 'mail.alias'
    _description = "Email Aliases"
    _rec_name = 'alias_name'
    _order = 'alias_model_id, alias_name'

    def _get_alias_domain(self, cr, uid, ids, name, args, context=None):
        """Functional-field getter: every alias shares the single catchall
        domain stored in the 'mail.catchall.domain' config parameter."""
        ir_config_parameter = self.pool.get("ir.config_parameter")
        domain = ir_config_parameter.get_param(cr, uid, "mail.catchall.domain", context=context)
        return dict.fromkeys(ids, domain or "")

    _columns = {
        'alias_name': fields.char('Alias Name',
                                  help="The name of the email alias, e.g. 'jobs' if you want to catch emails for <jobs@example.my.openerp.com>",),
        'alias_model_id': fields.many2one('ir.model', 'Aliased Model', required=True, ondelete="cascade",
                                          help="The model (OpenERP Document Kind) to which this alias "
                                               "corresponds. Any incoming email that does not reply to an "
                                               "existing record will cause the creation of a new record "
                                               "of this model (e.g. a Project Task)",
                                          # hack to only allow selecting mail_thread models (we might
                                          # (have a few false positives, though)
                                          domain="[('field_id.name', '=', 'message_ids')]"),
        'alias_user_id': fields.many2one('res.users', 'Owner',
                                         help="The owner of records created upon receiving emails on this alias. "
                                              "If this field is not set the system will attempt to find the right owner "
                                              "based on the sender (From) address, or will use the Administrator account "
                                              "if no system user is found for that address."),
        'alias_defaults': fields.text('Default Values', required=True,
                                      help="A Python dictionary that will be evaluated to provide "
                                           "default values when creating new records for this alias."),
        'alias_force_thread_id': fields.integer('Record Thread ID',
                                                help="Optional ID of a thread (record) to which all incoming "
                                                     "messages will be attached, even if they did not reply to it. "
                                                     "If set, this will disable the creation of new records completely."),
        'alias_domain': fields.function(_get_alias_domain, string="Alias domain", type='char'),
        'alias_parent_model_id': fields.many2one('ir.model', 'Parent Model',
                                                 help="Parent model holding the alias. The model holding the alias reference\n"
                                                      "is not necessarily the model given by alias_model_id\n"
                                                      "(example: project (parent_model) and task (model))"),
        'alias_parent_thread_id': fields.integer('Parent Record Thread ID',
                                                 help="ID of the parent record holding the alias (example: project holding the task creation alias)"),
        'alias_contact': fields.selection([
            ('everyone', 'Everyone'),
            ('partners', 'Authenticated Partners'),
            ('followers', 'Followers only'),
        ], string='Alias Contact Security', required=True,
            help="Policy to post a message on the document using the mailgateway.\n"
                 "- everyone: everyone can post\n"
                 "- partners: only authenticated partners\n"
                 "- followers: only followers of the related document\n"),
    }

    _defaults = {
        'alias_defaults': '{}',
        'alias_user_id': lambda self, cr, uid, context: uid,
        # looks better when creating new aliases - even if the field is informative only
        'alias_domain': lambda self, cr, uid, context: self._get_alias_domain(cr, SUPERUSER_ID, [1], None, None)[1],
        'alias_contact': 'everyone',
    }

    _sql_constraints = [
        ('alias_unique', 'UNIQUE(alias_name)', 'Unfortunately this email alias is already used, please choose a unique one')
    ]

    def _check_alias_defaults(self, cr, uid, ids, context=None):
        """Constraint: alias_defaults must evaluate to a dict literal.

        NOTE(review): uses eval() on stored text; the value is only ever
        written by internal users, but treat it as trusted-input-only.
        """
        try:
            for record in self.browse(cr, uid, ids, context=context):
                dict(eval(record.alias_defaults))
        except Exception:
            return False
        return True

    _constraints = [
        (_check_alias_defaults, '''Invalid expression, it must be a literal python dictionary definition e.g. "{'field': 'value'}"''', ['alias_defaults']),
    ]

    def name_get(self, cr, uid, ids, context=None):
        """Return the mail alias display alias_name, including the implicit
           mail catchall domain if exists from config otherwise "New Alias".
           e.g. `jobs@openerp.my.openerp.com` or `jobs` or 'New Alias'
        """
        res = []
        for record in self.browse(cr, uid, ids, context=context):
            if record.alias_name and record.alias_domain:
                res.append((record['id'], "%s@%s" % (record.alias_name, record.alias_domain)))
            elif record.alias_name:
                res.append((record['id'], "%s" % (record.alias_name)))
            else:
                res.append((record['id'], _("Inactive Alias")))
        return res

    def _find_unique(self, cr, uid, name, context=None):
        """Find a unique alias name similar to ``name``. If ``name`` is
           already taken, make a variant by adding an integer suffix until
           an unused alias is found.
        """
        sequence = None
        while True:
            new_name = "%s%s" % (name, sequence) if sequence is not None else name
            if not self.search(cr, uid, [('alias_name', '=', new_name)]):
                break
            # first variant tried is "<name>2", then "<name>3", ...
            sequence = (sequence + 1) if sequence else 2
        return new_name

    def _clean_and_make_unique(self, cr, uid, name, context=None):
        """Sanitize ``name`` into a safe local-part and make it unique."""
        # when an alias name appears to already be an email, we keep the local part only
        name = remove_accents(name).lower().split('@')[0]
        # collapse any run of characters outside [word, '+', '.'] to '-'
        name = re.sub(r'[^\w+.]+', '-', name)
        return self._find_unique(cr, uid, name, context=context)

    def migrate_to_alias(self, cr, child_model_name, child_table_name, child_model_auto_init_fct,
                         alias_model_name, alias_id_column, alias_key, alias_prefix='', alias_force_key='', alias_defaults={},
                         alias_generate_name=False, context=None):
        """ Installation hook to create aliases for all users and avoid constraint errors.

            :param child_model_name: model name of the child class (i.e. res.users)
            :param child_table_name: table name of the child class (i.e. res_users)
            :param child_model_auto_init_fct: pointer to the _auto_init function
                (i.e. super(res_users,self)._auto_init(cr, context=context))
            :param alias_model_name: name of the aliased model
            :param alias_id_column: alias_id column (i.e. self._columns['alias_id'])
            :param alias_key: name of the column used for the unique name (i.e. 'login')
            :param alias_prefix: prefix for the unique name (i.e. 'jobs' + ...)
            :param alias_force_key: name of the column for force_thread_id;
                if empty string, not taken into account
            :param alias_defaults: dict, keys = mail.alias columns, values = child
                model column name used for default values (i.e. {'job_id': 'id'})
            :param alias_generate_name: automatically generate alias name using prefix / alias key;
                default alias_name value is False because since 8.0 it is not required anymore
        """
        if context is None:
            context = {}

        # disable the unique alias_id not null constraint, to avoid spurious warning during
        # super.auto_init. We'll reinstall it afterwards.
        alias_id_column.required = False

        # call _auto_init
        res = child_model_auto_init_fct(cr, context=context)

        registry = RegistryManager.get(cr.dbname)
        mail_alias = registry.get('mail.alias')
        child_class_model = registry[child_model_name]
        no_alias_ids = child_class_model.search(cr, SUPERUSER_ID, [('alias_id', '=', False)], context={'active_test': False})
        # Use read() not browse(), to avoid prefetching uninitialized inherited fields
        for obj_data in child_class_model.read(cr, SUPERUSER_ID, no_alias_ids, [alias_key]):
            alias_vals = {'alias_name': False}
            if alias_generate_name:
                alias_vals['alias_name'] = '%s%s' % (alias_prefix, obj_data[alias_key])
            if alias_force_key:
                alias_vals['alias_force_thread_id'] = obj_data[alias_force_key]
            alias_vals['alias_defaults'] = dict((k, obj_data[v]) for k, v in alias_defaults.iteritems())
            alias_vals['alias_parent_thread_id'] = obj_data['id']
            alias_create_ctx = dict(context, alias_model_name=alias_model_name, alias_parent_model_name=child_model_name)
            alias_id = mail_alias.create(cr, SUPERUSER_ID, alias_vals, context=alias_create_ctx)
            child_class_model.write(cr, SUPERUSER_ID, obj_data['id'], {'alias_id': alias_id}, context={'mail_notrack': True})
            _logger.info('Mail alias created for %s %s (id %s)', child_model_name, obj_data[alias_key], obj_data['id'])

        # Finally attempt to reinstate the missing constraint
        try:
            cr.execute('ALTER TABLE %s ALTER COLUMN alias_id SET NOT NULL' % (child_table_name))
        except Exception:
            _logger.warning("Table '%s': unable to set a NOT NULL constraint on column '%s' !\n"\
                            "If you want to have it, you should update the records and execute manually:\n"\
                            "ALTER TABLE %s ALTER COLUMN %s SET NOT NULL",
                            child_table_name, 'alias_id', child_table_name, 'alias_id')

        # set back the unique alias_id constraint
        alias_id_column.required = True
        return res

    def create(self, cr, uid, vals, context=None):
        """ Creates an email.alias record according to the values provided in ``vals``,
            with 2 alterations: the ``alias_name`` value may be suffixed in order to
            make it unique (and certain unsafe characters replaced), and
            he ``alias_model_id`` value will set to the model ID of the ``model_name``
            context value, if provided.
        """
        if context is None:
            context = {}
        model_name = context.get('alias_model_name')
        parent_model_name = context.get('alias_parent_model_name')
        if vals.get('alias_name'):
            vals['alias_name'] = self._clean_and_make_unique(cr, uid, vals.get('alias_name'), context=context)
        if model_name:
            model_id = self.pool.get('ir.model').search(cr, uid, [('model', '=', model_name)], context=context)[0]
            vals['alias_model_id'] = model_id
        if parent_model_name:
            model_id = self.pool.get('ir.model').search(cr, uid, [('model', '=', parent_model_name)], context=context)[0]
            vals['alias_parent_model_id'] = model_id
        return super(mail_alias, self).create(cr, uid, vals, context=context)

    def write(self, cr, uid, ids, vals, context=None):
        """Give a unique alias name if the given alias name is already assigned."""
        if vals.get('alias_name'):
            vals['alias_name'] = self._clean_and_make_unique(cr, uid, vals.get('alias_name'), context=context)
        return super(mail_alias, self).write(cr, uid, ids, vals, context=context)

    def open_document(self, cr, uid, ids, context=None):
        """Window action opening the record this alias forces messages onto,
        or False when no target model/record is configured."""
        alias = self.browse(cr, uid, ids, context=context)[0]
        if not alias.alias_model_id or not alias.alias_force_thread_id:
            return False
        return {
            'view_type': 'form',
            'view_mode': 'form',
            'res_model': alias.alias_model_id.model,
            'res_id': alias.alias_force_thread_id,
            'type': 'ir.actions.act_window',
        }

    def open_parent_document(self, cr, uid, ids, context=None):
        """Window action opening the parent record holding this alias,
        or False when no parent model/record is configured."""
        alias = self.browse(cr, uid, ids, context=context)[0]
        if not alias.alias_parent_model_id or not alias.alias_parent_thread_id:
            return False
        return {
            'view_type': 'form',
            'view_mode': 'form',
            'res_model': alias.alias_parent_model_id.model,
            'res_id': alias.alias_parent_thread_id,
            'type': 'ir.actions.act_window',
        }
| agpl-3.0 |
ryfeus/lambda-packs | Tensorflow_LightGBM_Scipy_nightly/source/tensorflow/contrib/training/python/training/training.py | 50 | 19844 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains various routines and helper functions for training models.
This script contains various functions for training models. These include
manipulating gradients, creating a `train_op` (an operation that computes the
loss and applies the gradients) and a training loop function. The training loop
allows the user to pass in the `train_op` and runs the optimization according
to user-specified arguments.
************************************
* A simple working training script *
************************************
# Load data and create the model:
images, labels = LoadData(...)
predictions = MyModel(images)
# Define the loss:
tf.contrib.losses.log_loss(predictions, labels)
total_loss = tf.contrib.losses.get_total_loss()
# Define the optimizer:
optimizer = tf.train.MomentumOptimizer(FLAGS.learning_rate, FLAGS.momentum)
# Create the train_op
train_op = tf.contrib.training.create_train_op(total_loss, optimizer)
# Run training.
tf.contrib.training.train(train_op, my_log_dir)
*************************
* Creating the train_op *
*************************
In order to use the `train` function, one needs a train_op: an `Operation` that
(a) computes the loss, (b) applies the gradients to update the weights and
(c) returns the value of the loss. tf.contrib.training.create_train_op creates
such an `Operation`. This function also provides the ability to manipulate
the gradients using a few arguments:
# Create the train_op and clip the gradient norms:
train_op = tf.contrib.training.create_train_op(
total_loss,
optimizer,
clip_gradient_norm=4)
# Create the train_op and scale the gradients by providing a map from variable
# name (or variable) to a scaling coefficient:
def transform_grads_fn(grads):
gradient_multipliers = {
'conv0/weights': 1.2,
'fc8/weights': 3.4,
}
return tf.contrib.training.multiply_gradients(
grads, gradient_multipliers)
train_op = tf.contrib.training.create_train_op(
total_loss,
optimizer,
transform_grads_fn=transform_grads_fn)
****************************************************************
* Performing additional (non-gradient) updates during training *
****************************************************************
Many networks utilize modules, like BatchNorm, that require performing a series
of non-gradient updates during training. tf.contrib.training.create_train_op
allows a user to pass in a list of update_ops to call along with the gradient
updates.
train_op = tf.contrib.training.create_train_op(
total_loss, optimizer, update_ops)
By default, tf.contrib.training.create_train_op includes all update ops that are
part of the `tf.GraphKeys.UPDATE_OPS` collection. Additionally, the
tf.contrib.layers.batch_norm function adds the moving mean and moving variance
updates to this collection. Consequently, users who want to use
tf.contrib.layers.batch_norm will not need to take any additional steps in order
to have the moving mean and moving variance updates be computed.
However, users with additional, specialized updates can either override the
default update ops or simply add additional update ops to the
`tf.GraphKeys.UPDATE_OPS` collection:
# Force `create_train_op` to NOT use ANY update_ops:
train_op = tf.contrib.training.create_train_op(
total_loss,
optimizer,
update_ops=[])
# Use an alternative set of update ops:
train_op = tf.contrib.training.create_train_op(
total_loss,
optimizer,
update_ops=my_other_update_ops)
# Use a set of update ops in addition to the default updates:
tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, my_update0)
tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, my_update1)
train_op = tf.contrib.training.create_train_op(
total_loss,
optimizer)
# Which is the same as:
train_op = tf.contrib.training.create_train_op(
total_loss,
optimizer,
update_ops=tf.get_collection(tf.GraphKeys.UPDATE_OPS))
******************************************
* Initializing a model from a checkpoint *
******************************************
It is common to want to 'warm-start' a model from a pre-trained checkpoint.
One can use a tf.Scaffold and an initializing function to do so.
...
# Create the train_op
train_op = tf.contrib.training.create_train_op(total_loss, optimizer)
# Create the initial assignment op
checkpoint_path = '/path/to/old_model_checkpoint'
variables_to_restore = tf.contrib.framework.get_model_variables()
init_fn = tf.contrib.framework.assign_from_checkpoint_fn(
checkpoint_path, variables_to_restore)
# Run training.
scaffold = tf.Scaffold(init_fn=init_fn)
tf.contrib.training.train(train_op, my_log_dir, scaffold=scaffold)
***************************************************************************
* Initializing a model from a checkpoint whose variable names don't match *
***************************************************************************
At times, a user may want to initialize a new model with values from a
checkpoint whose variable names do not match those of the current model. In this
case, one needs to create a mapping from the checkpoint variable names to the
current model variables. This requires only a small modification of the code
above:
...
# Creates a model with two variables, var0 and var1
predictions = MyModel(images)
...
# Create the train_op
train_op = tf.contrib.training.create_train_op(total_loss, optimizer)
checkpoint_path = '/path/to/old_model_checkpoint'
# Create the mapping:
variables_to_restore = {
'name_var_0_in_checkpoint':
tf.contrib.framework.get_unique_variable('var0'),
'name_var_1_in_checkpoint':
tf.contrib.framework.get_unique_variable('var1')
}
init_fn = tf.contrib.framework.assign_from_checkpoint_fn(
checkpoint_path, variables_to_restore)
scaffold = tf.Scaffold(init_fn=init_fn)
# Run training.
tf.contrib.training.train(train_op, my_log_dir, scaffold=scaffold)
*************************************************
* Fine-Tuning Part of a model from a checkpoint *
*************************************************
Rather than initializing all of the weights of a given model, we sometimes
only want to restore some of the weights from a checkpoint. To do this, one
need only filter those variables to initialize as follows:
...
# Create the train_op
train_op = tf.contrib.training.create_train_op(total_loss, optimizer)
checkpoint_path = '/path/to/old_model_checkpoint'
# Specify the variables to restore via a list of inclusion or exclusion
# patterns:
variables_to_restore = tf.contrib.framework.get_variables_to_restore(
include=["conv"], exclude=["fc8", "fc9])
# or
variables_to_restore = tf.contrib.framework.get_variables_to_restore(
exclude=["conv"])
init_fn = tf.contrib.framework.assign_from_checkpoint_fn(
checkpoint_path, variables_to_restore)
scaffold = tf.Scaffold(init_fn=init_fn)
# Run training.
tf.contrib.training.train(train_op, my_log_dir, scaffold=scaffold)
******************************************************
* Initializing model variables from values in memory *
******************************************************
One may want to initialize the weights of a model from values coming from an
arbitrary source (a text document, matlab file, etc). While this is technically
feasible using assign operations, this strategy results in the values of your
weights being stored in the graph. For large models, this becomes prohibitively
large. However, it's possible to perform this initial assignment without having
to store the values of the initial model in the graph itself by using
placeholders and a feed dictionary:
...
# Create the train_op
train_op = tf.contrib.training.create_train_op(total_loss, optimizer)
# Create the mapping from variable names to values:
var0_initial_value = ReadFromDisk(...)
var1_initial_value = ReadFromDisk(...)
var_names_to_values = {
'var0': var0_initial_value,
'var1': var1_initial_value,
}
init_fn = tf.contrib.framework.assign_from_values_fn(var_names_to_values)
scaffold = tf.Scaffold(init_fn=init_fn)
# Run training.
tf.contrib.training.train(train_op, my_log_dir, scaffold=scaffold)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import monitored_session
from tensorflow.python.training import optimizer as tf_optimizer
# TODO(nsilberman): move add_gradients_summaries, clip_gradient_norms and
# multiply_gradients into contrib/summaries and contrib/optimizers.py
# Public API of this module.
__all__ = [
    'add_gradients_summaries',
    'clip_gradient_norms',
    'create_train_op',
    'multiply_gradients',
    'train',
]
def add_gradients_summaries(grads_and_vars):
  """Add summaries to gradients.

  Args:
    grads_and_vars: A list of gradient to variable pairs (tuples).

  Returns:
    The list of created summaries.
  """
  summaries = []
  for grad, var in grads_and_vars:
    if grad is None:
      logging.info('Var %s has no gradient', var.op.name)
      continue
    # IndexedSlices carry their values in a separate tensor.
    grad_values = grad.values if isinstance(grad, ops.IndexedSlices) else grad
    summaries.append(
        summary.histogram(var.op.name + '_gradient', grad_values))
    summaries.append(
        summary.scalar(var.op.name + '_gradient_norm',
                       clip_ops.global_norm([grad_values])))
  return summaries
def clip_gradient_norms(gradients_to_variables, max_norm):
  """Clips the gradients by the given value.

  Args:
    gradients_to_variables: A list of gradient to variable pairs (tuples).
    max_norm: the maximum norm value.

  Returns:
    A list of clipped gradient to variable pairs.
  """
  result = []
  for grad, var in gradients_to_variables:
    if grad is not None:
      if isinstance(grad, ops.IndexedSlices):
        # Clip only the values; keep indices/shape of the sparse gradient.
        clipped_values = clip_ops.clip_by_norm(grad.values, max_norm)
        grad = ops.IndexedSlices(clipped_values, grad.indices,
                                 grad.dense_shape)
      else:
        grad = clip_ops.clip_by_norm(grad, max_norm)
    # None gradients are passed through unchanged.
    result.append((grad, var))
  return result
def multiply_gradients(grads_and_vars, gradient_multipliers):
  """Scales selected gradients by per-variable coefficients.

  Args:
    grads_and_vars: A list of gradient to variable pairs (tuples).
    gradient_multipliers: A map from either `Variables` or `Variable` op names
      to the coefficient by which the associated gradient should be scaled.

  Returns:
    The updated list of gradient to variable pairs.

  Raises:
    ValueError: If `grads_and_vars` is not a list or if `gradient_multipliers`
    is empty or None or if `gradient_multipliers` is not a dictionary.
  """
  if not isinstance(grads_and_vars, list):
    raise ValueError('`grads_and_vars` must be a list.')
  if not gradient_multipliers:
    raise ValueError('`gradient_multipliers` is empty.')
  if not isinstance(gradient_multipliers, dict):
    raise ValueError('`gradient_multipliers` must be a dict.')
  scaled = []
  for gradient, variable in grads_and_vars:
    # The map may be keyed either by the variable object or by its op name.
    if variable in gradient_multipliers or variable.op.name in gradient_multipliers:
      key = variable if variable in gradient_multipliers else variable.op.name
      if gradient is None:
        raise ValueError('Requested multiple of `None` gradient.')
      multiplier = constant_op.constant(
          gradient_multipliers[key], dtype=gradient.dtype)
      if isinstance(gradient, ops.IndexedSlices):
        scaled_values = gradient.values * multiplier
        gradient = ops.IndexedSlices(
            scaled_values, gradient.indices, gradient.dense_shape)
      else:
        gradient = gradient * multiplier
    scaled.append((gradient, variable))
  return scaled
# Sentinel default for `create_train_op`'s `global_step` argument; lets us
# distinguish "use the framework's global step" from an explicit `None`.
_USE_GLOBAL_STEP = 0
def create_train_op(total_loss,
                    optimizer,
                    global_step=_USE_GLOBAL_STEP,
                    update_ops=None,
                    variables_to_train=None,
                    transform_grads_fn=None,
                    summarize_gradients=False,
                    gate_gradients=tf_optimizer.Optimizer.GATE_OP,
                    aggregation_method=None,
                    colocate_gradients_with_ops=False,
                    check_numerics=True):
  """Creates an `Operation` that evaluates the gradients and returns the loss.
  Args:
    total_loss: A `Tensor` representing the total loss.
    optimizer: A tf.Optimizer to use for computing the gradients.
    global_step: A `Tensor` representing the global step variable. If left as
      `_USE_GLOBAL_STEP`, then tf.contrib.framework.global_step() is used.
    update_ops: An optional list of updates to execute. If `update_ops` is
      `None`, then the update ops are set to the contents of the
      `tf.GraphKeys.UPDATE_OPS` collection. If `update_ops` is not `None`, but
      it doesn't contain all of the update ops in `tf.GraphKeys.UPDATE_OPS`,
      a warning will be displayed.
    variables_to_train: an optional list of variables to train. If None, it will
      default to all tf.trainable_variables().
    transform_grads_fn: A function which takes a single argument, a list of
      gradient to variable pairs (tuples), performs any requested gradient
      updates, such as gradient clipping or multipliers, and returns the updated
      list.
    summarize_gradients: Whether or not add summaries for each gradient.
    gate_gradients: How to gate the computation of gradients. See tf.Optimizer.
    aggregation_method: Specifies the method used to combine gradient terms.
      Valid values are defined in the class `AggregationMethod`.
    colocate_gradients_with_ops: Whether or not to try colocating the gradients
      with the ops that generated them.
    check_numerics: Whether or not we apply check_numerics.
  Returns:
    A `Tensor` that when evaluated, computes the gradients and returns the total
    loss value.
  """
  if global_step is _USE_GLOBAL_STEP:
    # NOTE(review): `variables` is not among the imports visible in this
    # chunk (only `variables as tf_variables` is) -- confirm the contrib
    # framework `variables` module is imported earlier in the file.
    global_step = variables.get_or_create_global_step()
  # Update ops use GraphKeys.UPDATE_OPS collection if update_ops is None.
  global_update_ops = set(ops.get_collection(ops.GraphKeys.UPDATE_OPS))
  if update_ops is None:
    update_ops = global_update_ops
  else:
    update_ops = set(update_ops)
  if not global_update_ops.issubset(update_ops):
    logging.warning('update_ops in create_train_op does not contain all the '
                    ' update_ops in GraphKeys.UPDATE_OPS')
  # Make sure update_ops are computed before total_loss.
  if update_ops:
    with ops.control_dependencies(update_ops):
      barrier = control_flow_ops.no_op(name='update_barrier')
    total_loss = control_flow_ops.with_dependencies([barrier], total_loss)
  if variables_to_train is None:
    # Default to tf.trainable_variables()
    variables_to_train = tf_variables.trainable_variables()
  else:
    # Make sure that variables_to_train are in tf.trainable_variables()
    for v in variables_to_train:
      assert v in tf_variables.trainable_variables()
  assert variables_to_train
  # Create the gradients. Note that apply_gradients adds the gradient
  # computation to the current graph.
  grads = optimizer.compute_gradients(
      total_loss,
      variables_to_train,
      gate_gradients=gate_gradients,
      aggregation_method=aggregation_method,
      colocate_gradients_with_ops=colocate_gradients_with_ops)
  if transform_grads_fn:
    grads = transform_grads_fn(grads)
  # Summarize gradients.
  if summarize_gradients:
    with ops.name_scope('summarize_grads'):
      add_gradients_summaries(grads)
  # Create gradient updates.
  grad_updates = optimizer.apply_gradients(grads, global_step=global_step)
  with ops.name_scope('train_op'):
    # Make sure total_loss is valid.
    if check_numerics:
      total_loss = array_ops.check_numerics(total_loss,
                                            'LossTensor is inf or nan')
    # Ensure the train_tensor computes grad_updates.
    train_op = control_flow_ops.with_dependencies([grad_updates], total_loss)
  # Add the operation used for training to the 'train_op' collection
  train_ops = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
  if train_op not in train_ops:
    train_ops.append(train_op)
  return train_op
def train(train_op,
          logdir,
          master='',
          is_chief=True,
          scaffold=None,
          hooks=None,
          chief_only_hooks=None,
          save_checkpoint_secs=600,
          save_summaries_steps=100,
          config=None):
  """Runs the training loop.
  Args:
    train_op: A `Tensor` that, when executed, will apply the gradients and
      return the loss value.
    logdir: The directory where the graph and checkpoints are saved.
    master: The URL of the master.
    is_chief: Specifies whether or not the training is being run by the primary
      replica during replica training.
    scaffold: An tf.train.Scaffold instance.
    hooks: List of `tf.train.SessionRunHook` callbacks which are run inside the
      training loop.
    chief_only_hooks: List of `tf.train.SessionRunHook` instances which are run
      inside the training loop for the chief trainer only.
    save_checkpoint_secs: The frequency, in seconds, that a checkpoint is saved
      using a default checkpoint saver. If `save_checkpoint_secs` is set to
      `None`, then the default checkpoint saver isn't used.
    save_summaries_steps: The frequency, in number of global steps, that the
      summaries are written to disk using a default summary saver. If
      `save_summaries_steps` is set to `None`, then the default summary saver
      isn't used.
    config: An instance of `tf.ConfigProto`.
  Returns:
    the value of the loss function after training.
  Raises:
    ValueError: if `logdir` is `None` and either `save_checkpoint_secs` or
      `save_summaries_steps` are `None`.
  """
  # Only the chief writes checkpoints/summaries, hence the is_chief guard.
  if logdir is None and is_chief:
    if save_summaries_steps:
      raise ValueError(
          'logdir cannot be None when save_summaries_steps is not None')
    if save_checkpoint_secs:
      raise ValueError(
          'logdir cannot be None when save_checkpoint_secs is not None')
  with monitored_session.MonitoredTrainingSession(
      master=master,
      is_chief=is_chief,
      checkpoint_dir=logdir,
      scaffold=scaffold,
      hooks=hooks,
      chief_only_hooks=chief_only_hooks,
      save_checkpoint_secs=save_checkpoint_secs,
      save_summaries_steps=save_summaries_steps,
      config=config) as session:
    loss = None
    # Run until a hook requests a stop; `loss` keeps the value from the
    # last completed step (None if the loop never ran).
    while not session.should_stop():
      loss = session.run(train_op)
  return loss
| mit |
CamelBackNotation/CarnotKE | jyhton/lib-python/2.7/test/test_largefile.py | 129 | 7642 | """Test largefile support on system where this makes sense.
"""
from __future__ import print_function
import os
import stat
import sys
import unittest
from test.test_support import run_unittest, TESTFN, verbose, requires, \
unlink
import io # C implementation of io
import _pyio as pyio # Python implementation of io
try:
    import signal
    # The default handler for SIGXFSZ is to abort the process.
    # By ignoring it, system calls exceeding the file size resource
    # limit will raise IOError instead of crashing the interpreter.
    oldhandler = signal.signal(signal.SIGXFSZ, signal.SIG_IGN)
except (ImportError, AttributeError):
    # SIGXFSZ is not available on every platform (e.g. Windows).
    pass
# create >2GB file (2GB = 2147483648 bytes)
size = 2500000000
class LargeFileTest(unittest.TestCase):
    """Test that each file function works as expected for a large
    (i.e. > 2GB, do we have to check > 4GB) files.
    NOTE: the order of execution of the test methods is important! test_seek
    must run first to create the test file. File cleanup must also be handled
    outside the test instances because of this.
    """
    # `open` and `new_io` are injected by test_main() on dynamically created
    # subclasses: `open` is the io/_pyio/builtin open under test, `new_io`
    # is True for the io-module implementations (stricter tell() semantics).
    def test_seek(self):
        # Creates the shared >2GB test file; must run before the other tests.
        if verbose:
            print('create large file via seek (may be sparse file) ...')
        with self.open(TESTFN, 'wb') as f:
            f.write(b'z')
            f.seek(0)
            f.seek(size)
            f.write(b'a')
            f.flush()
            if verbose:
                print('check file size with os.fstat')
            self.assertEqual(os.fstat(f.fileno())[stat.ST_SIZE], size+1)
    def test_osstat(self):
        if verbose:
            print('check file size with os.stat')
        self.assertEqual(os.stat(TESTFN)[stat.ST_SIZE], size+1)
    def test_seek_read(self):
        if verbose:
            print('play around with seek() and read() with the built largefile')
        with self.open(TESTFN, 'rb') as f:
            self.assertEqual(f.tell(), 0)
            self.assertEqual(f.read(1), b'z')
            self.assertEqual(f.tell(), 1)
            f.seek(0)
            self.assertEqual(f.tell(), 0)
            f.seek(0, 0)
            self.assertEqual(f.tell(), 0)
            f.seek(42)
            self.assertEqual(f.tell(), 42)
            f.seek(42, 0)
            self.assertEqual(f.tell(), 42)
            f.seek(42, 1)
            self.assertEqual(f.tell(), 84)
            f.seek(0, 1)
            self.assertEqual(f.tell(), 84)
            f.seek(0, 2) # seek from the end
            self.assertEqual(f.tell(), size + 1 + 0)
            f.seek(-10, 2)
            self.assertEqual(f.tell(), size + 1 - 10)
            f.seek(-size-1, 2)
            self.assertEqual(f.tell(), 0)
            f.seek(size)
            self.assertEqual(f.tell(), size)
            # the 'a' that was written at the end of file above
            self.assertEqual(f.read(1), b'a')
            f.seek(-size-1, 1)
            self.assertEqual(f.read(1), b'z')
            self.assertEqual(f.tell(), 1)
    def test_lseek(self):
        if verbose:
            print('play around with os.lseek() with the built largefile')
        with self.open(TESTFN, 'rb') as f:
            self.assertEqual(os.lseek(f.fileno(), 0, 0), 0)
            self.assertEqual(os.lseek(f.fileno(), 42, 0), 42)
            self.assertEqual(os.lseek(f.fileno(), 42, 1), 84)
            self.assertEqual(os.lseek(f.fileno(), 0, 1), 84)
            self.assertEqual(os.lseek(f.fileno(), 0, 2), size+1+0)
            self.assertEqual(os.lseek(f.fileno(), -10, 2), size+1-10)
            self.assertEqual(os.lseek(f.fileno(), -size-1, 2), 0)
            self.assertEqual(os.lseek(f.fileno(), size, 0), size)
            # the 'a' that was written at the end of file above
            self.assertEqual(f.read(1), b'a')
    def test_truncate(self):
        if verbose:
            print('try truncate')
        with self.open(TESTFN, 'r+b') as f:
            # this is already decided before start running the test suite
            # but we do it anyway for extra protection
            if not hasattr(f, 'truncate'):
                raise unittest.SkipTest("open().truncate() not available on this system")
            f.seek(0, 2)
            # else we've lost track of the true size
            self.assertEqual(f.tell(), size+1)
            # Cut it back via seek + truncate with no argument.
            newsize = size - 10
            f.seek(newsize)
            f.truncate()
            self.assertEqual(f.tell(), newsize) # else pointer moved
            f.seek(0, 2)
            self.assertEqual(f.tell(), newsize) # else wasn't truncated
            # Ensure that truncate(smaller than true size) shrinks
            # the file.
            newsize -= 1
            f.seek(42)
            f.truncate(newsize)
            if self.new_io:
                # io-based files must not move the pointer on truncate().
                self.assertEqual(f.tell(), 42)
            f.seek(0, 2)
            self.assertEqual(f.tell(), newsize)
            # XXX truncate(larger than true size) is ill-defined
            # across platform; cut it waaaaay back
            f.seek(0)
            f.truncate(1)
            if self.new_io:
                self.assertEqual(f.tell(), 0) # else pointer moved
            f.seek(0)
            self.assertEqual(len(f.read()), 1) # else wasn't truncated
    def test_seekable(self):
        # Issue #5016; seekable() can return False when the current position
        # is negative when truncated to an int.
        if not self.new_io:
            self.skipTest("builtin file doesn't have seekable()")
        for pos in (2**31-1, 2**31, 2**31+1):
            with self.open(TESTFN, 'rb') as f:
                f.seek(pos)
                self.assertTrue(f.seekable())
def test_main():
    # On Windows and Mac OSX this test comsumes large resources; It
    # takes a long time to build the >2GB file and takes >2GB of disk
    # space therefore the resource must be enabled to run this test.
    # If not, nothing after this line stanza will be executed.
    if sys.platform[:3] == 'win' or sys.platform == 'darwin':
        requires('largefile',
                 'test requires %s bytes and a long time to run' % str(size))
    else:
        # Only run if the current filesystem supports large files.
        # (Skip this test on Windows, since we now always support
        # large files.)
        f = open(TESTFN, 'wb', buffering=0)
        try:
            # 2**31 == 2147483648
            f.seek(2147483649)
            # Seeking is not enough of a test: you must write and
            # flush, too!
            f.write(b'x')
            f.flush()
        except (IOError, OverflowError):
            # No large-file support on this filesystem: skip the whole module.
            f.close()
            unlink(TESTFN)
            raise unittest.SkipTest("filesystem does not have largefile support")
        else:
            f.close()
    suite = unittest.TestSuite()
    # Build one LargeFileTest subclass per open() implementation so each
    # (C io, pure-Python io, builtin file) is exercised independently.
    for _open, prefix in [(io.open, 'C'), (pyio.open, 'Py'),
                          (open, 'Builtin')]:
        class TestCase(LargeFileTest):
            pass
        TestCase.open = staticmethod(_open)
        TestCase.new_io = _open is not open
        TestCase.__name__ = prefix + LargeFileTest.__name__
        suite.addTest(TestCase('test_seek'))
        suite.addTest(TestCase('test_osstat'))
        suite.addTest(TestCase('test_seek_read'))
        suite.addTest(TestCase('test_lseek'))
        # Only schedule test_truncate when this implementation supports it.
        with _open(TESTFN, 'wb') as f:
            if hasattr(f, 'truncate'):
                suite.addTest(TestCase('test_truncate'))
        suite.addTest(TestCase('test_seekable'))
        unlink(TESTFN)
    try:
        run_unittest(suite)
    finally:
        # Always remove the huge test file, even when the run fails.
        unlink(TESTFN)
if __name__ == '__main__':
    test_main()
| apache-2.0 |
hsaputra/tensorflow | tensorflow/examples/adding_an_op/cuda_op.py | 192 | 1062 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Cuda op Python library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import tensorflow as tf
if tf.test.is_built_with_cuda():
  # Load the custom CUDA kernel only when TensorFlow was built with CUDA;
  # the shared library lives next to this module's data files.
  _cuda_op_module = tf.load_op_library(os.path.join(
      tf.resource_loader.get_data_files_path(), 'cuda_op_kernel.so'))
  # Re-export the op so callers can use `cuda_op.add_one`.
  add_one = _cuda_op_module.add_one
| apache-2.0 |
liuqr/edx-xiaodun | cms/djangoapps/contentstore/management/commands/import.py | 16 | 2180 | """
Script for importing courseware from XML format
"""
from django.core.management.base import BaseCommand, CommandError, make_option
from django_comment_common.utils import (seed_permissions_roles,
are_permissions_roles_seeded)
from xmodule.modulestore.xml_importer import import_from_xml
from xmodule.modulestore.django import modulestore
from xmodule.contentstore.django import contentstore
class Command(BaseCommand):
    """
    Import the specified data directory into the default ModuleStore
    """
    help = 'Import the specified data directory into the default ModuleStore'
    # NOTE(review): `make_option` is conventionally imported from `optparse`,
    # not `django.core.management.base` -- confirm the import at the top of
    # this file actually resolves.
    option_list = BaseCommand.option_list + (
        make_option('--nostatic',
                    action='store_true',
                    help='Skip import of static content'),
    )
    def handle(self, *args, **options):
        "Execute the command"
        # Usage: import <data directory> [--nostatic] [<course dir>...]
        if len(args) == 0:
            raise CommandError("import requires at least one argument: <data directory> [--nostatic] [<course dir>...]")
        data_dir = args[0]
        do_import_static = not (options.get('nostatic', False))
        if len(args) > 1:
            course_dirs = args[1:]
        else:
            # None means "import every course dir found under data_dir".
            course_dirs = None
        # NOTE(review): the `dis=` keyword below is not referenced by the
        # format string, so str.format() silently ignores it.
        self.stdout.write("Importing.  Data_dir={data}, course_dirs={courses}\n".format(
            data=data_dir,
            courses=course_dirs,
            dis=do_import_static))
        # Prefer the 'direct' (non-draft) modulestore; fall back to default.
        try:
            mstore = modulestore('direct')
        except KeyError:
            self.stdout.write('Unable to load direct modulestore, trying '
                              'default\n')
            mstore = modulestore('default')
        _, course_items = import_from_xml(
            mstore, data_dir, course_dirs, load_error_modules=False,
            static_content_store=contentstore(), verbose=True,
            do_import_static=do_import_static
        )
        # Seed default discussion-forum permission roles for any imported
        # course that does not have them yet.
        for module in course_items:
            course_id = module.location.course_id
            if not are_permissions_roles_seeded(course_id):
                self.stdout.write('Seeding forum roles for course {0}\n'.format(course_id))
                seed_permissions_roles(course_id)
| agpl-3.0 |
gangadhar-kadam/verve_frappe | frappe/templates/pages/website_theme.py | 4 | 1677 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import re
import frappe
from frappe.website.utils import get_shade
from frappe.website.doctype.website_theme.website_theme import get_active_theme
# This page is rendered as CSS, not HTML, so keep it out of the sitemap.
no_sitemap = 1
base_template_path = "templates/pages/website_theme.css"
# Fallback values applied by prepare() for any theme property left unset.
default_properties = {
	"background_color": "#ffffff",
	"top_bar_color": "#ffffff",
	"top_bar_text_color": "#000000",
	"footer_color": "#ffffff",
	"footer_text_color": "#000000",
	"font_size": "14px",
	"text_color": "#000000",
	"link_color": "#000000"
}
def get_context(context):
	"""Return the template context carrying the active website theme."""
	theme = get_active_theme()
	if theme:
		prepare(theme)
		return {"theme": theme}
	return {}
def prepare(theme):
	"""Fill in defaults and derived values on `theme` before rendering CSS.

	Mutates the theme document in place: applies `default_properties` for
	unset fields, derives border shades from the base colours, and builds
	the Google Fonts `@import` rules for the configured webfonts. Any
	`@import url(...)` rules found in the user-supplied `theme.css` are
	hoisted into `theme.webfont_import` so they end up at the top of the
	generated stylesheet (CSS requires @import before other rules).
	"""
	for prop in default_properties:
		if not theme.get(prop):
			theme.set(prop, default_properties[prop])
	theme.footer_border_color = get_shade(theme.footer_color, 10)
	theme.border_color = get_shade(theme.background_color, 10)
	webfonts = list(set(theme.get(key)
		for key in ("heading_webfont", 'text_webfont') if theme.get(key)))
	# BUG FIX: the rule was previously emitted nested inside itself
	# ("@import url(@import url(...);)"), which is invalid CSS and made
	# browsers ignore the webfont import entirely.
	theme.webfont_import = "\n".join(
		'@import url(http://fonts.googleapis.com/css?family={0}:400,300,400italic,700&subset=latin,latin-ext);'
		.format(font.replace(" ", "+")) for font in webfonts)
	# move @import from css field to the top of the css file
	if theme.css and "@import url" in theme.css:
		webfont_import = list(set(re.findall("@import url\([^\(\)]*\);", theme.css)))
		theme.webfont_import += "\n" + "\n".join(webfont_import)
		for wfimport in webfont_import:
			theme.css = theme.css.replace(wfimport, "")
| mit |
dh1tw/pyhamtools | pyhamtools/locator.py | 1 | 10878 | from __future__ import division
from math import pi, sin, cos, atan2, sqrt, radians, log, tan, degrees
from datetime import datetime
import pytz
import ephem
UTC = pytz.UTC
def latlong_to_locator(latitude, longitude):
    """Convert WGS84 coordinates into the corresponding Maidenhead locator.

    Args:
        latitude (float): latitude in decimal degrees (positive = North)
        longitude (float): longitude in decimal degrees (positive = East)

    Returns:
        string: six-character Maidenhead locator, e.g. ``'JN48QM'``

    Raises:
        ValueError: if either coordinate is outside the open interval
            (-90, 90) for latitude or (-180, 180) for longitude
        TypeError: if the arguments are not numeric

    Example:
        >>> latlong_to_locator(48.5208333, 9.375)
        'JN48QM'
    """
    if longitude >= 180 or longitude <= -180:
        raise ValueError
    if latitude >= 90 or latitude <= -90:
        raise ValueError

    # Shift both axes so they start at zero (0..360 / 0..180).
    lon = longitude + 180
    lat = latitude + 90

    # Field (A-R), square (0-9) and subsquare (A-X) pairs, lon first.
    pieces = [
        chr(ord('A') + int(lon / 20)),
        chr(ord('A') + int(lat / 10)),
        chr(ord('0') + int((lon % 20) / 2)),
        chr(ord('0') + int(lat % 10)),
        chr(ord('A') + int((lon - int(lon / 2) * 2) / (2 / 24))),
        chr(ord('A') + int((lat - int(lat / 1) * 1) / (1 / 24))),
    ]
    return ''.join(pieces)
def locator_to_latlong(locator):
    """Convert a Maidenhead locator into the corresponding WGS84 coordinates.

    Args:
        locator (string): locator, either 4 or 6 characters (case-insensitive)

    Returns:
        tuple (float, float): (latitude, longitude) of the grid centre

    Raises:
        ValueError: if the locator has an invalid length or characters
        TypeError: if the argument is not a string

    Example:
        >>> locator_to_latlong("JN48QM")
        (48.520833333333336, 9.375)
    """
    locator = locator.upper()

    if len(locator) == 5 or len(locator) < 4:
        raise ValueError

    def _require(char, low, high):
        # Single-character range check; str comparison == ord comparison.
        if not (low <= char <= high):
            raise ValueError

    _require(locator[0], 'A', 'R')
    _require(locator[1], 'A', 'R')
    _require(locator[2], '0', '9')
    _require(locator[3], '0', '9')
    if len(locator) == 6:
        _require(locator[4], 'A', 'X')
        _require(locator[5], 'A', 'X')

    # Field and square contributions.
    longitude = (ord(locator[0]) - ord('A')) * 20 - 180
    latitude = (ord(locator[1]) - ord('A')) * 10 - 90
    longitude += (ord(locator[2]) - ord('0')) * 2
    latitude += ord(locator[3]) - ord('0')

    if len(locator) == 6:
        # Subsquare contribution, then shift to the subsquare centre.
        longitude += (ord(locator[4]) - ord('A')) * (2 / 24)
        latitude += (ord(locator[5]) - ord('A')) * (1 / 24)
        longitude += 1 / 24
        latitude += 0.5 / 24
    else:
        # Shift to the centre of the square.
        longitude += 1
        latitude += 0.5

    return latitude, longitude
def calculate_distance(locator1, locator2):
    """Return the short-path great-circle distance between two locators.

    Args:
        locator1 (string): Maidenhead locator, either 4 or 6 characters
        locator2 (string): Maidenhead locator, either 4 or 6 characters

    Returns:
        float: distance in kilometres (haversine formula, spherical earth)

    Raises:
        ValueError: if a locator is invalid
        AttributeError: if an argument is not a string
    """
    earth_radius_km = 6371

    lat1, lon1 = locator_to_latlong(locator1)
    lat2, lon2 = locator_to_latlong(locator2)

    delta_lat = radians(lat2) - radians(lat1)
    delta_lon = radians(lon2) - radians(lon1)

    # Haversine formula.
    sin_half_dlat = sin(delta_lat / 2)
    sin_half_dlon = sin(delta_lon / 2)
    a = (sin_half_dlat * sin_half_dlat +
         cos(radians(lat1)) * cos(radians(lat2)) * sin_half_dlon * sin_half_dlon)
    central_angle = 2 * atan2(sqrt(a), sqrt(1 - a))

    return earth_radius_km * central_angle
def calculate_distance_longpath(locator1, locator2):
    """Return the long-path distance between two Maidenhead locators.

    The long path is the earth's circumference minus the short-path
    distance.

    Args:
        locator1 (string): Maidenhead locator, either 4 or 6 characters
        locator2 (string): Maidenhead locator, either 4 or 6 characters

    Returns:
        float: distance in kilometres

    Raises:
        ValueError: if a locator is invalid
        AttributeError: if an argument is not a string
    """
    earth_circumference_km = 40008
    return earth_circumference_km - calculate_distance(locator1, locator2)
def calculate_heading(locator1, locator2):
    """Return the short-path heading from the first to the second locator.

    Args:
        locator1 (string): Maidenhead locator, either 4 or 6 characters
        locator2 (string): Maidenhead locator, either 4 or 6 characters

    Returns:
        float: initial great-circle bearing in degrees, [0, 360)

    Raises:
        ValueError: if a locator is invalid
        AttributeError: if an argument is not a string

    Example:
        >>> calculate_heading("JN48QM", "QF67bf")
        74.3136...
    """
    lat1, long1 = locator_to_latlong(locator1)
    lat2, long2 = locator_to_latlong(locator2)

    r_lat1 = radians(lat1)
    r_lat2 = radians(lat2)
    d_lon = radians(long2 - long1)

    # Forward azimuth of the great circle (removed unused r_lon1/r_lon2
    # locals from the original implementation; same formula otherwise).
    b = atan2(sin(d_lon) * cos(r_lat2),
              cos(r_lat1) * sin(r_lat2) - sin(r_lat1) * cos(r_lat2) * cos(d_lon))

    # Normalize from (-180, 180] to [0, 360).
    return (degrees(b) + 360) % 360
def calculate_heading_longpath(locator1, locator2):
    """Return the long-path heading from the first to the second locator.

    The long path points exactly opposite the short path, i.e. the
    short-path heading rotated by 180 degrees.

    Args:
        locator1 (string): Maidenhead locator, either 4 or 6 characters
        locator2 (string): Maidenhead locator, either 4 or 6 characters

    Returns:
        float: heading in degrees, [0, 360)

    Raises:
        ValueError: if a locator is invalid
        AttributeError: if an argument is not a string
    """
    shortpath = calculate_heading(locator1, locator2)
    return (shortpath + 180) % 360
def calculate_sunrise_sunset(locator, calc_date=None):
    """Calculate the next sunrise/sunset for a Maidenhead locator.

    Args:
        locator (string): Maidenhead Locator, either 4 or 6 characters
        calc_date (datetime, optional): Starting datetime for the
            calculations (UTC); defaults to the current UTC time.

    Returns:
        dict: UTC datetimes under the keys ``morning_dawn``, ``sunrise``,
        ``evening_dawn`` and ``sunset``; all values are None when the sun
        never rises or never sets at that date and location (polar
        regions).

    Raises:
        ValueError: When called with a wrong or invalid input arg
        AttributeError: When the locator is not a string

    Example:
        >>> from datetime import datetime
        >>> import pytz
        >>> my_date = datetime(year=2014, month=1, day=1, tzinfo=pytz.UTC)
        >>> calculate_sunrise_sunset("JN48QM", my_date)['sunrise']
        datetime.datetime(2014, 1, 1, 7, 14, 6, 162063, tzinfo=<UTC>)
    """
    latitude, longitude = locator_to_latlong(locator)

    if calc_date is None:
        calc_date = datetime.utcnow()
    # Strict type check kept from the original API: subclasses of
    # datetime are rejected as well.
    if type(calc_date) != datetime:
        raise ValueError

    sun = ephem.Sun()
    home = ephem.Observer()
    home.lat = str(latitude)
    home.long = str(longitude)
    home.date = calc_date
    sun.compute(home)

    # All values default to None; they stay None at polar latitudes where
    # the sun never rises or never sets.
    result = {
        'morning_dawn': None,
        'sunrise': None,
        'evening_dawn': None,
        'sunset': None,
    }
    try:
        nextrise = home.next_rising(sun)
        nextset = home.next_setting(sun)
        # -6 degrees below the horizon = civil twilight boundary.
        home.horizon = '-6'
        beg_twilight = home.next_rising(sun, use_center=True)
        end_twilight = home.next_setting(sun, use_center=True)
        # NOTE: the key mapping below (evening_dawn <- next setting,
        # sunset <- end of twilight) mirrors the original, documented API.
        result['morning_dawn'] = beg_twilight.datetime().replace(tzinfo=UTC)
        result['sunrise'] = nextrise.datetime().replace(tzinfo=UTC)
        result['evening_dawn'] = nextset.datetime().replace(tzinfo=UTC)
        result['sunset'] = end_twilight.datetime().replace(tzinfo=UTC)
    except (ephem.AlwaysUpError, ephem.NeverUpError):
        # Sun never sets or never rises here: leave all values as None
        # (the original duplicated this handling in two identical blocks).
        pass

    return result
| mit |
jnerin/ansible | lib/ansible/modules/network/netvisor/pn_vrouterlbif.py | 43 | 10150 | #!/usr/bin/python
""" PN CLI vrouter-loopback-interface-add/remove """
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: pn_vrouterlbif
author: "Pluribus Networks (@amitsi)"
version_added: "2.2"
short_description: CLI command to add/remove vrouter-loopback-interface.
description:
- Execute vrouter-loopback-interface-add, vrouter-loopback-interface-remove
commands.
- Each fabric, cluster, standalone switch, or virtual network (VNET) can
provide its tenants with a virtual router (vRouter) service that forwards
traffic between networks and implements Layer 3 protocols.
options:
pn_cliusername:
description:
- Provide login username if user is not root.
required: False
pn_clipassword:
description:
- Provide login password if user is not root.
required: False
pn_cliswitch:
description:
- Target switch(es) to run the cli on.
required: False
state:
description:
- State the action to perform. Use 'present' to add vrouter loopback
interface and 'absent' to remove vrouter loopback interface.
required: True
choices: ['present', 'absent']
pn_vrouter_name:
description:
- Specify the name of the vRouter.
required: True
pn_index:
description:
- Specify the interface index from 1 to 255.
pn_interface_ip:
description:
- Specify the IP address.
required: True
"""
EXAMPLES = """
- name: add vrouter-loopback-interface
pn_vrouterlbif:
state: 'present'
pn_vrouter_name: 'ansible-vrouter'
pn_interface_ip: '104.104.104.1'
- name: remove vrouter-loopback-interface
pn_vrouterlbif:
state: 'absent'
pn_vrouter_name: 'ansible-vrouter'
pn_interface_ip: '104.104.104.1'
"""
RETURN = """
command:
description: The CLI command run on the target node(s).
returned: always
type: str
stdout:
description: The set of responses from the vrouterlb command.
returned: always
type: list
stderr:
description: The set of error responses from the vrouterlb command.
returned: on error
type: list
changed:
description: Indicates whether the CLI caused changes on the target.
returned: always
type: bool
"""
import shlex
VROUTER_EXISTS = None
LB_INTERFACE_EXISTS = None
# Index range
MIN_INDEX = 1
MAX_INDEX = 255
def pn_cli(module):
    """
    Assemble the base Netvisor CLI command string.
    It parses the username, password, switch parameters from module.
    :param module: The Ansible module to fetch username, password and switch
    :return: returns the cli string for further processing
    """
    params = module.params
    username = params['pn_cliusername']
    password = params['pn_clipassword']
    cliswitch = params['pn_cliswitch']

    # Credentials are optional; root access needs no --user flag.
    if username and password:
        cli = '/usr/bin/cli --quiet --user %s:%s ' % (username, password)
    else:
        cli = '/usr/bin/cli --quiet '

    # Target either the local switch or a named remote switch.
    if cliswitch == 'local':
        target = ' switch-local '
    else:
        target = ' switch ' + cliswitch
    return cli + target
def check_cli(module, cli):
    """
    Check whether the vRouter and the loopback interface already exist.
    Sets the module-level flags used for idempotency:
    VROUTER_EXISTS is True when the named vRouter is present on the target;
    LB_INTERFACE_EXISTS is True when a loopback interface with the given IP
    already exists on that vRouter.
    :param module: The Ansible module to fetch input parameters
    :param cli: The CLI string
    :return Global Booleans: VROUTER_EXISTS, LB_INTERFACE_EXISTS
    """
    vrouter_name = module.params['pn_vrouter_name']
    interface_ip = module.params['pn_interface_ip']

    global VROUTER_EXISTS, LB_INTERFACE_EXISTS

    # Does the vRouter exist on the target?
    show_vrouter = shlex.split(cli + ' vrouter-show format name no-show-headers ')
    out = module.run_command(show_vrouter)[1]
    VROUTER_EXISTS = vrouter_name in out.split()

    # Does a loopback interface with this IP already exist on the vRouter?
    show_lb = (cli + ' vrouter-loopback-interface-show vrouter-name %s format ip '
                     'no-show-headers' % vrouter_name)
    out = module.run_command(shlex.split(show_lb))[1]
    LB_INTERFACE_EXISTS = interface_ip in out.split()
def run_cli(module, cli):
    """
    Execute the assembled cli command on the target node(s) and exit the
    module with a JSON response describing the outcome.
    :param cli: the complete cli string to be executed on the target node(s).
    :param module: The Ansible module to fetch command
    """
    cliswitch = module.params['pn_cliswitch']
    command = get_command_from_state(module.params['state'])
    # 'out' holds stdout and 'err' holds stderr of the executed command.
    result, out, err = module.run_command(shlex.split(cli))
    # Report only the portion of the command after the switch name.
    print_cli = cli.split(cliswitch)[1]
    # Build the JSON response; exit_json() terminates the module run.
    response = dict(command=print_cli)
    if result != 0:
        response.update(
            stderr=err.strip(),
            msg="%s operation failed" % command,
            changed=False
        )
    else:
        response.update(
            msg="%s operation completed" % command,
            changed=True
        )
        if out:
            response['stdout'] = out.strip()
    module.exit_json(**response)
def get_command_from_state(state):
    """
    Map the module 'state' to the corresponding Netvisor command name.
    :param state: The state for which the respective command name is required.
    :return: the command name, or None for an unrecognised state.
    """
    state_to_command = {
        'present': 'vrouter-loopback-interface-add',
        'absent': 'vrouter-loopback-interface-remove',
    }
    return state_to_command.get(state)
def main():
    """
    Entry point: parse module arguments, verify idempotency via check_cli(),
    assemble the vrouter-loopback-interface add/remove command and run it.
    """
    module = AnsibleModule(
        argument_spec=dict(
            pn_cliusername=dict(required=False, type='str'),
            pn_clipassword=dict(required=False, type='str', no_log=True),
            pn_cliswitch=dict(required=False, type='str', default='local'),
            state=dict(required=True, type='str',
                       choices=['present', 'absent']),
            pn_vrouter_name=dict(required=True, type='str'),
            pn_interface_ip=dict(type='str'),
            pn_index=dict(type='int')
        ),
        required_if=(
            ["state", "present",
             ["pn_vrouter_name", "pn_interface_ip"]],
            ["state", "absent",
             ["pn_vrouter_name", "pn_interface_ip"]]
        )
    )
    # Accessing the arguments
    state = module.params['state']
    vrouter_name = module.params['pn_vrouter_name']
    interface_ip = module.params['pn_interface_ip']
    index = module.params['pn_index']
    command = get_command_from_state(state)
    # Building the CLI command string
    cli = pn_cli(module)
    # Validate the (optional) loopback interface index before using it.
    if index:
        if not MIN_INDEX <= index <= MAX_INDEX:
            module.exit_json(
                msg="Index must be between 1 and 255",
                changed=False
            )
        index = str(index)
    if command == 'vrouter-loopback-interface-remove':
        # check_cli sets the global VROUTER_EXISTS / LB_INTERFACE_EXISTS flags.
        check_cli(module, cli)
        if VROUTER_EXISTS is False:
            module.exit_json(
                skipped=True,
                msg='vRouter %s does not exist' % vrouter_name
            )
        if LB_INTERFACE_EXISTS is False:
            # Nothing to remove: report as skipped (idempotent behaviour).
            module.exit_json(
                skipped=True,
                msg=('Loopback interface with IP %s does not exist on %s'
                     % (interface_ip, vrouter_name))
            )
        if not index:
            # To remove loopback interface, we need the index.
            # If index is not specified, get the Loopback interface index
            # using the given interface ip.
            get_index = cli
            get_index += (' vrouter-loopback-interface-show vrouter-name %s ip '
                          '%s ' % (vrouter_name, interface_ip))
            get_index += 'format index no-show-headers'
            get_index = shlex.split(get_index)
            out = module.run_command(get_index)[1]
            # NOTE(review): takes the second whitespace-separated token of the
            # show output as the index -- presumably the first token is another
            # column; confirm against the actual CLI output format.
            index = out.split()[1]
        cli += ' %s vrouter-name %s index %s' % (command, vrouter_name, index)
    if command == 'vrouter-loopback-interface-add':
        # check_cli sets the global VROUTER_EXISTS / LB_INTERFACE_EXISTS flags.
        check_cli(module, cli)
        if VROUTER_EXISTS is False:
            module.exit_json(
                skipped=True,
                msg=('vRouter %s does not exist' % vrouter_name)
            )
        if LB_INTERFACE_EXISTS is True:
            # Already configured: report as skipped (idempotent behaviour).
            module.exit_json(
                skipped=True,
                msg=('Loopback interface with IP %s already exists on %s'
                     % (interface_ip, vrouter_name))
            )
        cli += (' %s vrouter-name %s ip %s'
                % (command, vrouter_name, interface_ip))
        if index:
            cli += ' index %s ' % index
    # run_cli executes the command and exits the module with a JSON result.
    run_cli(module, cli)
# Ansible boiler-plate
# AnsibleModule is imported at the bottom of the file, per the older
# Ansible module convention; it only needs to exist before main() runs.
from ansible.module_utils.basic import AnsibleModule
if __name__ == '__main__':
    main()
| gpl-3.0 |
pcm17/tensorflow | tensorflow/contrib/training/python/training/bucket_ops.py | 22 | 17043 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations for bucketing data into groups.
The classes and functions in this module are used to queue up data into
buckets conditional on side information (e.g. sequence length).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.summary import summary
from tensorflow.python.training import input as input_py
from tensorflow.python.training import queue_runner
# pylint: disable=protected-access
_as_original_type = input_py._as_original_type
_as_tensor_list = input_py._as_tensor_list
_restore_sparse_tensors = input_py._restore_sparse_tensors
_dtypes = input_py._dtypes
_store_sparse_tensors = input_py._store_sparse_tensors
_validate_keep_input = input_py._validate_keep_input
_shapes = input_py._shapes
_smart_cond = input_py._smart_cond
_which_queue = input_py._which_queue
# pylint: enable=protected-access
def _validate_bucket(tensor_list):
  """Convert the inputs to tensors and require a non-empty bucket."""
  converted = ops.convert_n_to_tensor_or_indexed_slices(tensor_list)
  if not converted:
    raise ValueError("Expected at least one tensor in bucket().")
  return converted
def bucket(tensors,
           which_bucket,
           batch_size,
           num_buckets,
           num_threads=1,
           capacity=32,
           shapes=None,
           dynamic_pad=False,
           allow_smaller_final_batch=False,
           keep_input=True,
           shared_name=None,
           name=None):
  """Lazy bucketing of input tensors according to `which_bucket`.

  The argument `tensors` can be a list or a dictionary of tensors.
  The value returned by the function will be of the same type
  as `tensors`.

  The tensors entering this function are put into the bucket given by
  `which_bucket`. Each bucket has its own queue. When a bucket contains
  `batch_size` elements, this minibatch is pushed onto a top queue. The
  tensors returned from this function are the result of dequeueing the
  next minibatch from this top queue.

  This function is implemented using several queues. A `QueueRunner` for the
  queues is added to the current `Graph`'s `QUEUE_RUNNER` collection.

  As the returned tensors are the result of a dequeue operation, evaluating
  them will throw a `tf.errors.OutOfRangeError` when the input queue is
  exhausted. If these tensors are feeding another input queue, its queue runner
  will catch this exception, however, if they are used in your main thread
  you are responsible for catching this yourself.

  *N.B.:* If `dynamic_pad` is `False`, you must ensure that either
  (i) the `shapes` argument is passed, or (ii) all of the tensors in
  `tensors` must have fully-defined shapes. `ValueError` will be
  raised if neither of these conditions holds.

  If `dynamic_pad` is `True`, it is sufficient that the *rank* of the
  tensors is known, but individual dimensions may have shape `None`.
  In this case, for each enqueue the dimensions with value `None`
  may have a variable length; upon dequeue, the output tensors will be padded
  on the right to the maximum shape of the tensors in the current minibatch.
  For numbers, this padding takes value 0. For strings, this padding is
  the empty string. See `PaddingFIFOQueue` for more info.

  If `allow_smaller_final_batch` is `True`, a smaller batch value than
  `batch_size` is returned when the queues are closed and there are not enough
  elements to fill the batch, otherwise the pending elements are discarded.
  In addition, all output tensors' static shapes, as accessed via the
  `get_shape()` method will have a 0th `Dimension` value of `None`, and
  operations that depend on fixed batch_size would fail.

  Args:
    tensors: The list or dictionary of tensors, representing a single element,
      to bucket. Nested lists are not supported.
    which_bucket: An `int32` scalar Tensor taking a value in `[0, num_buckets)`.
    batch_size: The new batch size pulled from the queue (all queues will have
      the same size). If a list is passed in then each bucket will have a
      different batch_size.
      (python int, int32 scalar or iterable of integers of length num_buckets).
    num_buckets: A python integer, the number of buckets.
    num_threads: An integer. The number of threads enqueuing `tensors`.
    capacity: An integer. The maximum number of minibatches in the top queue,
      and also the maximum number of elements within each bucket.
    shapes: (Optional) The shapes for each example. Defaults to the
      inferred shapes for `tensors`.
    dynamic_pad: Boolean. Allow variable dimensions in input shapes.
      The given dimensions are padded upon dequeue so that tensors within a
      batch have the same shapes.
    allow_smaller_final_batch: (Optional) Boolean. If `True`, allow the final
      batches to be smaller if there are insufficient items left in the queues.
    keep_input: A `bool` scalar Tensor. If provided, this tensor controls
      whether the input is added to the queue or not. If it evaluates `True`,
      then `tensors` are added to the bucket; otherwise they are dropped. This
      tensor essentially acts as a filtering mechanism.
    shared_name: (Optional). If set, the queues will be shared under the given
      name across multiple sessions.
    name: (Optional) A name for the operations.

  Returns:
    A tuple `(bucket, outputs)` where `bucket` is
    a `int32` scalar tensor and `outputs` is a list or
    dictionary of batched outputs corresponding to elements of `tensors`.
    Every step will receive a new bucket of outputs.

  Raises:
    ValueError: If the `shapes` are not specified, and cannot be
      inferred from the elements of `tensors` or if batch_size is a sequence
      but its length != num_buckets.
  """
  # Normalize batch_size to a per-bucket list.
  batch_size_per_bucket = False
  if isinstance(batch_size, (list, tuple)):
    batch_size_per_bucket = True
    if len(batch_size) != num_buckets:
      raise ValueError(
          "If batch_size is a list it must have num_buckets elements")
  else:
    batch_size = [batch_size] * num_buckets
  tensor_list = _as_tensor_list(tensors)
  with ops.name_scope(name, "bucket", tensor_list) as name:
    tensor_list = _validate_bucket(tensor_list)
    keep_input = _validate_keep_input(keep_input, enqueue_many=False)
    (tensor_list, sparse_info) = _store_sparse_tensors(
        tensor_list, enqueue_many=False, keep_input=keep_input)
    # Round-trip batch_size to a tensor, and possibly back. Statically known
    # sizes are kept as python ints so shapes stay fully defined.
    for i, bucket_batch_size in enumerate(batch_size):
      bucket_batch_size = ops.convert_to_tensor(
          bucket_batch_size, dtype=dtypes.int32, name="batch_size")
      static_batch_size = tensor_util.constant_value(bucket_batch_size)
      batch_size[i] = (static_batch_size if static_batch_size is not None else
                       bucket_batch_size)
    types = _dtypes([tensor_list])
    shapes = _shapes([tensor_list], shapes, enqueue_many=False)
    which_bucket = ops.convert_to_tensor(
        which_bucket, dtype=dtypes.int32, name="which_bucket")
    queue_creator = _which_queue(dynamic_pad)
    # One queue per bucket; elements wait here until a full batch is ready.
    bucket_queues = []
    for i in range(num_buckets):
      shared_name_i = ("%s_%d" % (shared_name, i) if shared_name is not None
                       else None)
      bucket_queues.append(
          queue_creator(
              capacity=capacity,
              dtypes=types,
              shapes=shapes,
              shared_name=shared_name_i,
              name="bucket_queue_%d" % i))
    # NOTE: when batch_size is uniform, static_batch_size still holds the
    # (single) static size from the loop above; otherwise the static leading
    # dimension is left undefined.
    maybe_static_batch_size = (
        None if (allow_smaller_final_batch or batch_size_per_bucket)
        else static_batch_size)
    bucket_shapes = [
        tensor_shape.vector(maybe_static_batch_size).concatenate(s)
        for s in bucket_queues[0].shapes
    ]
    # top_queue is a PaddingFIFOQueue even if the bucket queues are regular FIFO
    # queues because if we use allow_smaller_final_batch, shapes will
    # contain Nones in their first entry; as a result, a regular
    # FIFOQueue would die when being passed shapes that are not fully defined.
    top_queue = data_flow_ops.PaddingFIFOQueue(
        capacity=capacity,
        dtypes=[dtypes.int32] + types,
        shapes=[tensor_shape.scalar()] + bucket_shapes,
        shared_name=shared_name,
        name="top_queue")

    def enqueue_which():
      """Return an op that enqueues tensor_list into the selected bucket."""

      def enqueue_single(i):
        return bucket_queues[i].enqueue(tensor_list)

      enqueues = [
          control_flow_ops.cond(
              math_ops.equal(which_bucket, i),
              functools.partial(enqueue_single, i), control_flow_ops.no_op)
          for i in range(num_buckets)
      ]
      return control_flow_ops.group(*enqueues, name="group_enqueues")

    # Only enqueue when keep_input evaluates True (filtering mechanism).
    maybe_enqueue = _smart_cond(
        keep_input,
        enqueue_which,
        control_flow_ops.no_op)
    bucket_enqueue_ops = [maybe_enqueue] * num_threads
    if allow_smaller_final_batch:
      which_dequeue = lambda q: q.dequeue_up_to
    else:
      which_dequeue = lambda q: q.dequeue_many
    # Each op dequeues a full minibatch from one bucket and pushes it,
    # tagged with the bucket id, onto the top queue.
    enqueues_to_top = [
        top_queue.enqueue(
            [constant_op.constant(i)] + which_dequeue(q)(
                bs, name="read_bucket_%d" % i),
            name="enqueue_from_bucket_%d" % i)
        for i, (q, bs) in enumerate(zip(bucket_queues, batch_size))
    ]
    for i, q in enumerate(bucket_queues):
      queue_runner.add_queue_runner(
          queue_runner.QueueRunner(
              q, [enqueues_to_top[i]],
              queue_closed_exception_types=(errors.OutOfRangeError,
                                            errors.CancelledError)))
    queue_runner.add_queue_runner(
        queue_runner.QueueRunner(
            top_queue,
            bucket_enqueue_ops,
            queue_closed_exception_types=(errors.OutOfRangeError,
                                          errors.CancelledError)))
    for q in bucket_queues:
      # BUGFIX: report each bucket queue's own size; previously this logged
      # top_queue.size() under every bucket queue's name, so all per-bucket
      # summaries duplicated the same value.
      summary.scalar("bucket/%s/size" % q.name,
                     math_ops.cast(q.size(), dtypes.float32))
    summary.scalar("bucket/%s/fraction_of_%d_full" % (top_queue.name, capacity),
                   math_ops.cast(top_queue.size(), dtypes.float32) *
                   (1. / capacity))
    dequeued = top_queue.dequeue(name="dequeue_top")
    which_bucket_dequeued = dequeued[0]
    dequeued = dequeued[1:]
    dequeued = _restore_sparse_tensors(dequeued, sparse_info)
    return (which_bucket_dequeued, _as_original_type(tensors, dequeued))
def bucket_by_sequence_length(input_length,
                              tensors,
                              batch_size,
                              bucket_boundaries,
                              num_threads=1,
                              capacity=32,
                              shapes=None,
                              dynamic_pad=False,
                              allow_smaller_final_batch=False,
                              keep_input=True,
                              shared_name=None,
                              name=None):
  """Lazy bucketing of inputs according to their length.

  This method calls `tf.contrib.training.bucket` under the hood, after first
  subdividing the bucket boundaries into separate buckets and identifying which
  bucket the given `input_length` belongs to. See the documentation for
  `bucket` for details of the other arguments.

  Args:
    input_length: `int32` scalar `Tensor`, the sequence length of tensors.
    tensors: The list or dictionary of tensors, representing a single element,
      to bucket. Nested lists are not supported.
    batch_size: The new batch size pulled from the queue (all queues will have
      the same size). If a list is passed in then each bucket will have a
      different batch_size.
      (python int, int32 scalar or iterable of integers of length num_buckets).
    bucket_boundaries: int list, increasing non-negative numbers.
      The edges of the buckets to use when bucketing tensors. Two extra buckets
      are created, one for `input_length < bucket_boundaries[0]` and
      one for `input_length >= bucket_boundaries[-1]`.
    num_threads: An integer. The number of threads enqueuing `tensors`.
    capacity: An integer. The maximum number of minibatches in the top queue,
      and also the maximum number of elements within each bucket.
    shapes: (Optional) The shapes for each example. Defaults to the
      inferred shapes for `tensors`.
    dynamic_pad: Boolean. Allow variable dimensions in input shapes.
      The given dimensions are padded upon dequeue so that tensors within a
      batch have the same shapes.
    allow_smaller_final_batch: (Optional) Boolean. If `True`, allow the final
      batches to be smaller if there are insufficient items left in the queues.
    keep_input: A `bool` scalar Tensor. If provided, this tensor controls
      whether the input is added to the queue or not. If it evaluates `True`,
      then `tensors` are added to the bucket; otherwise they are dropped. This
      tensor essentially acts as a filtering mechanism.
    shared_name: (Optional). If set, the queues will be shared under the given
      name across multiple sessions.
    name: (Optional) A name for the operations.

  Returns:
    A tuple `(sequence_length, outputs)` where `sequence_length` is
    a 1-D `Tensor` of size `batch_size` and `outputs` is a list or dictionary
    of batched, bucketed, outputs corresponding to elements of `tensors`.

  Raises:
    TypeError: if `bucket_boundaries` is not a list of python integers.
    ValueError: if `bucket_boundaries` is empty or contains non-increasing
      values or if batch_size is a list and its length doesn't equal the number
      of buckets.
  """
  tensor_list = _as_tensor_list(tensors)
  if not isinstance(bucket_boundaries, (list, tuple)):
    raise TypeError(
        "bucket_boundaries must be a list or tuple, but received: %s" %
        bucket_boundaries)
  if not bucket_boundaries:
    raise ValueError("bucket_boundaries must not be empty")
  # Validate that boundaries are integers and strictly increasing.
  for (s, e) in zip(bucket_boundaries[:-1], bucket_boundaries[1:]):
    if not isinstance(s, int) or not isinstance(e, int):
      raise TypeError("bucket boundaries must be integers, but saw: %s and %s" %
                      (s, e))
    if s >= e:
      raise ValueError(
          "Buckets must contain sequential increasing lengths, but saw: "
          "%d before %d" % (s, e))
  with ops.name_scope(name, "bucket_by_sequence_length",
                      [input_length] + tensor_list) as name:
    input_length = ops.convert_to_tensor(
        input_length, dtype=dtypes.int32, name="input_length")
    # Bucketing conditions are:
    #   l < b[0]
    #   b[0] <= l < b[1]
    #   b[1] <= l < b[2]
    #   ...
    #   b[N-2] <= l < b[N-1]
    #   b[N-1] <= l
    # Equivalent to:
    #   [-inf, b[0], b[1], ..., b[N-1]] <= l < [b[0], b[1], ..., b[N-1], inf]
    buckets_min = [np.iinfo(np.int32).min] + list(bucket_boundaries)
    buckets_max = list(bucket_boundaries) + [np.iinfo(np.int32).max]
    conditions_c = math_ops.logical_and(
        math_ops.less_equal(buckets_min, input_length),
        math_ops.less(input_length, buckets_max))
    # Exactly one condition is True; its index is the bucket number.
    which_bucket = math_ops.reduce_min(array_ops.where(conditions_c))
    which_bucket = math_ops.to_int32(which_bucket)
    if shapes is not None:
      # Prepend the scalar shape of input_length, which is bucketed along.
      shapes = [tensor_shape.scalar()] + shapes
    _, dequeued = bucket(
        tensors=[input_length] + tensor_list,
        which_bucket=which_bucket,
        batch_size=batch_size,
        num_buckets=len(bucket_boundaries) + 1,
        num_threads=num_threads,
        capacity=capacity,
        shapes=shapes,
        dynamic_pad=dynamic_pad,
        allow_smaller_final_batch=allow_smaller_final_batch,
        keep_input=keep_input,
        shared_name=shared_name)
    return (dequeued[0], _as_original_type(tensors, dequeued[1:]))
__all__ = ["bucket", "bucket_by_sequence_length"]
| apache-2.0 |
kalkun/segmentor | preprocessing.py | 1 | 18286 | """
Example run
```
python3 preprocessing.py
```
"""
from PIL import Image
from scipy import ndimage
from skimage.filters import rank
from skimage.morphology import square
from skimage.morphology import disk
from skimage.morphology import white_tophat
import numpy
import cv2
import matplotlib.pyplot as plt
import PIL
from unbuffered import Unbuffered
import sys
# make print() not wait on the same buffer as the
# def it exists in:
sys.stdout = Unbuffered(sys.stdout)
class Preprocess:
    """
    Preprocess class is responsible for anything preprocessing. It is built
    for easy convolution of the preprocessing operations, so that operations
    may easily follow each other in any order by dotting them out like so:
    ```
    obj = Preprocess(
        image="./STARE/im0255.ppm"
    ).meanFilter(
    ).show(
    ).greyOpening(
    ).show()
    ```
    Notice how `show()` can be called after any operation. `show()` uses the
    PIL Image debugger to show the image.
    The implemented methods are generally limited to the methods described in
    Marin et al ITM 2011. However some methods allow for different
    parameters to be used in the operation, where the ones described in Marin
    et al ITM 2011 are merely defaults.
    To run the methods described in Marin et al 2011 in the same order as
    described, the method `process` can be used:
    ```
    obj = Preprocess(
        image="./STARE/im0003.ppm"
    ).process(
    ).show(
    ).save(
        path="./im0003_processed.png"
    )
    ```
    Non standard requisites for running are:
    - scipy https://www.scipy.org/
    - cv2 http://opencv-python-tutroals.readthedocs.io/en/latest/
    - skimage http://scikit-image.org/
    @class Preprocess
    @param image {string} The path to the image to be preprocessed.
    @param maskTh {int} The threshold value to create the mask from
    @property source {string} Image source
    @property image {PIL obj} PIL Image object
    @property mask {numpy array} The mask matrix which is 0 in the area
        outside FOV and 1's inside FOV
    @property threshold {int} The threshold value from which the mask is
        made from. Lower intensity than threshold and the pixel is
        considered outside FOV and inside otherwise.
    """
    def __init__(self, image, maskTh=50):
        self.initialized = False
        self.__printStatus(
            "Initialize preprocessing for: " + image,
            isEnd=True,
            initial=True
        )
        self.source = image
        # Base name without directory or extension, e.g. "im0255"
        self.name = image.split("/")[-1].split(".")[0]
        self.image = Image.open(image)
        self.loaded = self.image.load()
        self.threshold = maskTh
        self.extractColorBands()
        # Mask is 1 where the red band is above the threshold (inside the
        # field of view) and 0 outside of it.
        self.mask = numpy.uint8(
            numpy.greater(
                self.red_array,
                self.threshold
            ).astype(int)
        )
    def save(self, path, array=numpy.empty(0), useMask=False, rotate=True):
        """
        Saves the image array as png at the desired path.
        @method save
        @param path {string} the path where the image will be saved.
        @param array {numpy array} The array which the image is made from,
            default is self.image_array
        @param useMask {Bool} Whether to reset non FOV pixels using the mask.
            Default is False
        @param rotate {Bool} Whether to counter-rotate when converting the
            array to an image. Default is True
        """
        if not array.any():
            array = self.image_array
        if useMask:
            array = array * self.mask
        # BUGFIX: `rotate` belongs to _arrayToImage; previously it was passed
        # as a keyword to PIL's Image.save, where it was silently ignored.
        self._arrayToImage(array, rotate=rotate).save(path, "png")
        self.__printStatus("saving to " + path + "...")
        self.__printStatus("[done]", True)
        return self
    def _arrayToImage(self, array=numpy.empty(0), rotate=True):
        """
        Convert a numpy array to a PIL image.
        @private
        @method _arrayToImage
        @param array {numpy array} array which is converted to an image
        @param rotate {Bool} If true the image is transposed and rotated to
            counter the numpy conversion of arrays.
        """
        self.__printStatus("array to image...")
        if not array.any():
            array = self.image_array
        img = Image.fromarray(numpy.uint8(array))
        self.__printStatus("[done]", True)
        if rotate:
            return img.transpose(Image.FLIP_TOP_BOTTOM).rotate(-90)
        else:
            return img
    def show(
            self,
            array=numpy.empty(0),
            rotate=True,
            invert=False,
            useMask=False,
            mark=None
        ):
        """
        Display the (optionally masked/inverted) image with the PIL viewer.
        @method show
        @param array {numpy array} image array to be shown.
        @param rotate {Bool} Whether to rotate countering numpys array
            conversion, default True.
        @param invert {Bool} Invert the image, default False.
        @param useMask {Bool} Reset non FOV pixels using the mask, default
            is False.
        @param mark {tuple|None} Optional (x, y) position highlighted in blue.
        """
        if not array.any():
            array = self.image_array
        # BUGFIX: apply the mask BEFORE converting to an image; previously it
        # was applied to the array after the image was created and therefore
        # had no visible effect.
        if useMask:
            array = array * self.mask
        im = self._arrayToImage(array, rotate=rotate)
        self.__printStatus("show image...")
        if mark:
            im = im.convert("RGB")
            pixels = im.load()
            x, y = mark
            for i in range(x-1, x+1):
                for j in range(y-1, y+1):
                    # color an area around the mark
                    # blue, for easier visibility
                    pixels[i, j] = (0, 0, 255)
        if invert:
            Image.eval(im, lambda x: 255-x).show()
        else:
            im.show()
        self.__printStatus("[done]", True)
        return self
    def extractColorBands(self):
        """
        Extract the red and green channels of the source image into
        separate greyscale arrays; the green band becomes the working
        image array (self.image_array).
        @method extractColorBands
        """
        self.__printStatus("Extract color bands...")
        green_array = numpy.empty([self.image.size[0], self.image.size[1]], int)
        red_array = numpy.empty([self.image.size[0], self.image.size[1]], int)
        for x in range(self.image.size[0]):
            for y in range(self.image.size[1]):
                red_array[x, y] = self.loaded[x, y][0]
                green_array[x, y] = self.loaded[x, y][1]
        self.green_array = green_array
        self.red_array = red_array
        self.image_array = self.green_array
        self.__printStatus("[done]", True)
        return self
    def greyOpening(self, array=numpy.empty(0)):
        """
        Makes a 3x3 morphological grey opening
        @method greyOpening
        @param array {numpy array} array to operate on.
        """
        self.__printStatus("Grey opening...")
        if not array.any():
            array = self.image_array
        self.grey_opened = ndimage.morphology.grey_opening(array, [3, 3])
        self.image_array = self.grey_opened * self.mask
        self.__printStatus("[done]", True)
        return self
    def meanFilter(self, m=3, array=numpy.empty(0)):
        """
        Mean filtering, replaces the intensity value, by the average
        intensity of a pixels neighbours including itself.
        @method meanFilter
        @param m {int} The width and height of the m x m filtering matrix,
            default is 3.
        @param array {numpy array} the array which the operation is carried
            out on.
        """
        self.__printStatus("Mean filtering " + str(m) + "x" + str(m) + "...")
        if not array.any():
            array = self.image_array
        # rank.mean requires an unsigned integer image
        if array.dtype not in ["uint8", "uint16"]:
            array = numpy.uint8(array)
        mean_filtered = rank.mean(array, square(m), mask=self.mask)
        self.image_array = mean_filtered * self.mask
        self.__printStatus("[done]", True)
        return self
    def gaussianFilter(self, array=numpy.empty(0), sigma=1.8, m=9):
        """
        Gaussian smoothing of the image.
        @method gaussianFilter
        @param array {numpy array} the array the operation is carried out
            on, default is the image_array.
        @param sigma {Float} The value of sigma to be used with the gaussian
            filter operation
        @param m {int} The size of the m x m matrix to filter with.
        """
        self.__printStatus(
            "Gaussian filter sigma=" + str(sigma) + ", m=" + str(m) + "..."
        )
        if not array.any():
            array = self.image_array
        self.image_array = cv2.GaussianBlur(array, (m, m), sigma) * self.mask
        self.__printStatus("[done]", True)
        return self
    def _getBackground(self, array=numpy.empty(0), threshold=None):
        """
        Compute a background image that is unbiased at the edge of the FOV:
        a 69x69 mean filter whose window, near the FOV border, is corrected
        by replacing non-FOV pixels with the mean of the FOV pixels.
        @method _getBackground
        @param array {numpy array} the array used for FOV thresholding,
            default is the red band (self.red_array).
        @param threshold {int} Threshold that is used to compute a
            background image, default is self.threshold.
        """
        if not array.any():
            array = self.red_array
        if not threshold:
            threshold = self.threshold
        # meanFilter() overwrites self.image_array, so stash and restore it.
        saved_image_array = self.image_array
        background = self.meanFilter(m=69).image_array
        self.__printStatus("Get background image...")
        self.image_array = saved_image_array
        for x in range(len(background)):
            for y in range(len(background[0])):
                if array[x, y] > threshold:
                    # Clamp the window around (x, y) to the image bounds.
                    # NOTE(review): the window is -35..+34 along x but
                    # -35..+35 along y -- presumably both were meant to span
                    # the same 69-pixel filter window; confirm against
                    # Marin et al 2011 before changing.
                    x_start = max(x - 35, 0)
                    x_end = min(x + 34, len(background) - 1) + 1
                    y_start = max(y - 35, 0)
                    y_end = min(y + 35, len(background[0]) - 1) + 1
                    # Same sub-window taken from the thresholding array
                    window_src = array[x_start:x_end, y_start:y_end]
                    # indexes of the non FOV / FOV pixels in the window
                    nonFOVs = numpy.less(window_src, threshold)
                    FOVs = numpy.greater(window_src, threshold)
                    # subMat is the window with (x, y) as center; copy so we
                    # do not write into `background` directly.
                    subMat = numpy.array(
                        background[x_start:x_end, y_start:y_end], copy=True)
                    # Replace out-of-FOV pixels with the mean of the
                    # remaining (FOV) pixels in the square, then average.
                    subMat[nonFOVs] = subMat[FOVs].mean()
                    background[x, y] = subMat.mean()
        self.__printStatus("[done]", True)
        return background
    def subtractBackground(self, array=numpy.empty(0)):
        """
        Subtract the FOV-corrected background from the image.
        @method subtractBackground
        @param array {numpy array} the array the operation is carried out
            on, default is the image_array.
        """
        if not array.any():
            array = self.image_array
        background = self._getBackground() * self.mask
        self.__printStatus("Subtract background...")
        # int16 so the difference may go negative without wrapping around
        self.image_array = numpy.subtract(
            numpy.int16(array),
            numpy.int16(background)
        ) * self.mask
        self.__printStatus("[done]", True)
        return self
    def linearTransform(self, array=numpy.empty(0)):
        """
        Shade correction maps the background-subtracted image linearly onto
        the 8 bit grayscale range [0-255].
        from: http://stackoverflow.com/a/1969274/2853237
        @method linearTransform
        @param array {numpy array} the array the operation is carried out
            on, default is the image_array.
        """
        self.__printStatus("Linear transforming...")
        if not array.any():
            array = self.image_array
        # Figure out how 'wide' the current value range is
        leftSpan = array.max() - array.min()
        rightSpan = 255
        array = ((array - array.min()) / leftSpan) * rightSpan
        self.image_array = array * self.mask
        self.__printStatus("[done]", True)
        return self
    def transformIntensity(self, array=numpy.empty(0)):
        """
        Shift the intensities so that the most frequent grey level (the
        background mode) lands on 128, clipping to [0, 255].
        @method transformIntensity
        @param array {numpy array} the array the operation is carried out
            on, default is the image_array.
        """
        self.__printStatus("Scale intensity levels...")
        if not array.any():
            array = self.image_array
        # The mode of the intensity histogram
        counts = numpy.bincount(array.astype(int).flat)
        ginput_max = numpy.argmax(counts)
        for x in range(len(array)):
            for y in range(len(array[0])):
                # BUGFIX: the shifted value was previously computed as a bare
                # expression and never assigned, so the transform did nothing
                # but clip; assign the shift before clipping.
                array[x, y] = array[x, y] + 128 - ginput_max
                if array[x, y] < 0:
                    array[x, y] = 0
                elif array[x, y] > 255:
                    array[x, y] = 255
        self.image_array = array * self.mask
        self.__printStatus("[done]", True)
        return self
    def vesselEnhance(self, array=numpy.empty(0)):
        """
        Enhance vessels via a white top-hat transform of the complemented
        image with a disk-shaped structuring element.
        @method vesselEnhance
        @param array {numpy array} the array the operation is carried out
            on, default is the image_array.
        """
        self.__printStatus("Vessel enhancement...")
        if not array.any():
            array = self.image_array
        # disk shaped mask with radius 8
        disk_shape = disk(8)
        # the complimentary image is saved to hc:
        array = numpy.uint8(array)
        hc = 255 - array
        # Top Hat transform
        # https://en.wikipedia.org/wiki/Top-hat_transform
        # White top hat is defined as the difference between
        # the opened image and the original image.
        # in this case the starting image is the complimentary image `hc`
        self.image_array = white_tophat(hc, selem=disk_shape) * self.mask
        self.__printStatus("[done]", True)
        return self
    def __printStatus(self, status, isEnd=False, initial=False):
        """
        Print a progress line, padding with tabs to a fixed width so the
        trailing "[done]" markers line up.
        @private
        @method __printStatus
        @param status {string}
        @param isEnd {Bool} Whether to end with a newline or not, default is
            false.
        @param initial {Bool} Whether this is the first status message to be
            printed, default False.
        """
        if not initial and not isEnd:
            status = "\t" + status
        if initial:
            status = "\n" + status
        if isEnd:
            delim = "\n"
        else:
            delim = ""
        # set tabs so status length is 48
        tabs = ((48 - len(status)) // 8) * "\t"
        status += tabs
        print(status, end=delim, sep="")
    def process(self, enhance=True, onlyEnhance=False):
        """
        `process` starts the preprocess process described in
        Marin et al ITM [2011]
        The article works with two types of preprocessed images.
        The first is the convoluted image obtained with all operations
        except for `vesselEnhance`, denoted the homogenized image. The
        second is the vessel enhanced image, which is the convolution of the
        vessel enhancement operation on the homogenized image.
        This method supports both images. If `enhance` is False then
        self.image_array will be of the homogenized image and afterwards the
        vessel enhanced image can be computed without starting over by
        setting `onlyEnhance` to True. So to compute both images one at a
        time one could call:
        ```
        obj = Preprocess(
            "./im0075.ppm"
        )
        .process(
            enhance=False
        ).show(
        ).process(
            onlyEnhance=True
        ).show()
        ```
        @method process
        @param enhance {Bool} Whether to also process the vessel enhancement
            operation or not, default True.
        @param onlyEnhance {Bool} Whether to only do the vessel enhancement
            operation, default False.
        """
        if not onlyEnhance:
            self.greyOpening()
            self.meanFilter()
            self.gaussianFilter()
            self.subtractBackground()
            self.linearTransform()
            self.transformIntensity()
        if enhance or onlyEnhance:
            self.vesselEnhance()
        # returns the object after all described preprocessing has taken
        # place; the result is available on self.image_array or via
        # self.show() / self.save(<path>)
        return self
rfinn/LCS | paper1code/LCSReadmasterBase.py | 1 | 17709 | #!/usr/bin/env python
import pyfits
from LCScommon import *
from pylab import *
import os
import mystuff as my
class baseCluster:
def __init__(self,clustername):
#Get current path so program can tell if this is being run on Becky or Rose's computer
self.prefix=clustername
self.cra=clusterRA[self.prefix]
self.cdec=clusterDec[self.prefix]
self.cz=clusterz[self.prefix]
self.biweightvel=clustercbi[self.prefix]
self.biweightscale=clustersbi[self.prefix]
self.r200=2.02*(self.biweightscale)/1000./sqrt(OmegaL+OmegaM*(1.+self.cz)**3)*H0/70. # in Mpc
self.r200deg=self.r200*1000./my.DA(self.cz,h)/3600.
self.cdMpc=self.biweightvel/H0
self.cdcm=self.cdMpc*3.e24
self.csigma=self.biweightscale
self.mcl=my.clusterMass(self.csigma,self.cz,h)
self.AngDistance=my.DA(self.cz,h)
mypath=os.getcwd()
if mypath.find('Users') > -1:
print "Running on Rose's mac pro"
infile='/Users/rfinn/research/LocalClusters/MasterTables/'+clustername+'mastertable.fits'
homedir='/Users/rfinn/'
elif mypath.find('home') > -1:
print "Running on coma"
infile='/home/rfinn/research/LocalClusters/MasterTables/'+clustername+'mastertable.fits'
homedir='/home/rfinn/'
self.cutoutpath=homedir+'research/LocalClusters/cutouts/'+self.prefix+'/'
#infile='/home/rfinn/LocalClusters/MasterTables/'+clustername+'mastertable.fits'
tb=pyfits.open(infile)
tbdata=tb[1].data
tb.close()
self.agcflag=tbdata.field('AGCflag')
self.HIflag=tbdata.field('HIFLAG')
self.sdssflag=tbdata.field('SDSSflag')
self.sdssphotflag=tbdata.field('SDSSphotflag')
self.mpaflag=tbdata.field('MPAFLAG')
self.apexflag=tbdata.field('APEXFLAG')
self.sexsdssflag=tbdata.field('SEXSDSSflag')
self.sex24flag=tbdata.field('SEX24FLAG')
self.agcvoptflag=tbdata.field('AGCVOPTFLAG')
self.agcnumber=tbdata.field('AGCNUMBER')
self.raagc=tbdata.field('AGCRA')
self.decagc=tbdata.field('AGCDEC')
self.a100=tbdata.field('A100')
self.b100=tbdata.field('B100')
self.mag10=tbdata.field('MAG10')
self.posang=tbdata.field('POSANG')
self.bsteintype=tbdata.field('BSTEINTYPE')
self.vopt=tbdata.field('VOPT')
self.verr=tbdata.field('VERR')
self.vsource=tbdata.field('VSOURCE')
self.flux100=tbdata.field('FLUX100')
self.rms100=tbdata.field('RMS100')
self.v21=tbdata.field('V21')
self.width=tbdata.field('WIDTH')
self.widtherr=tbdata.field('WIDTHERR')
#sdss info
self.sdssra=tbdata.field('SDSSRA')
self.sdssdec=tbdata.field('SDSSDEC')
self.sdssphotra=tbdata.field('SDSSphotRA')
self.sdssphotdec=tbdata.field('SDSSphotDEC')
self.sdssmag=tbdata.field('SDSSMAG')
self.sdssu=self.sdssmag[:,0]
self.sdssg=self.sdssmag[:,1]
self.sdssr=self.sdssmag[:,2]
self.sdssi=self.sdssmag[:,3]
self.sdssz=self.sdssmag[:,4]
self.sdssmagerr=tbdata.field('SDSSMAGERR')
self.sdssuerr=self.sdssmagerr[:,0]
self.sdssgerr=self.sdssmagerr[:,1]
self.sdssrerr=self.sdssmagerr[:,2]
self.sdssierr=self.sdssmagerr[:,3]
self.sdsszerr=self.sdssmagerr[:,4]
self.sdssspecz=tbdata.field('SDSSSPECZ')
self.sdssvopt=tbdata.field('SDSSVOPT')
self.sdsshaew=tbdata.field('SDSSHAEW')
self.sdsshaewerr=tbdata.field('SDSSHAEWERR')
self.sdssplate=tbdata.field('SDSSPLATE')
self.sdssfiberid=tbdata.field('SDSSFIBERID')
self.sdsstile=tbdata.field('SDSSTILE')
self.sdssrun=tbdata.field('SDSSRUN')
self.sdssrerun=tbdata.field('SDSSRERUN')
self.sdsscamcol=tbdata.field('SDSSCAMCOL')
self.sdssfield=tbdata.field('SDSSFIELD')
self.mpahalpha=tbdata.field('MPAHALPHA')
self.mpahbeta=tbdata.field('MPAHBETA')
self.mpao3=tbdata.field('MPAOIII')
self.mpan2=tbdata.field('MPANII')
#sextractor info
self.numberser=tbdata.field('NUMBERSER')
self.ximageser=tbdata.field('XIMAGESER')
self.yimageser=tbdata.field('YIMAGESER')
self.xminimageser=tbdata.field('XMINIMAGESER')
self.xmaximageser=tbdata.field('XMAXIMAGESER')
self.yminimageser=tbdata.field('YMINIMAGESER')
self.raser=tbdata.field('RASER')
self.decser=tbdata.field('DECSER')
self.fluxisoser=tbdata.field('FLUXISOSER')
self.fluxerrisoser=tbdata.field('FLUXERRISOSER')
self.magisoser=tbdata.field('MAGISOSER')
self.magerrisoser=tbdata.field('MAGERRISOSER')
self.fluxautoser=tbdata.field('FLUXAUTOSER')
self.fluxerrautoser=tbdata.field('FLUXERRAUTOSER')
self.magautoser=tbdata.field('MAGAUTOSER')
self.magerrautoser=tbdata.field('MAGERRAUTOSER')
self.fluxpetroser=tbdata.field('FLUXPETROSER')
self.fluxerrpetroser=tbdata.field('FLUXERRPETROSER')
self.magpetroser=tbdata.field('MAGPETROSER')
self.magerrpetroser=tbdata.field('MAGERRPETROSER')
self.kronradser=tbdata.field('KRONRADSER')#kron radius
self.petroradser=tbdata.field('PETRORADSER')#petrosian radius
self.fluxradser=tbdata.field('FLUXRADSER')#1/2 light radius
self.isoareaser=tbdata.field('ISOAREASER')
self.aworldser=tbdata.field('AWORLDSER')
self.bworldser=tbdata.field('BWORLDSER')
self.thetaser=tbdata.field('THETASER')
self.errthetaser=tbdata.field('ERRTHETASER')
self.thetaj2000ser=tbdata.field('THETAJ2000SER')
self.errthetaj2000ser=tbdata.field('ERRTHETAJ2000SER')
self.elongser=tbdata.field('ELONGATIONSER')
self.elliptser=tbdata.field('ELLIPTICITYSER')
self.fwhmser=tbdata.field('FWHMSER')
self.flagsser=tbdata.field('FLAGSSER')
self.classstarser=tbdata.field('CLASSSTARSER')
#SEXTRACTOR output 24 micron data
self.numberse24=tbdata.field('NUMBERSE24')
self.ximagese24=tbdata.field('XIMAGESE24')
self.yimagese24=tbdata.field('YIMAGESE24')
self.xminimagese24=tbdata.field('XMINIMAGESE24')
self.xmaximagese24=tbdata.field('XMAXIMAGESE24')
self.xminimagese24=tbdata.field('YMINIMAGESE24')
self.rase24=tbdata.field('RASE24')
self.decse24=tbdata.field('DECSE24')
self.fluxisose24=tbdata.field('FLUXISOSE24')
self.fluxerrisose24=tbdata.field('FLUXERRISOSE24')
self.magisose24=tbdata.field('MAGISOSE24')
self.magerrisose24=tbdata.field('MAGERRISOSE24')
self.fluxautose24=tbdata.field('FLUXAUTOSE24')
self.fluxerrautose24=tbdata.field('FLUXERRAUTOSE24')
self.magautose24=tbdata.field('MAGAUTOSE24')
self.magerrautose24=tbdata.field('MAGERRAUTOSE24')
self.fluxpetrose24=tbdata.field('FLUXPETROSE24')
self.fluxerrpetrose24=tbdata.field('FLUXERRPETROSE24')
self.magpetrose24=tbdata.field('MAGPETROSE24')
self.magerrpetrose24=tbdata.field('MAGERRPETROSE24')
self.kronradse24=tbdata.field('KRONRADSE24')
self.petroradse24=tbdata.field('PETRORADSE24')
self.fluxradse24=tbdata.field('FLUXRADSE24')
self.isoarease24=tbdata.field('ISOAREASE24')
self.aworldse24=tbdata.field('AWORLDSE24')
self.bworldse24=tbdata.field('BWORLDSE24')
self.thetase24=tbdata.field('THETASE24')
self.errthetase24=tbdata.field('ERRTHETASE24')
self.thetaj2000se24=tbdata.field('THETAJ2000SE24')
self.errthetaj2000se24=tbdata.field('ERRTHETAJ2000SE24')
self.elongse24=tbdata.field('ELONGATIONSE24')
self.elliptse24=tbdata.field('ELLIPTICITYSE24')
self.fwhmse24=tbdata.field('FWHMSE24')
self.flagsse24=tbdata.field('FLAGSSE24')
self.classstarse24=tbdata.field('CLASSSTARSE24')
self.f24dist=self.fluxautose24[self.sex24flag]
#apex output
self.mipsra=tbdata.field('MIPSRA')
self.mipsdec=tbdata.field('MIPSDEC')
self.mipsflux=tbdata.field('MIPSFLUX')
self.mipsfluxerr=tbdata.field('MIPSFLUXERR')
self.mipssnr=tbdata.field('MIPSSNR')
self.mipsdeblend=tbdata.field('MIPSDEBLEND')
self.mipsfluxap1=tbdata.field('MIPSFLUXAP1')
self.mipsfluxap1err=tbdata.field('MIPSFLUXAP1ERR')
self.mipsfluxap2=tbdata.field('MIPSFLUXAP2')
self.mipsfluxap2err=tbdata.field('MIPSFLUXAP2ERR')
self.mipsfluxap3=tbdata.field('MIPSFLUXAP3')
self.mipsfluxap4err=tbdata.field('MIPSFLUXAP3ERR')
self.On24ImageFlag=tbdata.field('On24ImageFlag')
self.supervopt=tbdata.field('SUPERVOPT')
self.ra=tbdata.field('SUPERRA')
self.dec=tbdata.field('SUPERDEC')
self.stellarmass=tbdata.field('STELLARMASS')
self.stellarmass_cl=tbdata.field('STELLARMASS_CL')
self.sdssabsmag=tbdata.field('SDSSABSMAG')
self.sdssMu=self.sdssabsmag[:,0]
self.sdssMg=self.sdssabsmag[:,1]
self.sdssMr=self.sdssabsmag[:,2]
self.sdssMi=self.sdssabsmag[:,3]
self.sdssMz=self.sdssabsmag[:,4]
self.sdsslum=tbdata.field('SDSSLUM')
self.sdssLu=self.sdsslum[:,0]
self.sdssLg=self.sdsslum[:,1]
self.sdssLr=self.sdsslum[:,2]
self.sdssLi=self.sdsslum[:,3]
self.sdssLz=self.sdsslum[:,4]
self.sdssabsmag_cl=tbdata.field('SDSSABSMAG_CL')
self.sdssMu=self.sdssabsmag_cl[:,0]
self.sdssMg=self.sdssabsmag_cl[:,1]
self.sdssMr=self.sdssabsmag_cl[:,2]
self.sdssMi=self.sdssabsmag_cl[:,3]
self.sdssMz=self.sdssabsmag_cl[:,4]
self.sdsslum_cl=tbdata.field('SDSSLUM_CL')
self.sdssLu_cl=self.sdsslum_cl[:,0]
self.sdssLg_cl=self.sdsslum_cl[:,1]
self.sdssLr_cl=self.sdsslum_cl[:,2]
self.sdssLi_cl=self.sdsslum_cl[:,3]
self.sdssLz_cl=self.sdsslum_cl[:,4]
self.sdsscolc=tbdata.field('SDSSCOLC')
self.sdssrowc=tbdata.field('SDSSROWC')
self.membflag =tbdata.field('MEMBFLAG')
self.morphflag =tbdata.field('MORPHFLAG')
self.morph =tbdata.field('MORPH')
self.disturb =tbdata.field('DISTURB')
self.localdens =tbdata.field('LOCALDENS')
self.agn1 =tbdata.field('AGNKAUFF')
self.agn2 =tbdata.field('AGNKEWLEY')
self.agn3 =tbdata.field('AGNSTASIN')
self.n2halpha=(self.mpan2/self.mpahalpha)
self.o3hbeta=(self.mpao3/self.mpahbeta)
self.logn2halpha=log10(self.mpan2/self.mpahalpha)
self.logo3hbeta=log10(self.mpao3/self.mpahbeta)
self.ellipseflag24 =tbdata.field('ELLIPSEFLAG24')
self.ellipseflagsdss =tbdata.field('ELLIPSEFLAGSDSS')
self.ellipseflag =tbdata.field('ELLIPSEFLAG')
# galaxy zoo fields
self.galzooflag =tbdata.field('GALZOOFLAG')
self.galzoonvote =tbdata.field('GALZOONVOTE')
self.galzoopel =tbdata.field('GALZOOPEL')
self.galzoopcw =tbdata.field('GALZOOPCW')
self.galzoopacw =tbdata.field('GALZOOPACW')
self.galzoopedge =tbdata.field('GALZOOPEDGE')
self.galzoopdk =tbdata.field('GALZOOPDK')
self.galzoopmg =tbdata.field('GALZOOPMG')
self.galzoopcs =tbdata.field('GALZOOPCS')
self.galzoopeldebiased =tbdata.field('GALZOOPELDEBIASED')
self.galzoopcsdebiased =tbdata.field('GALZOOPCSDEBIASED')
self.galzoospiral =tbdata.field('GALZOOSPIRAL')
self.galzooelliptical =tbdata.field('GALZOOELLIPTICAL')
self.galzoouncertain =tbdata.field('GALZOOUNCERTAIN')
#new SDSS fields that quantify radial extent of galaxy
self.sdssIsoAr =tbdata.field('SDSSISOAR')
self.sdssIsoBr =tbdata.field('SDSSISOBR')
self.sdssIsoPhir =tbdata.field('SDSSISOPHIR')
self.sdssIsoPhirErr =tbdata.field('SDSSISOPHIERRR')
self.sdssExpRadr =tbdata.field('SDSSEXPRADR')
self.sdssExpABr =tbdata.field('SDSSEXPABR')
self.sdssExpABrErr =tbdata.field('SDSSEXPABRERR')
self.sdssExpPhir =tbdata.field('SDSSEXPPHIR')
self.sdssExpPhirErr =tbdata.field('SDSSEXPPHIERRR')
self.sdssPetroMag=tbdata.field('SDSSPETROMAG')
self.sdssPetroMagr=self.sdssPetroMag[:,2]
self.sdssPetroRad=tbdata.field('SDSSPETRORAD')
self.sdssPetroRadr=self.sdssPetroRad[:,2]
self.sdssPetroR50=tbdata.field('SDSSPETROR50')
self.sdssPetroR50r=self.sdssPetroR50[:,2]
self.sdssPetroR90=tbdata.field('SDSSPETROR90')
self.sdssPetroR90r=self.sdssPetroR90[:,2]
#de-redened magnitudes
self.sdssdered=tbdata.field('SDSSDERED')
self.sdssumag=self.sdssdered[:,0]
self.sdssgmag=self.sdssdered[:,1]
self.sdssrmag=self.sdssdered[:,2]
self.sdssimag=self.sdssdered[:,3]
self.sdsszmag=self.sdssdered[:,4]
# other Lum and SFR
self.HImass=tbdata.field('HIMASS')
self.L24=tbdata.field('L24')
self.L24err=tbdata.field('L24ERR')
self.Lir=tbdata.field('LIR')
self.Lirerr=tbdata.field('LIRERR')
self.SFR24=tbdata.field('SFR24')
self.SFR24err=tbdata.field('SFR24ERR')
self.SuperSFR24=tbdata.field('SUPERSFR24')
self.SuperSFR24err=tbdata.field('SUPERSFR24ERR')
self.HImass_cl=tbdata.field('HIMASS_CL')
self.L24_cl=tbdata.field('L24_CL')
self.L24err_cl=tbdata.field('L24ERR_CL')
self.Lir_cl=tbdata.field('LIR_CL')
self.Lirerr_cl=tbdata.field('LIRERR_CL')
self.SFR24_cl=tbdata.field('SFR24_CL')
self.SFR24err_cl=tbdata.field('SFR24ERR_CL')
self.SuperSFR24_cl=tbdata.field('SUPERSFR24_CL')
self.SuperSFR24err_cl=tbdata.field('SUPERSFR24ERR_CL')
#define red, green and blue galaxies
ur=self.sdssumag-self.sdssrmag
self.redflag=(ur > 2.3)
self.greenflag=(ur > 1.8) & (ur < 2.3)
self.blueflag=(ur<1.8)
#end of master table!
#self.spiralFlag=self.On24ImageFlag & self.galzooflag & self.ellipseflag & (self.galzoopcsdebiased > 0.6)
self.spiralFlag=self.galzooflag & self.galzoospiral
self.clustername=clustername
self.clusterra=clusterRA[clustername]
self.clusterdec=clusterDec[clustername]
self.dr=sqrt((self.ra-self.clusterra)**2+(self.dec-self.clusterdec)**2)
self.drR200=self.dr/self.r200deg
self.clustervel=clustervel[clustername]
self.clustersigma=clustersigma[clustername]
self.clustervmin=self.clustervel-3.*self.clustersigma
self.clustervmax=self.clustervel+3.*self.clustersigma
self.dist=sqrt((self.clusterra-self.ra)**2 + (self.clusterdec-self.dec)**2)
self.flagHI = (self.flux100 > 0.)
self.flagmemb = ((self.vopt > self.clustervmin) & (self.vopt < self.clustervmax)) | ((self.v21 > self.clustervmin) & (self.v21 < self.clustervmax))
self.dv=abs(self.supervopt-self.biweightvel)/self.biweightscale
self.allvelocity=3.e5*self.sdssspecz
for i in range(len(self.allvelocity)):
if self.sdssflag[i] < 1:
if self.v21[i] > 0:
self.allvelocity[i]=self.v21[i]
else:
self.allvelocity[i]=self.vopt[i]
self.nmemb=len(self.dist[self.membflag & self.On24ImageFlag])
self.nfield=len(self.dist[self.On24ImageFlag])-self.nmemb
print self.clustername,": ","N members = ",self.nmemb," N field = ",self.nfield
print ' N spirals = ',sum(self.spiralFlag),' Nspiral members = ',sum(self.spiralFlag&self.membflag)
print ' N spirals on 24um image = ',sum(self.spiralFlag & self.On24ImageFlag),' Nspiral members = ',sum(self.spiralFlag&self.membflag & self.On24ImageFlag)
print ' N galaxies on 24um image = ',sum(self.On24ImageFlag),' Nspiral members = ',sum(self.membflag & self.On24ImageFlag)
self.agcdict=dict((a,b) for a,b in zip(self.agcnumber,arange(len(self.agcnumber))))
#self.L24=zeros(len(self.mipsflux),'d')
#self.L24err=zeros(len(self.mipsflux),'d')
# calculate HI deficiency using Toribio et al 2011 results
# their relation is
# log(M_HI/Msun) = 8.72 + 1.25 log(D_25,r/kpc)
# and
# log D_25 = log D_25(obs) + beta log(b/a), where beta = 0.35 in r-band
# NOTE: SDSS isophotal radii are given in pixels!!!!
a=self.sdssIsoAr
b=self.sdssIsoBr
# convert from arcsec to kpc with self.AngDistance (which is in units of kpc/arcsec)
# multiply by 2 to convert from radius to diameter
# multiply by sdss pixel scale (0.39) b/c isophotal radii are given in pixels
self.D25obskpc=2.*self.sdssIsoAr*sdsspixelscale*self.AngDistance
# apply correction from toribio et al 2011
self.logD25kpc=log10(self.D25obskpc) + 0.35*log10(b/a)
# use toribio et al relation to predict the expected HI mass, including factor of 2 correction
self.HImassExpected = 10.**(8.72 + 1.25*(self.logD25kpc-log10(2.)))
self.HImassExpFromMr=10.**(6.44-0.18*self.sdssMr)
self.HImassExpFromgr=10.**(8.84+1.81*(self.sdssgmag-self.sdssrmag))
# calculate deficiency as log expected - log observed
self.HIDef = log10(self.HImassExpected) - log10(self.HImass)
self.myHIDef = log10(self.HImassExpected -self.HImass)
self.agnflag=self.agn1#use Kauffmann et al 2003 cut
self.irflag = self.apexflag & self.membflag
#set flag fot galaxies with dv < 3 sigma
self.dvflag = self.dv < 3.
| gpl-3.0 |
saveman71/Anime-Ultime-Downloader | bs4/tests/test_builder_registry.py | 485 | 5374 | """Tests of the builder registry."""
import unittest
from bs4 import BeautifulSoup
from bs4.builder import (
builder_registry as registry,
HTMLParserTreeBuilder,
TreeBuilderRegistry,
)
try:
from bs4.builder import HTML5TreeBuilder
HTML5LIB_PRESENT = True
except ImportError:
HTML5LIB_PRESENT = False
try:
from bs4.builder import (
LXMLTreeBuilderForXML,
LXMLTreeBuilder,
)
LXML_PRESENT = True
except ImportError:
LXML_PRESENT = False
class BuiltInRegistryTest(unittest.TestCase):
    """Exercise the default registry, with the stock builders registered."""

    def test_combination(self):
        # A (quality, markup-type) feature pair resolves to one builder.
        if LXML_PRESENT:
            self.assertEqual(registry.lookup('fast', 'html'), LXMLTreeBuilder)
            self.assertEqual(registry.lookup('permissive', 'xml'),
                             LXMLTreeBuilderForXML)
        self.assertEqual(registry.lookup('strict', 'html'),
                         HTMLParserTreeBuilder)
        if HTML5LIB_PRESENT:
            self.assertEqual(registry.lookup('html5lib', 'html'),
                             HTML5TreeBuilder)

    def test_lookup_by_markup_type(self):
        if LXML_PRESENT:
            # lxml handles both markup types when it is installed...
            self.assertEqual(registry.lookup('html'), LXMLTreeBuilder)
            self.assertEqual(registry.lookup('xml'), LXMLTreeBuilderForXML)
        else:
            # ...and without it there is no XML builder at all.
            self.assertEqual(registry.lookup('xml'), None)
            if HTML5LIB_PRESENT:
                self.assertEqual(registry.lookup('html'), HTML5TreeBuilder)
            else:
                self.assertEqual(registry.lookup('html'), HTMLParserTreeBuilder)

    def test_named_library(self):
        # Builders can also be looked up under their library names.
        if LXML_PRESENT:
            self.assertEqual(registry.lookup('lxml', 'xml'),
                             LXMLTreeBuilderForXML)
            self.assertEqual(registry.lookup('lxml', 'html'),
                             LXMLTreeBuilder)
        if HTML5LIB_PRESENT:
            self.assertEqual(registry.lookup('html5lib'),
                             HTML5TreeBuilder)
        self.assertEqual(registry.lookup('html.parser'),
                         HTMLParserTreeBuilder)

    def test_beautifulsoup_constructor_does_lookup(self):
        # A single feature string works...
        BeautifulSoup("", features="html")
        # ...as does a list of feature strings.
        BeautifulSoup("", features=["html", "fast"])
        # An unknown feature makes the constructor raise.
        self.assertRaises(ValueError, BeautifulSoup,
                          "", features="no-such-feature")
class RegistryTest(unittest.TestCase):
    """Unit tests for the TreeBuilderRegistry class itself, using a fresh
    registry and throwaway builder classes."""

    def setUp(self):
        self.registry = TreeBuilderRegistry()

    def builder_for_features(self, *feature_list):
        """Create, register and return a dummy builder class that
        advertises exactly the given features."""
        name = 'Builder_' + '_'.join(feature_list)
        cls = type(name, (object,), {'features': feature_list})
        self.registry.register(cls)
        return cls

    def test_register_with_no_features(self):
        builder = self.builder_for_features()
        # A featureless builder can never be found through a feature...
        self.assertEqual(self.registry.lookup('foo'), None)
        # ...but a bare lookup returns it, it being the only registration.
        self.assertEqual(self.registry.lookup(), builder)

    def test_register_with_features_makes_lookup_succeed(self):
        builder = self.builder_for_features('foo', 'bar')
        self.assertEqual(self.registry.lookup('foo'), builder)
        self.assertEqual(self.registry.lookup('bar'), builder)

    def test_lookup_fails_when_no_builder_implements_feature(self):
        self.builder_for_features('foo', 'bar')
        self.assertEqual(self.registry.lookup('baz'), None)

    def test_lookup_gets_most_recent_registration_when_no_feature_specified(self):
        self.builder_for_features('foo')
        latest = self.builder_for_features('bar')
        self.assertEqual(self.registry.lookup(), latest)

    def test_lookup_fails_when_no_tree_builders_registered(self):
        self.assertEqual(self.registry.lookup(), None)

    def test_lookup_gets_most_recent_builder_supporting_all_features(self):
        # Registration order matters: lookup prefers the most recent match.
        only_foo = self.builder_for_features('foo')
        only_bar = self.builder_for_features('bar')
        both_early = self.builder_for_features('foo', 'bar', 'baz')
        both_late = self.builder_for_features('foo', 'bar', 'quux')
        later_bar = self.builder_for_features('bar')
        later_foo = self.builder_for_features('foo')
        # Two builders feature both 'foo' and 'bar'; the one that also
        # features 'quux' was registered later, so it wins.
        self.assertEqual(self.registry.lookup('foo', 'bar'),
                         both_late)
        # Only one builder features 'foo', 'bar' and 'baz'.
        self.assertEqual(self.registry.lookup('foo', 'bar', 'baz'),
                         both_early)

    def test_lookup_fails_when_cannot_reconcile_requested_features(self):
        self.builder_for_features('foo', 'bar')
        self.builder_for_features('foo', 'baz')
        self.assertEqual(self.registry.lookup('bar', 'baz'), None)
| apache-2.0 |
dfang/odoo | addons/payment_authorize/tests/test_authorize.py | 12 | 10493 | # -*- coding: utf-8 -*-
import hashlib
import hmac
import time
import urlparse
import unittest
from lxml import objectify
import odoo
from odoo.addons.payment.models.payment_acquirer import ValidationError
from odoo.addons.payment.tests.common import PaymentAcquirerCommon
from odoo.addons.payment_authorize.controllers.main import AuthorizeController
from odoo.tools import mute_logger
@odoo.tests.common.at_install(True)
@odoo.tests.common.post_install(True)
class AuthorizeCommon(PaymentAcquirerCommon):
    """Shared fixture for the Authorize.net acquirer tests: loads the USD
    currency and the authorize acquirer record, and forces capture mode."""
    def setUp(self):
        super(AuthorizeCommon, self).setUp()
        # authorize only support USD in test environment
        self.currency_usd = self.env['res.currency'].search([('name', '=', 'USD')], limit=1)[0]
        # get the authorize account
        self.authorize = self.env.ref('payment.payment_acquirer_authorize')
        # Be sure to be in 'capture' mode
        self.authorize.auto_confirm = 'confirm_so'
@odoo.tests.common.at_install(True)
@odoo.tests.common.post_install(True)
class AuthorizeForm(AuthorizeCommon):
    """Tests for the Authorize.net form-based (redirect) payment flow and
    the server-to-server flow."""
    def _authorize_generate_hashing(self, values):
        # Authorize.net transaction fingerprint: HMAC-MD5 over
        # login^sequence^timestamp^amount^ (trailing '^' stands for the
        # empty currency field), keyed with the transaction key.
        data = '^'.join([
            values['x_login'],
            values['x_fp_sequence'],
            values['x_fp_timestamp'],
            values['x_amount'],
        ]) + '^'
        return hmac.new(str(values['x_trans_key']), data, hashlib.md5).hexdigest()
    def test_10_Authorize_form_render(self):
        # Rendering must never hit the production endpoint.
        self.assertEqual(self.authorize.environment, 'test', 'test without test environment')
        # ----------------------------------------
        # Test: button direct rendering
        # ----------------------------------------
        base_url = self.env['ir.config_parameter'].get_param('web.base.url')
        # Expected hidden-input values of the rendered payment form.
        form_values = {
            'x_login': self.authorize.authorize_login,
            'x_trans_key': self.authorize.authorize_transaction_key,
            'x_amount': '320.0',
            'x_show_form': 'PAYMENT_FORM',
            'x_type': 'AUTH_CAPTURE',
            'x_method': 'CC',
            'x_fp_sequence': '%s%s' % (self.authorize.id, int(time.time())),
            'x_version': '3.1',
            'x_relay_response': 'TRUE',
            'x_fp_timestamp': str(int(time.time())),
            'x_relay_url': '%s' % urlparse.urljoin(base_url, AuthorizeController._return_url),
            'x_cancel_url': '%s' % urlparse.urljoin(base_url, AuthorizeController._cancel_url),
            'return_url': None,
            'x_currency_code': 'USD',
            'x_invoice_num': 'SO004',
            'x_first_name': 'Norbert',
            'x_last_name': 'Buyer',
            'x_address': 'Huge Street 2/543',
            'x_city': 'Sin City',
            'x_zip': '1000',
            'x_country': 'Belgium',
            'x_phone': '0032 12 34 56 78',
            'x_email': 'norbert.buyer@example.com',
            'x_state': None,
            'x_ship_to_first_name': 'Norbert',
            'x_ship_to_last_name': 'Buyer',
            'x_ship_to_address': 'Huge Street 2/543',
            'x_ship_to_city': 'Sin City',
            'x_ship_to_zip': '1000',
            'x_ship_to_country': 'Belgium',
            'x_ship_to_phone': '0032 12 34 56 78',
            'x_ship_to_email': 'norbert.buyer@example.com',
            'x_ship_to_state': None,
        }
        # The fingerprint is computed over the final field values.
        form_values['x_fp_hash'] = self._authorize_generate_hashing(form_values)
        # render the button
        res = self.authorize.render('SO004', 320.0, self.currency_usd.id, values=self.buyer_values)
        # check form result
        tree = objectify.fromstring(res)
        self.assertEqual(tree.get('action'), 'https://test.authorize.net/gateway/transact.dll', 'Authorize: wrong form POST url')
        for el in tree.iterfind('input'):
            values = el.values()
            # Skip inputs whose value is time-dependent or intentionally empty.
            if values[1] in ['submit', 'x_fp_hash', 'return_url', 'x_state', 'x_ship_to_state']:
                continue
            self.assertEqual(
                unicode(values[2], "utf-8"),
                form_values[values[1]],
                'Authorize: wrong value for input %s: received %s instead of %s' % (values[1], values[2], form_values[values[1]])
            )
    @mute_logger('odoo.addons.payment_authorize.models.payment', 'ValidationError')
    def test_20_authorize_form_management(self):
        # be sure not to do stupid thing
        self.assertEqual(self.authorize.environment, 'test', 'test without test environment')
        # typical data posted by authorize after client has successfully paid
        authorize_post_data = {
            'return_url': u'/shop/payment/validate',
            'x_MD5_Hash': u'7934485E1C105940BE854208D10FAB4F',
            'x_account_number': u'XXXX0027',
            'x_address': u'Huge Street 2/543',
            'x_amount': u'320.00',
            'x_auth_code': u'E4W7IU',
            'x_avs_code': u'Y',
            'x_card_type': u'Visa',
            'x_cavv_response': u'2',
            'x_city': u'Sun City',
            'x_company': u'',
            'x_country': u'Belgium',
            'x_cust_id': u'',
            'x_cvv2_resp_code': u'',
            'x_description': u'',
            'x_duty': u'0.00',
            'x_email': u'norbert.buyer@example.com',
            'x_fax': u'',
            'x_first_name': u'Norbert',
            'x_freight': u'0.00',
            'x_invoice_num': u'SO004',
            'x_last_name': u'Buyer',
            'x_method': u'CC',
            'x_phone': u'0032 12 34 56 78',
            'x_po_num': u'',
            'x_response_code': u'1',
            'x_response_reason_code': u'1',
            'x_response_reason_text': u'This transaction has been approved.',
            'x_ship_to_address': u'Huge Street 2/543',
            'x_ship_to_city': u'Sun City',
            'x_ship_to_company': u'',
            'x_ship_to_country': u'Belgium',
            'x_ship_to_first_name': u'Norbert',
            'x_ship_to_last_name': u'Buyer',
            'x_ship_to_state': u'',
            'x_ship_to_zip': u'1000',
            'x_state': u'',
            'x_tax': u'0.00',
            'x_tax_exempt': u'FALSE',
            'x_test_request': u'false',
            'x_trans_id': u'2217460311',
            'x_type': u'auth_capture',
            'x_zip': u'1000'
        }
        # should raise error about unknown tx
        with self.assertRaises(ValidationError):
            self.env['payment.transaction'].form_feedback(authorize_post_data, 'authorize')
        # Create the transaction matching the reference in the feedback data.
        tx = self.env['payment.transaction'].create({
            'amount': 320.0,
            'acquirer_id': self.authorize.id,
            'currency_id': self.currency_usd.id,
            'reference': 'SO004',
            'partner_name': 'Norbert Buyer',
            'partner_country_id': self.country_france.id})
        # validate it
        self.env['payment.transaction'].form_feedback(authorize_post_data, 'authorize')
        # check state
        self.assertEqual(tx.state, 'done', 'Authorize: validation did not put tx into done state')
        self.assertEqual(tx.acquirer_reference, authorize_post_data.get('x_trans_id'), 'Authorize: validation did not update tx payid')
        # reset tx
        tx.write({'state': 'draft', 'date_validate': False, 'acquirer_reference': False})
        # simulate an error
        authorize_post_data['x_response_code'] = u'3'
        self.env['payment.transaction'].form_feedback(authorize_post_data, 'authorize')
        # check state
        self.assertEqual(tx.state, 'error', 'Authorize: erroneous validation did not put tx into error state')
    @unittest.skip("Authorize s2s test disabled: We do not want to overload Authorize.net with runbot's requests")
    def test_30_authorize_s2s(self):
        # be sure not to do stupid thing
        authorize = self.authorize
        self.assertEqual(authorize.environment, 'test', 'test without test environment')
        # add credential
        # FIXME: put this test in master-nightly on odoo/odoo + create sandbox account
        authorize.write({
            'authorize_transaction_key': '',
            'authorize_login': '',
        })
        self.assertTrue(authorize.authorize_test_credentials, 'Authorize.net: s2s authentication failed')
        # create payment meethod
        payment_token = self.env['payment.token'].create({
            'acquirer_id': authorize.id,
            'partner_id': self.buyer_id,
            'cc_number': '4111 1111 1111 1111',
            'cc_expiry': '02 / 26',
            'cc_brand': 'visa',
            'cc_cvc': '111',
            'cc_holder_name': 'test',
        })
        # create normal s2s transaction
        transaction = self.env['payment.transaction'].create({
            'amount': 500,
            'acquirer_id': authorize.id,
            'type': 'server2server',
            'currency_id': self.currency_usd.id,
            'reference': 'test_ref_%s' % odoo.fields.Date.today(),
            'payment_token_id': payment_token.id,
            'partner_id': self.buyer_id,
        })
        transaction.authorize_s2s_do_transaction()
        self.assertEqual(transaction.state, 'done',)
        # switch to 'authorize only'
        # create authorize only s2s transaction & capture it
        self.authorize.auto_confirm = 'authorize'
        transaction = self.env['payment.transaction'].create({
            'amount': 500,
            'acquirer_id': authorize.id,
            'type': 'server2server',
            'currency_id': self.currency_usd.id,
            'reference': 'test_%s' % int(time.time()),
            'payment_token_id': payment_token.id,
            'partner_id': self.buyer_id,
        })
        transaction.authorize_s2s_do_transaction()
        self.assertEqual(transaction.state, 'authorized')
        transaction.action_capture()
        self.assertEqual(transaction.state, 'done')
        # create authorize only s2s transaction & void it
        self.authorize.auto_confirm = 'authorize'
        transaction = self.env['payment.transaction'].create({
            'amount': 500,
            'acquirer_id': authorize.id,
            'type': 'server2server',
            'currency_id': self.currency_usd.id,
            'reference': 'test_%s' % int(time.time()),
            'payment_token_id': payment_token.id,
            'partner_id': self.buyer_id,
        })
        transaction.authorize_s2s_do_transaction()
        self.assertEqual(transaction.state, 'authorized')
        transaction.action_void()
        self.assertEqual(transaction.state, 'cancel')
| agpl-3.0 |
metaRx/scripts | backups/sync2S3.py | 2 | 7146 | #!/usr/bin/env python
__author__ = 'Mitch Anderson'
__date__ = '08-14-2011'
__version__ = '1.0'
"""
Releasing Under the New BSD License same as where I got some of the code from.
Most of the S3 directory sync code came from the django-command-extensions project
on Google Code: http://code.google.com/p/django-command-extensions/wiki/sync_media_s3
I just added a more generic wrapper and the delete portions.
"""
import datetime
import os
import sys
import time
import getopt
import mimetypes
import email
# Make sure boto is available
try:
import boto
import boto.exception
except ImportError:
raise ImportError, "The boto Python library is not installed."
class S3Sync:
def __init__(self, verbose=True, force=False, quiet=False, delete=True, **kwargs):
self.SYNC_DIR = kwargs['S3_SYNC_DIR']
self.AWS_BUCKET_NAME = kwargs['S3_BUCKET']
self.AWS_ACCESS_KEY_ID = kwargs['KEY']
self.AWS_SECRET_ACCESS_KEY = kwargs['SECRET']
self.FILTER_LIST = ['.DS_Store','.svn','.idea',]
self.verbosity = verbose
self.quiet = quiet
self.delete = delete
self.do_force = force
self.do_gzip = False
self.do_expires = True
self.upload_count = 0
self.skip_count = 0
self.del_count = 0
self.prefix = ""
def sync_s3(self):
"""
Walks sync directory and uploads to S3
"""
bucket, key = self.open_s3()
os.path.walk(self.SYNC_DIR, self.upload_s3,
(bucket, key, self.AWS_BUCKET_NAME, self.SYNC_DIR))
def del_s3(self):
"""
Removes Files from S3 that are not on the local file system
"""
bucket, key = self.open_s3()
s3list = bucket.list()
root_dir, prefix = self.SYNC_DIR.rsplit('/', 1 )
for k in s3list:
if not os.path.isfile(os.path.join(root_dir, k.name)):
if self.verbosity:
print "Deleting %s..." % (k.name)
bucket.delete_key(k.name)
self.del_count += 1
def open_s3(self):
"""
Opens connection to S3 returning bucket and key
"""
conn = boto.connect_s3(self.AWS_ACCESS_KEY_ID, self.AWS_SECRET_ACCESS_KEY)
try:
bucket = conn.get_bucket(self.AWS_BUCKET_NAME)
except boto.exception.S3ResponseError:
bucket = conn.create_bucket(self.AWS_BUCKET_NAME)
return bucket, boto.s3.key.Key(bucket)
def upload_s3(self, arg, dirname, names):
"""
This is the callback to os.path.walk and where much of the work happens
"""
bucket, key, bucket_name, root_dir = arg # expand arg tuple
if not root_dir.endswith('/'):
self.prefix = root_dir.split('/')[-1]
root_dir = root_dir + '/'
for file in names:
headers = {}
if file in self.FILTER_LIST:
continue # Skip files we don't want to sync
filename = os.path.join(dirname, file)
if os.path.isdir(filename):
continue # Don't uplaod directories
breakout = 0
for f in self.FILTER_LIST:
if f in filename:
breakout = 1 # Don't upload anything relating to filter_list
if breakout:
continue
file_key = filename[len(root_dir):]
if self.prefix:
file_key = "%s/%s" % (self.prefix, file_key)
# Check if file on S3 is older than local file, if so, upload
if not self.do_force:
s3_key = bucket.get_key(file_key)
if s3_key:
s3_datetime = datetime.datetime(*time.strptime(
s3_key.last_modified, '%a, %d %b %Y %H:%M:%S %Z')[0:6])
local_datetime = datetime.datetime.utcfromtimestamp(
os.stat(filename).st_mtime)
if local_datetime < s3_datetime:
self.skip_count += 1
if self.verbosity > 1:
print "File %s hasn't been modified since last " \
"being uploaded" % (file_key)
continue
# File is newer, let's process and upload
if self.verbosity > 0:
print "Uploading %s..." % (file_key)
content_type = mimetypes.guess_type(filename)[0]
if content_type:
headers['Content_Type'] = content_type
file_obj = open(filename, 'rb')
file_size = os.fstat(file_obj.fileno()).st_size
filedata = file_obj.read()
if self.do_gzip:
# Gzipping only if file is large enough (>1K is recommended)
# and only if file is a common text type (not a binary file)
if file_size > 1024 and content_type in self.GZIP_CONTENT_TYPES:
filedata = self.compress_string(filedata)
headers['Content-Encoding'] = 'gzip'
if self.verbosity > 1:
print "\tgzipped: %dk to %dk" % \
(file_size/1024, len(filedata)/1024)
if self.do_expires:
# HTTP/1.0
headers['Expires'] = '%s GMT' % (email.Utils.formatdate(
time.mktime((datetime.datetime.now() +
datetime.timedelta(days=365*2)).timetuple())))
# HTTP/1.1
headers['Cache-Control'] = 'max-age %d' % (3600 * 24 * 365 * 2)
if self.verbosity > 1:
print "\texpires: %s" % (headers['Expires'])
print "\tcache-control: %s" % (headers['Cache-Control'])
try:
key.name = file_key
key.set_contents_from_string(filedata, headers, replace=True)
key.make_public()
except boto.s3.connection.S3CreateError, e:
print "Failed: %s" % e
except Exception, e:
print e
raise
else:
self.upload_count += 1
file_obj.close()
def run(self):
    """Entry point: sync the local tree up to S3, then report totals.

    Honors self.delete (remove remote files missing locally) and
    self.quiet (suppress the summary lines).
    """
    # upload all files found.
    self.sync_s3()
    if self.delete:
        self.del_s3()
    if not self.quiet:
        print "%d files uploaded." % (self.upload_count)
        print "%d files skipped." % (self.skip_count)
    if not self.quiet and self.delete:
        print "%d files deleted." % (self.del_count)
def main(argv):
AWS_KEY = None
AWS_SECRET = None
verbose = False
force = False
quiet = False
delete = False
# Parse Options
try:
opt, args = getopt.getopt(argv, "hd:b:K:S:vfdq", ["help", "directory=", "bucket=", "key=", "secret=", 'verbose', 'force', 'delete', 'quiet',])
except getopt.GetoptError, err:
print str(err)
usage()
sys.exit(2)
for o, a in opt:
if o in ("-h", "--help"):
usage()
sys.exit()
elif o in ("-d", "--directory"):
SYNC_DIR = a
elif o in ("-b", "--bucket"):
BUCKET = a
elif o in ("-K", "--key"):
AWS_KEY = a
elif o in ("-S", "--secret"):
AWS_SECRET = a
elif o in ("-v", "--verbose"):
verbose = True
elif o in ("-f", "--force"):
force = True
elif o in ("-d", "--delete"):
delete = True
elif o in ("-q", "--queit"):
quiet = True
else:
assert False, "unhandled option"
# Check for AWS Keys
if not AWS_KEY:
AWS_KEY = os.getenv("AWS_ACCESS_KEY_ID")
if not AWS_SECRET:
AWS_SECRET = os.getenv("AWS_SECRET_ACCESS_KEY")
if not AWS_KEY or not AWS_SECRET:
print "Missing AWS Keys"
print usage()
sys.exit(2)
# Start processing
mys3 = S3Sync(verbose, force, quiet, delete, S3_SYNC_DIR=SYNC_DIR, S3_BUCKET=BUCKET, KEY=AWS_KEY, SECRET=AWS_SECRET)
mys3.run()
def usage():
usage = """
-h --help Prints this
-d --directory Directory To Sync to S3
-b --bucket S3 Bucket to sync to
-K --key AWS Access Key
-S --secret AWS Secret Access Key
-v --verbose Verbose Output
-f --force Force upload of Everything regardless of age
-d --delete Delete S3 if file is not local
-q --quiet No File totals, completly quiet
AWS Keys can be in environment variables as well under:
AWS_ACCESS_KEY_ID = <access key>
AWS_SECRET_ACCESS_KEY = <secret access key>
"""
print usage
# Allow use both as an importable module and as a command-line tool.
if __name__ == '__main__':
    main(sys.argv[1:])
| mit |
eunchong/build | third_party/twisted_10_2/twisted/protocols/wire.py | 62 | 2369 | # Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
"""Implement standard (and unused) TCP protocols.
These protocols are either provided by inetd, or are not provided at all.
"""
# system imports
import time, struct
from zope.interface import implements
# twisted import
from twisted.internet import protocol, interfaces
class Echo(protocol.Protocol):
    """As soon as any data is received, write it back (RFC 862)"""

    def dataReceived(self, data):
        # Echo service: mirror every received byte straight back.
        self.transport.write(data)
class Discard(protocol.Protocol):
    """Discard any received data (RFC 863)"""

    def dataReceived(self, data):
        # I'm ignoring you, nyah-nyah
        pass
class Chargen(protocol.Protocol):
    """Generate repeating noise (RFC 864)"""

    # The pattern written to the peer each time the transport asks for more.
    noise = r'@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~ !"#$%&?'

    implements(interfaces.IProducer)

    def connectionMade(self):
        # streaming=0 registers us as a pull producer: the transport calls
        # resumeProducing() whenever its write buffer has room.
        self.transport.registerProducer(self, 0)

    def resumeProducing(self):
        # Emit one more chunk of noise on demand.
        self.transport.write(self.noise)

    def pauseProducing(self):
        # Pull producers have nothing to pause.
        pass

    def stopProducing(self):
        # Nothing to tear down; the transport owns the connection.
        pass
class QOTD(protocol.Protocol):
    """Return a quote of the day (RFC 865)"""

    def connectionMade(self):
        # One-shot service: write the quote, then hang up.
        self.transport.write(self.getQuote())
        self.transport.loseConnection()

    def getQuote(self):
        """Return a quote. May be overridden in subclasses."""
        return "An apple a day keeps the doctor away.\r\n"
class Who(protocol.Protocol):
    """Return list of active users (RFC 866)"""

    def connectionMade(self):
        # One-shot systat service: write the user list, then hang up.
        self.transport.write(self.getUsers())
        self.transport.loseConnection()

    def getUsers(self):
        """Return active users. Override in subclasses."""
        return "root\r\n"
class Daytime(protocol.Protocol):
    """Send back the daytime in ASCII form (RFC 867)"""

    def connectionMade(self):
        # Emit the current UTC time as human-readable text and hang up.
        self.transport.write(time.asctime(time.gmtime(time.time())) + '\r\n')
        self.transport.loseConnection()
class Time(protocol.Protocol):
    """Send back the time in machine readable form (RFC 868)"""

    def connectionMade(self):
        # is this correct only for 32-bit machines?
        # NOTE(review): RFC 868 specifies seconds since 1900-01-01 in an
        # unsigned 32-bit field; this packs the Unix epoch into a signed
        # "!i" -- verify against the spec / interoperability needs.
        result = struct.pack("!i", int(time.time()))
        self.transport.write(result)
        self.transport.loseConnection()
| bsd-3-clause |
muxi/grpc | src/python/grpcio/grpc/experimental/session_cache.py | 27 | 1533 | # Copyright 2018 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""gRPC's APIs for TLS Session Resumption support"""
from grpc._cython import cygrpc as _cygrpc
def ssl_session_cache_lru(capacity):
    """Creates an SSLSessionCache with LRU replacement policy

    Args:
      capacity: Size of the cache

    Returns:
      An SSLSessionCache with LRU replacement policy that can be passed as a value for
      the grpc.ssl_session_cache option to a grpc.Channel. SSL session caches are used
      to store session tickets, which clients can present to resume previous TLS sessions
      with a server.
    """
    # The actual eviction logic lives in the Cython layer; this wrapper
    # only adapts it to the channel-option interface.
    return SSLSessionCache(_cygrpc.SSLSessionCacheLRU(capacity))
class SSLSessionCache(object):
    """An encapsulation of a session cache used for TLS session resumption.

    Instances of this class can be passed to a Channel as values for the
    grpc.ssl_session_cache option
    """

    def __init__(self, cache):
        # Keep a handle to the underlying (cygrpc) cache implementation.
        self._cache = cache

    def __int__(self):
        # Channel options pass the cache down to the core as an integer
        # handle, so delegate the conversion to the wrapped object.
        return int(self._cache)
| apache-2.0 |
vlinhd11/vlinhd11-android-scripting | python/src/Lib/bsddb/test/test_early_close.py | 34 | 6695 | """TestCases for checking that it does not segfault when a DBEnv object
is closed before its DB objects.
"""
import os
import unittest
from test_all import db, test_support, verbose, get_new_environment_path, get_new_database_path
# We're going to get warnings in this module about trying to close the db when
# its env is already closed.  Let's just ignore those.
# NOTE(review): "warnings" has been in the stdlib for a long time; the
# ImportError guard looks vestigial but is kept for odd embedded builds.
try:
    import warnings
except ImportError:
    pass
else:
    warnings.filterwarnings('ignore',
                            message='DB could not be closed in',
                            category=RuntimeWarning)
#----------------------------------------------------------------------
class DBEnvClosedEarlyCrash(unittest.TestCase):
    """Regression tests: closing a DBEnv/DB must also close its child
    handles (DBs, cursors, sequences) and raise DBError on later use,
    instead of segfaulting the interpreter."""

    def setUp(self):
        # Fresh on-disk environment per test; removed again in tearDown.
        self.homeDir = get_new_environment_path()
        self.filename = "test"

    def tearDown(self):
        test_support.rmtree(self.homeDir)

    def test01_close_dbenv_before_db(self):
        dbenv = db.DBEnv()
        dbenv.open(self.homeDir,
                   db.DB_INIT_CDB | db.DB_CREATE | db.DB_THREAD | db.DB_INIT_MPOOL,
                   0666)

        d = db.DB(dbenv)
        d2 = db.DB(dbenv)
        d.open(self.filename, db.DB_BTREE, db.DB_CREATE | db.DB_THREAD, 0666)

        # Opening a nonexistent file without DB_CREATE must fail cleanly.
        self.assertRaises(db.DBNoSuchFileError, d2.open,
                          self.filename + "2", db.DB_BTREE, db.DB_THREAD, 0666)

        d.put("test", "this is a test")
        self.assertEqual(d.get("test"), "this is a test", "put!=get")
        dbenv.close()  # This "close" should close the child db handle also
        self.assertRaises(db.DBError, d.get, "test")

    def test02_close_dbenv_before_dbcursor(self):
        dbenv = db.DBEnv()
        dbenv.open(self.homeDir,
                   db.DB_INIT_CDB | db.DB_CREATE | db.DB_THREAD | db.DB_INIT_MPOOL,
                   0666)

        d = db.DB(dbenv)
        d.open(self.filename, db.DB_BTREE, db.DB_CREATE | db.DB_THREAD, 0666)

        d.put("test", "this is a test")
        d.put("test2", "another test")
        d.put("test3", "another one")
        self.assertEqual(d.get("test"), "this is a test", "put!=get")
        c = d.cursor()
        c.first()
        c.next()
        d.close()  # This "close" should close the child db handle also
        # db.close should close the child cursor
        self.assertRaises(db.DBError, c.next)

        d = db.DB(dbenv)
        d.open(self.filename, db.DB_BTREE, db.DB_CREATE | db.DB_THREAD, 0666)
        c = d.cursor()
        c.first()
        c.next()
        dbenv.close()
        # The "close" should close the child db handle also, with cursors
        self.assertRaises(db.DBError, c.next)

    def test03_close_db_before_dbcursor_without_env(self):
        # Same as test02 but with a standalone DB (no environment).
        import os.path
        path = os.path.join(self.homeDir, self.filename)
        d = db.DB()
        d.open(path, db.DB_BTREE, db.DB_CREATE | db.DB_THREAD, 0666)
        d.put("test", "this is a test")
        d.put("test2", "another test")
        d.put("test3", "another one")
        self.assertEqual(d.get("test"), "this is a test", "put!=get")
        c = d.cursor()
        c.first()
        c.next()
        d.close()
        # The "close" should close the child db handle also
        self.assertRaises(db.DBError, c.next)

    def test04_close_massive(self):
        # Stress variant: many DB handles and cursors, closed interleaved.
        dbenv = db.DBEnv()
        dbenv.open(self.homeDir,
                   db.DB_INIT_CDB | db.DB_CREATE | db.DB_THREAD | db.DB_INIT_MPOOL,
                   0666)

        dbs = [db.DB(dbenv) for i in xrange(16)]
        cursors = []
        for i in dbs:
            i.open(self.filename, db.DB_BTREE, db.DB_CREATE | db.DB_THREAD, 0666)

        dbs[10].put("test", "this is a test")
        dbs[10].put("test2", "another test")
        dbs[10].put("test3", "another one")
        self.assertEqual(dbs[4].get("test"), "this is a test", "put!=get")

        for i in dbs:
            cursors.extend([i.cursor() for j in xrange(32)])

        # Close every third handle/cursor; the rest must notice later.
        for i in dbs[::3]:
            i.close()
        for i in cursors[::3]:
            i.close()

        # Check for missing exception in DB! (after DB close)
        self.assertRaises(db.DBError, dbs[9].get, "test")

        # Check for missing exception in DBCursor! (after DB close)
        self.assertRaises(db.DBError, cursors[101].first)

        cursors[80].first()
        cursors[80].next()
        dbenv.close()  # This "close" should close the child db handle also
        # Check for missing exception! (after DBEnv close)
        self.assertRaises(db.DBError, cursors[80].next)

    def test05_close_dbenv_delete_db_success(self):
        # Deleting a DB object after its env closed must not crash.
        dbenv = db.DBEnv()
        dbenv.open(self.homeDir,
                   db.DB_INIT_CDB | db.DB_CREATE | db.DB_THREAD | db.DB_INIT_MPOOL,
                   0666)

        d = db.DB(dbenv)
        d.open(self.filename, db.DB_BTREE, db.DB_CREATE | db.DB_THREAD, 0666)

        dbenv.close()  # This "close" should close the child db handle also

        del d
        try:
            import gc
        except ImportError:
            gc = None
        if gc:
            # force d.__del__ [DB_dealloc] to be called
            gc.collect()

    def test06_close_txn_before_dup_cursor(self):
        dbenv = db.DBEnv()
        dbenv.open(self.homeDir, db.DB_INIT_TXN | db.DB_INIT_MPOOL |
                   db.DB_INIT_LOG | db.DB_CREATE)
        d = db.DB(dbenv)
        txn = dbenv.txn_begin()
        # Berkeley DB < 4.1 did not accept a txn on DB.open().
        if db.version() < (4, 1):
            d.open(self.filename, dbtype=db.DB_HASH, flags=db.DB_CREATE)
        else:
            d.open(self.filename, dbtype=db.DB_HASH, flags=db.DB_CREATE,
                   txn=txn)
        d.put("XXX", "yyy", txn=txn)
        txn.commit()
        txn = dbenv.txn_begin()
        c1 = d.cursor(txn)
        c2 = c1.dup()
        self.assertEquals(("XXX", "yyy"), c1.first())
        import warnings
        # Not interested in warnings about implicit close.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            # Committing the txn implicitly closes the cursors it owns.
            txn.commit()
        self.assertRaises(db.DBCursorClosedError, c2.first)

    # DBSequence only exists from Berkeley DB 4.3 onwards.
    if db.version() > (4, 3, 0):
        def test07_close_db_before_sequence(self):
            import os.path
            path = os.path.join(self.homeDir, self.filename)
            d = db.DB()
            d.open(path, db.DB_BTREE, db.DB_CREATE | db.DB_THREAD, 0666)
            dbs = db.DBSequence(d)
            d.close()  # This "close" should close the child DBSequence also
            dbs.close()  # If not closed, core dump (in Berkeley DB 4.6.*)
#----------------------------------------------------------------------
def test_suite():
    """Collect every test case in this module into a single suite."""
    suite = unittest.TestSuite()
    suite.addTest(unittest.makeSuite(DBEnvClosedEarlyCrash))
    return suite
# Run the suite when executed directly.
if __name__ == '__main__':
    unittest.main(defaultTest='test_suite')
| apache-2.0 |
sebastien-forestier/pydmps | pydmps/dmp.py | 3 | 7418 | '''
Copyright (C) 2013 Travis DeWolf
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import numpy as np
from cs import CanonicalSystem
class DMPs(object):
    """Implementation of Dynamic Motor Primitives,
    as described in Dr. Stefan Schaal's (2002) paper.

    Concrete variants must implement the forcing-term hooks:
    gen_front_term(), gen_goal(), gen_psi() and gen_weights().
    """

    def __init__(self, dmps, bfs, dt=.01,
                 y0=0, goal=1, w=None,
                 ay=None, by=None, **kwargs):
        """
        dmps int: number of dynamic motor primitives
        bfs int: number of basis functions per DMP
        dt float: timestep for simulation
        y0 list: initial state of DMPs
        goal list: goal state of DMPs
        w list: tunable parameters, control amplitude of basis functions
        ay int: gain on attractor term y dynamics
        by int: gain on attractor term y dynamics
        """
        self.dmps = dmps
        self.bfs = bfs
        self.dt = dt
        # Scalars are broadcast to one value per DMP.
        if isinstance(y0, (int, float)):
            y0 = np.ones(self.dmps) * y0
        self.y0 = y0
        if isinstance(goal, (int, float)):
            goal = np.ones(self.dmps) * goal
        self.goal = goal
        if w is None:
            # default is f = 0
            w = np.zeros((self.dmps, self.bfs))
        self.w = w

        if ay is None:
            ay = np.ones(dmps) * 25.  # Schaal 2012
        self.ay = ay
        if by is None:
            by = self.ay.copy() / 4.  # Schaal 2012 (critically damped)
        self.by = by

        # set up the canonical system that drives the phase variable x
        self.cs = CanonicalSystem(dt=self.dt, **kwargs)
        self.timesteps = int(self.cs.run_time / self.dt)

        # set up the DMP system
        self.reset_state()

    def check_offset(self):
        """Check to see if initial position and goal are the same
        if they are, offset slightly so that the forcing term is not 0"""
        for d in range(self.dmps):
            if (self.y0[d] == self.goal[d]):
                self.goal[d] += 1e-4

    # --- hooks that concrete DMP variants must implement -----------------
    def gen_front_term(self, x, dmp_num):
        raise NotImplementedError()

    def gen_goal(self, y_des):
        raise NotImplementedError()

    def gen_psi(self, x):
        # BUG FIX: this stub previously took no 'x' parameter although
        # every call site invokes self.gen_psi(x), so calling the base
        # class raised TypeError instead of the intended
        # NotImplementedError.  Subclasses already implement (self, x).
        raise NotImplementedError()

    def gen_weights(self, f_target):
        raise NotImplementedError()

    def imitate_path(self, y_des):
        """Takes in a desired trajectory and generates the set of
        system parameters that best realize this path.

        y_des list/array: the desired trajectories of each DMP
                          should be shaped [dmps, run_time]
        """
        # set initial state and goal
        if y_des.ndim == 1:
            y_des = y_des.reshape(1, len(y_des))
        self.y0 = y_des[:, 0].copy()
        self.y_des = y_des.copy()
        self.goal = self.gen_goal(y_des)

        self.check_offset()

        if not (self.timesteps == y_des.shape[1]):
            # generate function to interpolate the desired trajectory
            import scipy.interpolate
            path = np.zeros((self.dmps, self.timesteps))
            x = np.linspace(0, self.cs.run_time, y_des.shape[1])
            for d in range(self.dmps):
                path_gen = scipy.interpolate.interp1d(x, y_des[d])
                for t in range(self.timesteps):
                    path[d, t] = path_gen(t * self.dt)
            y_des = path

        # calculate velocity of y_des
        dy_des = np.diff(y_des) / self.dt
        # add zero to the beginning of every row
        dy_des = np.hstack((np.zeros((self.dmps, 1)), dy_des))

        # calculate acceleration of y_des
        ddy_des = np.diff(dy_des) / self.dt
        # add zero to the beginning of every row
        ddy_des = np.hstack((np.zeros((self.dmps, 1)), ddy_des))

        f_target = np.zeros((y_des.shape[1], self.dmps))
        # find the force required to move along this trajectory
        for d in range(self.dmps):
            f_target[:, d] = ddy_des[d] - self.ay[d] * \
                (self.by[d] * (self.goal[d] - y_des[d]) -
                 dy_des[d])

        # efficiently generate weights to realize f_target
        self.gen_weights(f_target)

        '''# plot the basis function activations
        import matplotlib.pyplot as plt
        plt.figure()
        plt.subplot(211)
        plt.plot(psi_track)
        plt.title('psi_track')

        # plot the desired forcing function vs approx
        plt.subplot(212)
        plt.plot(f_target[:,0])
        plt.plot(np.sum(psi_track * self.w[0], axis=1))
        plt.legend(['f_target', 'w*psi'])
        plt.tight_layout()
        plt.show()'''

        self.reset_state()
        return y_des

    def rollout(self, timesteps=None, **kwargs):
        """Generate a system trial, no feedback is incorporated."""
        self.reset_state()

        if timesteps is None:
            # "'tau' in kwargs" replaces dict.has_key(), which was
            # Python-2-only; 'in' behaves identically on both versions.
            if 'tau' in kwargs:
                timesteps = int(self.timesteps / kwargs['tau'])
            else:
                timesteps = self.timesteps

        # set up tracking vectors
        y_track = np.zeros((timesteps, self.dmps))
        dy_track = np.zeros((timesteps, self.dmps))
        ddy_track = np.zeros((timesteps, self.dmps))

        for t in range(timesteps):
            y, dy, ddy = self.step(**kwargs)
            # record timestep
            y_track[t] = y
            dy_track[t] = dy
            ddy_track[t] = ddy

        return y_track, dy_track, ddy_track

    def reset_state(self):
        """Reset the system state"""
        self.y = self.y0.copy()
        self.dy = np.zeros(self.dmps)
        self.ddy = np.zeros(self.dmps)
        self.cs.reset_state()

    def step(self, tau=1.0, state_fb=None):
        """Run the DMP system for a single timestep.

        tau float: scales the timestep
                   increase tau to make the system execute faster
        state_fb np.array: optional system feedback
        """
        # run canonical system
        cs_args = {'tau': tau,
                   'error_coupling': 1.0}
        if state_fb is not None:
            # take the 2 norm of the overall error
            state_fb = state_fb.reshape(1, self.dmps)
            dist = np.sqrt(np.sum((state_fb - self.y)**2))
            cs_args['error_coupling'] = 1.0 / (1.0 + 10*dist)
        x = self.cs.step(**cs_args)

        # generate basis function activation
        psi = self.gen_psi(x)

        for d in range(self.dmps):
            # generate the forcing term (zero when there are no basis fns)
            f = self.gen_front_term(x, d) * \
                (np.dot(psi, self.w[d])) / np.sum(psi) if self.bfs > 0. else 0.
            # DMP acceleration
            self.ddy[d] = (self.ay[d] *
                           (self.by[d] * (self.goal[d] - self.y[d]) -
                            self.dy[d]/tau) + f) * tau
            self.dy[d] += self.ddy[d] * tau * self.dt * cs_args['error_coupling']
            self.y[d] += self.dy[d] * self.dt * cs_args['error_coupling']

        return self.y, self.dy, self.ddy
| gpl-3.0 |
monapasan/bachelor-thesis | tests/test_GroupDataset.py | 1 | 2419 | """Test GroupDataset module."""
from .context import GroupDataset
from .context import IndexGenerator
# from nose.tools import raises
from tensorflow.examples.tutorials.mnist import input_data
# Raw MNIST is read once at import time; the fixtures below slice it.
mnist_raw = input_data.read_data_sets('../data/MNIST_data', one_hot=True)

MNIST_size = 28          # MNIST images are 28x28 pixels
images_per_sample = 5    # images grouped together into one sample
amount_of_classes = 3    # number of group classes in the dataset fixture
MNIST_classes_n = 10     # digit classes in raw MNIST
def init_indexGenerator():
    """Build the IndexGenerator fixture used by the tests below.

    get_indexes(1) on the result returns an array shaped
    [1, class_amount, noise_sizes], e.g. [[1, 5, 3, 1], [1, 5, 3], [3, 4]].
    """
    n_classes = 3
    noise_per_class = [4, 3, 2]
    return IndexGenerator(noise_per_class, n_classes)
def init_raw_dataset():
    """Build the GroupDataset fixture: 3 classes, 15000 samples each."""
    # Label positions 1 and 2 act as noise; the rest carry data.
    noise_label_index = [1, 2]
    data_label_index = [0, 3, 4, 5, 6, 7, 8, 9]
    samples_per_class = [15000] * 3
    noise_per_class = [4, 3, 2]
    return GroupDataset(
        init_indexGenerator(), mnist_raw.train, noise_label_index,
        data_label_index, amount_of_classes, noise_per_class,
        samples_per_class, images_per_sample
    )
# Module-level fixture shared by every test below (built once at import).
myGroupDataset = init_raw_dataset()
def test_images_shape():
    """Return expected shape of images."""
    # 45000 samples x 5 images x 784 (= 28*28) flattened pixels.
    assert myGroupDataset.images.shape == (45000, 5, 784)
def test_labels_shape():
    """Return expected shape of labels."""
    # One scalar label per sample.
    assert myGroupDataset.labels.shape == (45000, )
def test_rawdataset_length():
    """Test whether the GroupDataset has the expected number of examples."""
    # 3 classes x 15000 samples per class.
    assert myGroupDataset.num_examples == 45000
def test_get_next_batch_forClass():
    """Test the function `next_batch`.

    As GroupDataset uses a uniform distribution to choose the indexes
    of noise images, we check only the expected shape of the return value.
    """
    size = 100
    images, labels = myGroupDataset.next_batch(size)
    assert images.shape == (size, 5, 784)
    assert labels.shape == (size, )
# images, labels = myGroupDataset.next_batch_for_class(class_n, size)
# assert images.shape == (size, images_per_sample, MNIST_size * MNIST_size)
# assert labels.shape == (size, images_per_sample, MNIST_classes_n)
# myGroupDataset.next_batch(size = 1)
# [[size x images], [size x labels]]
# images = [images_per_sample x image_size]
# labels = [images_per_sample x label_size]
# {labels: }[[image0,], [image], [image]]
| mit |
StackStorm/st2contrib | archive/packs/smartthings/actions/lib/action.py | 10 | 1350 | from st2actions.runners.pythonrunner import Action
import requests
class BaseAction(Action):
    """Common plumbing for SmartThings actions.

    Lazily resolves (and caches) the API endpoint URL and the
    bearer-token auth headers from the pack configuration.
    """

    def __init__(self, config):
        super(BaseAction, self).__init__(config)
        self.url = None
        self.headers = None

    def _headers(self):
        # Build the Authorization header on first use and cache it.
        if not self.headers:
            api_token = self.config.get('api_token', None)
            if not api_token:
                raise ValueError('Missing "api_token" config option')
            self.headers = {
                "Authorization": "Bearer {}".format(api_token)
            }
        return self.headers

    def _url(self):
        # Resolve the API endpoint on first use and cache it.
        if not self.url:
            endpoint = self.config.get('api_endpoint', None)
            if not endpoint:
                raise ValueError('Missing "api_endpoint" config option')
            self.url = endpoint
        return self.url

    def _get(self, endpoint):
        # GET <base><endpoint> and decode the JSON response body.
        target = self._url() + endpoint
        return requests.get(target, headers=self._headers()).json()

    def _put(self, endpoint, params):
        # PUT <base><endpoint>; some calls return an empty body on success,
        # which is normalized to a small "ok" payload.
        target = self._url() + endpoint
        response = requests.put(target, params=params,
                                headers=self._headers())
        if not response.text:
            return {"message": "ok"}
        return response.json()
| apache-2.0 |
xwliu/Cubietruck_Plus-kernel-source | Documentation/target/tcm_mod_builder.py | 4981 | 41422 | #!/usr/bin/python
# The TCM v4 multi-protocol fabric module generation script for drivers/target/$NEW_MOD
#
# Copyright (c) 2010 Rising Tide Systems
# Copyright (c) 2010 Linux-iSCSI.org
#
# Author: nab@kernel.org
#
import os, sys
import subprocess as sub
import string
import re
import optparse
# Module-level state shared between the generator helpers below.
tcm_dir = ""
fabric_ops = []
fabric_mod_dir = ""
fabric_mod_port = ""        # "lport"/"tport": set by the *_include builders
fabric_mod_init_port = ""   # "nport"/"iport": set by the *_include builders
def tcm_mod_err(msg):
    # Report a fatal generator error and exit with a non-zero status.
    print msg
    sys.exit(1)
def tcm_mod_create_module_subdir(fabric_mod_dir_var):
    # Nothing to do if the target directory already exists.
    if os.path.isdir(fabric_mod_dir_var) == True:
        return 1

    print "Creating fabric_mod_dir: " + fabric_mod_dir_var
    ret = os.mkdir(fabric_mod_dir_var)
    # NOTE(review): os.mkdir() returns None, so this error branch can
    # never fire; a failed mkdir raises OSError instead.
    if ret:
        tcm_mod_err("Unable to mkdir " + fabric_mod_dir_var)

    return
def tcm_mod_build_FC_include(fabric_mod_dir_var, fabric_mod_name):
    """Emit <fabric_mod_name>_base.h with Fibre Channel flavoured
    nacl/tpg/lport structs and record the FC port naming convention."""
    global fabric_mod_port
    global fabric_mod_init_port
    buf = ""

    f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
    print "Writing file: " + f

    # NOTE(review): open() raises IOError on failure in Python 2, so this
    # falsy-handle check can never fire.
    p = open(f, 'w');
    if not p:
        tcm_mod_err("Unable to open file: " + f)

    buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
    buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
    buf += "\n"
    buf += "struct " + fabric_mod_name + "_nacl {\n"
    buf += " /* Binary World Wide unique Port Name for FC Initiator Nport */\n"
    buf += " u64 nport_wwpn;\n"
    buf += " /* ASCII formatted WWPN for FC Initiator Nport */\n"
    buf += " char nport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
    buf += " struct se_node_acl se_node_acl;\n"
    buf += "};\n"
    buf += "\n"
    buf += "struct " + fabric_mod_name + "_tpg {\n"
    buf += " /* FC lport target portal group tag for TCM */\n"
    buf += " u16 lport_tpgt;\n"
    buf += " /* Pointer back to " + fabric_mod_name + "_lport */\n"
    buf += " struct " + fabric_mod_name + "_lport *lport;\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
    buf += " struct se_portal_group se_tpg;\n"
    buf += "};\n"
    buf += "\n"
    buf += "struct " + fabric_mod_name + "_lport {\n"
    buf += " /* SCSI protocol the lport is providing */\n"
    buf += " u8 lport_proto_id;\n"
    buf += " /* Binary World Wide unique Port Name for FC Target Lport */\n"
    buf += " u64 lport_wwpn;\n"
    buf += " /* ASCII formatted WWPN for FC Target Lport */\n"
    buf += " char lport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_lport() */\n"
    buf += " struct se_wwn lport_wwn;\n"
    buf += "};\n"

    ret = p.write(buf)
    # NOTE(review): file.write() returns None in Python 2; this error
    # branch is dead and write failures raise IOError instead.
    if ret:
        tcm_mod_err("Unable to write f: " + f)
    p.close()

    fabric_mod_port = "lport"
    fabric_mod_init_port = "nport"
    return
def tcm_mod_build_SAS_include(fabric_mod_dir_var, fabric_mod_name):
    """Emit <fabric_mod_name>_base.h with SAS flavoured nacl/tpg/tport
    structs and record the SAS port naming convention."""
    global fabric_mod_port
    global fabric_mod_init_port
    buf = ""

    f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
    print "Writing file: " + f

    # NOTE(review): open() raises IOError on failure in Python 2, so this
    # falsy-handle check can never fire.
    p = open(f, 'w');
    if not p:
        tcm_mod_err("Unable to open file: " + f)

    buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
    buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
    buf += "\n"
    buf += "struct " + fabric_mod_name + "_nacl {\n"
    buf += " /* Binary World Wide unique Port Name for SAS Initiator port */\n"
    buf += " u64 iport_wwpn;\n"
    buf += " /* ASCII formatted WWPN for Sas Initiator port */\n"
    buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
    buf += " struct se_node_acl se_node_acl;\n"
    buf += "};\n\n"
    buf += "struct " + fabric_mod_name + "_tpg {\n"
    buf += " /* SAS port target portal group tag for TCM */\n"
    buf += " u16 tport_tpgt;\n"
    buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
    buf += " struct " + fabric_mod_name + "_tport *tport;\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
    buf += " struct se_portal_group se_tpg;\n"
    buf += "};\n\n"
    buf += "struct " + fabric_mod_name + "_tport {\n"
    buf += " /* SCSI protocol the tport is providing */\n"
    buf += " u8 tport_proto_id;\n"
    buf += " /* Binary World Wide unique Port Name for SAS Target port */\n"
    buf += " u64 tport_wwpn;\n"
    buf += " /* ASCII formatted WWPN for SAS Target port */\n"
    buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
    buf += " struct se_wwn tport_wwn;\n"
    buf += "};\n"

    ret = p.write(buf)
    # NOTE(review): dead branch, see tcm_mod_build_FC_include().
    if ret:
        tcm_mod_err("Unable to write f: " + f)
    p.close()

    fabric_mod_port = "tport"
    fabric_mod_init_port = "iport"
    return
def tcm_mod_build_iSCSI_include(fabric_mod_dir_var, fabric_mod_name):
    """Emit <fabric_mod_name>_base.h with iSCSI flavoured nacl/tpg/tport
    structs (IQN-based names, no binary WWPNs) and record the naming
    convention."""
    global fabric_mod_port
    global fabric_mod_init_port
    buf = ""

    f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
    print "Writing file: " + f

    # NOTE(review): open() raises IOError on failure in Python 2, so this
    # falsy-handle check can never fire.
    p = open(f, 'w');
    if not p:
        tcm_mod_err("Unable to open file: " + f)

    buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
    buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
    buf += "\n"
    buf += "struct " + fabric_mod_name + "_nacl {\n"
    buf += " /* ASCII formatted InitiatorName */\n"
    buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
    buf += " struct se_node_acl se_node_acl;\n"
    buf += "};\n\n"
    buf += "struct " + fabric_mod_name + "_tpg {\n"
    buf += " /* iSCSI target portal group tag for TCM */\n"
    buf += " u16 tport_tpgt;\n"
    buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
    buf += " struct " + fabric_mod_name + "_tport *tport;\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
    buf += " struct se_portal_group se_tpg;\n"
    buf += "};\n\n"
    buf += "struct " + fabric_mod_name + "_tport {\n"
    buf += " /* SCSI protocol the tport is providing */\n"
    buf += " u8 tport_proto_id;\n"
    buf += " /* ASCII formatted TargetName for IQN */\n"
    buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
    buf += " struct se_wwn tport_wwn;\n"
    buf += "};\n"

    ret = p.write(buf)
    # NOTE(review): dead branch, see tcm_mod_build_FC_include().
    if ret:
        tcm_mod_err("Unable to write f: " + f)
    p.close()

    fabric_mod_port = "tport"
    fabric_mod_init_port = "iport"
    return
def tcm_mod_build_base_includes(proto_ident, fabric_mod_dir_val, fabric_mod_name):
    """Dispatch on the fabric's wire protocol to emit the right *_base.h."""
    if proto_ident == "FC":
        tcm_mod_build_FC_include(fabric_mod_dir_val, fabric_mod_name)
    elif proto_ident == "SAS":
        tcm_mod_build_SAS_include(fabric_mod_dir_val, fabric_mod_name)
    elif proto_ident == "iSCSI":
        tcm_mod_build_iSCSI_include(fabric_mod_dir_val, fabric_mod_name)
    else:
        # Unknown protocol identifier: report and abort the generator.
        print "Unsupported proto_ident: " + proto_ident
        sys.exit(1)

    return
def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
	"""Generate <fabric_mod_name>_configfs.c for the new fabric module.

	Emits the configfs glue: the nodeacl/tpg/wwn make+drop callbacks,
	a read-only "version" wwn attribute, the target_core_fabric_ops
	table wired to the <fabric_mod_name>_* stubs produced by
	tcm_mod_dump_fabric_ops(), and the module init/exit registration
	boilerplate.  proto_ident ("FC", "SAS" or "iSCSI") only controls
	the optional WWPN handling emitted into make_nodeacl()/make_<port>().
	"""
	buf = ""
	f = fabric_mod_dir_var + "/" + fabric_mod_name + "_configfs.c"
	print "Writing file: " + f
	p = open(f, 'w');
	# NOTE(review): open() raises IOError on failure, so this check can
	# never fire; preserved as-is.
	if not p:
		tcm_mod_err("Unable to open file: " + f)
	# Kernel headers needed by every generated _configfs.c.
	buf = "#include <linux/module.h>\n"
	buf += "#include <linux/moduleparam.h>\n"
	buf += "#include <linux/version.h>\n"
	buf += "#include <generated/utsrelease.h>\n"
	buf += "#include <linux/utsname.h>\n"
	buf += "#include <linux/init.h>\n"
	buf += "#include <linux/slab.h>\n"
	buf += "#include <linux/kthread.h>\n"
	buf += "#include <linux/types.h>\n"
	buf += "#include <linux/string.h>\n"
	buf += "#include <linux/configfs.h>\n"
	buf += "#include <linux/ctype.h>\n"
	buf += "#include <asm/unaligned.h>\n\n"
	buf += "#include <target/target_core_base.h>\n"
	buf += "#include <target/target_core_fabric.h>\n"
	buf += "#include <target/target_core_fabric_configfs.h>\n"
	buf += "#include <target/target_core_configfs.h>\n"
	buf += "#include <target/configfs_macros.h>\n\n"
	buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
	buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
	buf += "/* Local pointer to allocated TCM configfs fabric module */\n"
	buf += "struct target_fabric_configfs *" + fabric_mod_name + "_fabric_configfs;\n\n"
	# make_nodeacl() callback; WWPN parsing only emitted for FC/SAS.
	buf += "static struct se_node_acl *" + fabric_mod_name + "_make_nodeacl(\n"
	buf += " struct se_portal_group *se_tpg,\n"
	buf += " struct config_group *group,\n"
	buf += " const char *name)\n"
	buf += "{\n"
	buf += " struct se_node_acl *se_nacl, *se_nacl_new;\n"
	buf += " struct " + fabric_mod_name + "_nacl *nacl;\n"
	if proto_ident == "FC" or proto_ident == "SAS":
		buf += " u64 wwpn = 0;\n"
	buf += " u32 nexus_depth;\n\n"
	buf += " /* " + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
	buf += " return ERR_PTR(-EINVAL); */\n"
	buf += " se_nacl_new = " + fabric_mod_name + "_alloc_fabric_acl(se_tpg);\n"
	buf += " if (!se_nacl_new)\n"
	buf += " return ERR_PTR(-ENOMEM);\n"
	buf += "//#warning FIXME: Hardcoded nexus depth in " + fabric_mod_name + "_make_nodeacl()\n"
	buf += " nexus_depth = 1;\n"
	buf += " /*\n"
	buf += " * se_nacl_new may be released by core_tpg_add_initiator_node_acl()\n"
	buf += " * when converting a NodeACL from demo mode -> explict\n"
	buf += " */\n"
	buf += " se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,\n"
	buf += " name, nexus_depth);\n"
	buf += " if (IS_ERR(se_nacl)) {\n"
	buf += " " + fabric_mod_name + "_release_fabric_acl(se_tpg, se_nacl_new);\n"
	buf += " return se_nacl;\n"
	buf += " }\n"
	buf += " /*\n"
	buf += " * Locate our struct " + fabric_mod_name + "_nacl and set the FC Nport WWPN\n"
	buf += " */\n"
	buf += " nacl = container_of(se_nacl, struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
	if proto_ident == "FC" or proto_ident == "SAS":
		buf += " nacl->" + fabric_mod_init_port + "_wwpn = wwpn;\n"
	buf += " /* " + fabric_mod_name + "_format_wwn(&nacl->" + fabric_mod_init_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
	buf += " return se_nacl;\n"
	buf += "}\n\n"
	# drop_nodeacl() callback.
	buf += "static void " + fabric_mod_name + "_drop_nodeacl(struct se_node_acl *se_acl)\n"
	buf += "{\n"
	buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_acl,\n"
	buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
	buf += " core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);\n"
	buf += " kfree(nacl);\n"
	buf += "}\n\n"
	# make_tpg()/drop_tpg() callbacks.
	buf += "static struct se_portal_group *" + fabric_mod_name + "_make_tpg(\n"
	buf += " struct se_wwn *wwn,\n"
	buf += " struct config_group *group,\n"
	buf += " const char *name)\n"
	buf += "{\n"
	buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + "*" + fabric_mod_port + " = container_of(wwn,\n"
	buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n\n"
	buf += " struct " + fabric_mod_name + "_tpg *tpg;\n"
	buf += " unsigned long tpgt;\n"
	buf += " int ret;\n\n"
	buf += " if (strstr(name, \"tpgt_\") != name)\n"
	buf += " return ERR_PTR(-EINVAL);\n"
	buf += " if (strict_strtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)\n"
	buf += " return ERR_PTR(-EINVAL);\n\n"
	buf += " tpg = kzalloc(sizeof(struct " + fabric_mod_name + "_tpg), GFP_KERNEL);\n"
	buf += " if (!tpg) {\n"
	buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_tpg\");\n"
	buf += " return ERR_PTR(-ENOMEM);\n"
	buf += " }\n"
	buf += " tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n"
	buf += " tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n"
	buf += " ret = core_tpg_register(&" + fabric_mod_name + "_fabric_configfs->tf_ops, wwn,\n"
	buf += " &tpg->se_tpg, (void *)tpg,\n"
	buf += " TRANSPORT_TPG_TYPE_NORMAL);\n"
	buf += " if (ret < 0) {\n"
	buf += " kfree(tpg);\n"
	buf += " return NULL;\n"
	buf += " }\n"
	buf += " return &tpg->se_tpg;\n"
	buf += "}\n\n"
	buf += "static void " + fabric_mod_name + "_drop_tpg(struct se_portal_group *se_tpg)\n"
	buf += "{\n"
	buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
	buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n\n"
	buf += " core_tpg_deregister(se_tpg);\n"
	buf += " kfree(tpg);\n"
	buf += "}\n\n"
	# make_<port>()/drop_<port>() wwn callbacks; WWPN handling FC/SAS only.
	buf += "static struct se_wwn *" + fabric_mod_name + "_make_" + fabric_mod_port + "(\n"
	buf += " struct target_fabric_configfs *tf,\n"
	buf += " struct config_group *group,\n"
	buf += " const char *name)\n"
	buf += "{\n"
	buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + ";\n"
	if proto_ident == "FC" or proto_ident == "SAS":
		buf += " u64 wwpn = 0;\n\n"
	buf += " /* if (" + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
	buf += " return ERR_PTR(-EINVAL); */\n\n"
	buf += " " + fabric_mod_port + " = kzalloc(sizeof(struct " + fabric_mod_name + "_" + fabric_mod_port + "), GFP_KERNEL);\n"
	buf += " if (!" + fabric_mod_port + ") {\n"
	buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_" + fabric_mod_port + "\");\n"
	buf += " return ERR_PTR(-ENOMEM);\n"
	buf += " }\n"
	if proto_ident == "FC" or proto_ident == "SAS":
		buf += " " + fabric_mod_port + "->" + fabric_mod_port + "_wwpn = wwpn;\n"
	buf += " /* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
	buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_wwn;\n"
	buf += "}\n\n"
	buf += "static void " + fabric_mod_name + "_drop_" + fabric_mod_port + "(struct se_wwn *wwn)\n"
	buf += "{\n"
	buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = container_of(wwn,\n"
	buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n"
	buf += " kfree(" + fabric_mod_port + ");\n"
	buf += "}\n\n"
	# Read-only "version" configfs attribute.
	buf += "static ssize_t " + fabric_mod_name + "_wwn_show_attr_version(\n"
	buf += " struct target_fabric_configfs *tf,\n"
	buf += " char *page)\n"
	buf += "{\n"
	buf += " return sprintf(page, \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
	buf += " \"on \"UTS_RELEASE\"\\n\", " + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
	buf += " utsname()->machine);\n"
	buf += "}\n\n"
	buf += "TF_WWN_ATTR_RO(" + fabric_mod_name + ", version);\n\n"
	buf += "static struct configfs_attribute *" + fabric_mod_name + "_wwn_attrs[] = {\n"
	buf += " &" + fabric_mod_name + "_wwn_version.attr,\n"
	buf += " NULL,\n"
	buf += "};\n\n"
	# Fabric ops table: every member points at a generated stub.
	buf += "static struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n"
	buf += " .get_fabric_name = " + fabric_mod_name + "_get_fabric_name,\n"
	buf += " .get_fabric_proto_ident = " + fabric_mod_name + "_get_fabric_proto_ident,\n"
	buf += " .tpg_get_wwn = " + fabric_mod_name + "_get_fabric_wwn,\n"
	buf += " .tpg_get_tag = " + fabric_mod_name + "_get_tag,\n"
	buf += " .tpg_get_default_depth = " + fabric_mod_name + "_get_default_depth,\n"
	buf += " .tpg_get_pr_transport_id = " + fabric_mod_name + "_get_pr_transport_id,\n"
	buf += " .tpg_get_pr_transport_id_len = " + fabric_mod_name + "_get_pr_transport_id_len,\n"
	buf += " .tpg_parse_pr_out_transport_id = " + fabric_mod_name + "_parse_pr_out_transport_id,\n"
	buf += " .tpg_check_demo_mode = " + fabric_mod_name + "_check_false,\n"
	buf += " .tpg_check_demo_mode_cache = " + fabric_mod_name + "_check_true,\n"
	buf += " .tpg_check_demo_mode_write_protect = " + fabric_mod_name + "_check_true,\n"
	buf += " .tpg_check_prod_mode_write_protect = " + fabric_mod_name + "_check_false,\n"
	buf += " .tpg_alloc_fabric_acl = " + fabric_mod_name + "_alloc_fabric_acl,\n"
	buf += " .tpg_release_fabric_acl = " + fabric_mod_name + "_release_fabric_acl,\n"
	buf += " .tpg_get_inst_index = " + fabric_mod_name + "_tpg_get_inst_index,\n"
	buf += " .release_cmd = " + fabric_mod_name + "_release_cmd,\n"
	buf += " .shutdown_session = " + fabric_mod_name + "_shutdown_session,\n"
	buf += " .close_session = " + fabric_mod_name + "_close_session,\n"
	buf += " .stop_session = " + fabric_mod_name + "_stop_session,\n"
	buf += " .fall_back_to_erl0 = " + fabric_mod_name + "_reset_nexus,\n"
	buf += " .sess_logged_in = " + fabric_mod_name + "_sess_logged_in,\n"
	buf += " .sess_get_index = " + fabric_mod_name + "_sess_get_index,\n"
	buf += " .sess_get_initiator_sid = NULL,\n"
	buf += " .write_pending = " + fabric_mod_name + "_write_pending,\n"
	buf += " .write_pending_status = " + fabric_mod_name + "_write_pending_status,\n"
	buf += " .set_default_node_attributes = " + fabric_mod_name + "_set_default_node_attrs,\n"
	buf += " .get_task_tag = " + fabric_mod_name + "_get_task_tag,\n"
	buf += " .get_cmd_state = " + fabric_mod_name + "_get_cmd_state,\n"
	buf += " .queue_data_in = " + fabric_mod_name + "_queue_data_in,\n"
	buf += " .queue_status = " + fabric_mod_name + "_queue_status,\n"
	buf += " .queue_tm_rsp = " + fabric_mod_name + "_queue_tm_rsp,\n"
	buf += " .get_fabric_sense_len = " + fabric_mod_name + "_get_fabric_sense_len,\n"
	buf += " .set_fabric_sense_len = " + fabric_mod_name + "_set_fabric_sense_len,\n"
	buf += " .is_state_remove = " + fabric_mod_name + "_is_state_remove,\n"
	buf += " /*\n"
	buf += " * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
	buf += " */\n"
	buf += " .fabric_make_wwn = " + fabric_mod_name + "_make_" + fabric_mod_port + ",\n"
	buf += " .fabric_drop_wwn = " + fabric_mod_name + "_drop_" + fabric_mod_port + ",\n"
	buf += " .fabric_make_tpg = " + fabric_mod_name + "_make_tpg,\n"
	buf += " .fabric_drop_tpg = " + fabric_mod_name + "_drop_tpg,\n"
	buf += " .fabric_post_link = NULL,\n"
	buf += " .fabric_pre_unlink = NULL,\n"
	buf += " .fabric_make_np = NULL,\n"
	buf += " .fabric_drop_np = NULL,\n"
	buf += " .fabric_make_nodeacl = " + fabric_mod_name + "_make_nodeacl,\n"
	buf += " .fabric_drop_nodeacl = " + fabric_mod_name + "_drop_nodeacl,\n"
	buf += "};\n\n"
	# Module registration/deregistration with the TCM configfs core.
	buf += "static int " + fabric_mod_name + "_register_configfs(void)\n"
	buf += "{\n"
	buf += " struct target_fabric_configfs *fabric;\n"
	buf += " int ret;\n\n"
	buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
	buf += " \" on \"UTS_RELEASE\"\\n\"," + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
	buf += " utsname()->machine);\n"
	buf += " /*\n"
	buf += " * Register the top level struct config_item_type with TCM core\n"
	buf += " */\n"
	buf += " fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name[4:] + "\");\n"
	buf += " if (IS_ERR(fabric)) {\n"
	buf += " printk(KERN_ERR \"target_fabric_configfs_init() failed\\n\");\n"
	buf += " return PTR_ERR(fabric);\n"
	buf += " }\n"
	buf += " /*\n"
	buf += " * Setup fabric->tf_ops from our local " + fabric_mod_name + "_ops\n"
	buf += " */\n"
	buf += " fabric->tf_ops = " + fabric_mod_name + "_ops;\n"
	buf += " /*\n"
	buf += " * Setup default attribute lists for various fabric->tf_cit_tmpl\n"
	buf += " */\n"
	buf += " TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = " + fabric_mod_name + "_wwn_attrs;\n"
	buf += " TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = NULL;\n"
	buf += " TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;\n"
	buf += " TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;\n"
	buf += " TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;\n"
	buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;\n"
	buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;\n"
	buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;\n"
	buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;\n"
	buf += " /*\n"
	buf += " * Register the fabric for use within TCM\n"
	buf += " */\n"
	buf += " ret = target_fabric_configfs_register(fabric);\n"
	buf += " if (ret < 0) {\n"
	buf += " printk(KERN_ERR \"target_fabric_configfs_register() failed\"\n"
	buf += " \" for " + fabric_mod_name.upper() + "\\n\");\n"
	buf += " return ret;\n"
	buf += " }\n"
	buf += " /*\n"
	buf += " * Setup our local pointer to *fabric\n"
	buf += " */\n"
	buf += " " + fabric_mod_name + "_fabric_configfs = fabric;\n"
	buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Set fabric -> " + fabric_mod_name + "_fabric_configfs\\n\");\n"
	buf += " return 0;\n"
	buf += "};\n\n"
	buf += "static void __exit " + fabric_mod_name + "_deregister_configfs(void)\n"
	buf += "{\n"
	buf += " if (!" + fabric_mod_name + "_fabric_configfs)\n"
	buf += " return;\n\n"
	buf += " target_fabric_configfs_deregister(" + fabric_mod_name + "_fabric_configfs);\n"
	buf += " " + fabric_mod_name + "_fabric_configfs = NULL;\n"
	buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Cleared " + fabric_mod_name + "_fabric_configfs\\n\");\n"
	buf += "};\n\n"
	buf += "static int __init " + fabric_mod_name + "_init(void)\n"
	buf += "{\n"
	buf += " int ret;\n\n"
	buf += " ret = " + fabric_mod_name + "_register_configfs();\n"
	buf += " if (ret < 0)\n"
	buf += " return ret;\n\n"
	buf += " return 0;\n"
	buf += "};\n\n"
	buf += "static void __exit " + fabric_mod_name + "_exit(void)\n"
	buf += "{\n"
	buf += " " + fabric_mod_name + "_deregister_configfs();\n"
	buf += "};\n\n"
	buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
	buf += "MODULE_LICENSE(\"GPL\");\n"
	buf += "module_init(" + fabric_mod_name + "_init);\n"
	buf += "module_exit(" + fabric_mod_name + "_exit);\n"
	# NOTE(review): file.write() returns None on Python 2, so this error
	# branch is dead code there (and would mis-fire on Python 3, where
	# write() returns the character count); preserved as-is.
	ret = p.write(buf)
	if ret:
		tcm_mod_err("Unable to write f: " + f)
	p.close()
	return
def tcm_mod_scan_fabric_ops(tcm_dir):
	"""Collect the function-pointer member lines of struct
	target_core_fabric_ops from include/target/target_core_fabric.h
	into the module-level fabric_ops list.

	NOTE(review): the process_fo flag flips to 1 on the first line that
	is not the struct opening line, so scanning effectively begins near
	the top of the header rather than inside the struct; member lines
	are then recognized purely by matching the "(*" of a function
	pointer declaration.  Preserved as-is -- confirm against the header
	layout before changing.
	"""
	fabric_ops_api = tcm_dir + "include/target/target_core_fabric.h"
	print "Using tcm_mod_scan_fabric_ops: " + fabric_ops_api
	process_fo = 0;
	p = open(fabric_ops_api, 'r')
	line = p.readline()
	while line:
		# Skip over the struct opening line itself.
		if process_fo == 0 and re.search('struct target_core_fabric_ops {', line):
			line = p.readline()
			continue
		# First non-opening line: switch into member-scanning mode.
		if process_fo == 0:
			process_fo = 1;
			line = p.readline()
			# Search for function pointer
			if not re.search('\(\*', line):
				continue
			fabric_ops.append(line.rstrip())
			continue
		line = p.readline()
		# Search for function pointer
		if not re.search('\(\*', line):
			continue
		fabric_ops.append(line.rstrip())
	p.close()
	return
def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
	"""Generate <fabric_mod_name>_fabric.c and <fabric_mod_name>_fabric.h.

	Walks the global fabric_ops list (filled by tcm_mod_scan_fabric_ops())
	and, for each recognized target_core_fabric_ops member, appends a
	minimal stub implementation to buf (the .c file) and its prototype to
	bufi (the .h file).  proto_ident selects the transport helper family
	(fc_* / sas_* / iscsi_*) used by the transport-ID related stubs.
	"""
	buf = ""
	bufi = ""
	f = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.c"
	print "Writing file: " + f
	p = open(f, 'w')
	# NOTE(review): open() raises IOError on failure, so these "if not"
	# checks can never fire; preserved as-is.
	if not p:
		tcm_mod_err("Unable to open file: " + f)
	fi = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.h"
	print "Writing file: " + fi
	pi = open(fi, 'w')
	if not pi:
		tcm_mod_err("Unable to open file: " + fi)
	# Header includes for the generated _fabric.c.
	buf = "#include <linux/slab.h>\n"
	buf += "#include <linux/kthread.h>\n"
	buf += "#include <linux/types.h>\n"
	buf += "#include <linux/list.h>\n"
	buf += "#include <linux/types.h>\n"
	buf += "#include <linux/string.h>\n"
	buf += "#include <linux/ctype.h>\n"
	buf += "#include <asm/unaligned.h>\n"
	buf += "#include <scsi/scsi.h>\n"
	buf += "#include <scsi/scsi_host.h>\n"
	buf += "#include <scsi/scsi_device.h>\n"
	buf += "#include <scsi/scsi_cmnd.h>\n"
	buf += "#include <scsi/libfc.h>\n\n"
	buf += "#include <target/target_core_base.h>\n"
	buf += "#include <target/target_core_fabric.h>\n"
	buf += "#include <target/target_core_configfs.h>\n\n"
	buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
	buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
	# check_true()/check_false() are always emitted, independent of fabric_ops.
	buf += "int " + fabric_mod_name + "_check_true(struct se_portal_group *se_tpg)\n"
	buf += "{\n"
	buf += " return 1;\n"
	buf += "}\n\n"
	bufi += "int " + fabric_mod_name + "_check_true(struct se_portal_group *);\n"
	buf += "int " + fabric_mod_name + "_check_false(struct se_portal_group *se_tpg)\n"
	buf += "{\n"
	buf += " return 0;\n"
	buf += "}\n\n"
	bufi += "int " + fabric_mod_name + "_check_false(struct se_portal_group *);\n"
	# Emit one stub (plus prototype) per recognized fabric_ops member.
	total_fabric_ops = len(fabric_ops)
	i = 0
	while i < total_fabric_ops:
		fo = fabric_ops[i]
		i += 1
		# print "fabric_ops: " + fo
		if re.search('get_fabric_name', fo):
			buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n"
			buf += "{\n"
			buf += " return \"" + fabric_mod_name[4:] + "\";\n"
			buf += "}\n\n"
			bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n"
			continue
		if re.search('get_fabric_proto_ident', fo):
			buf += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *se_tpg)\n"
			buf += "{\n"
			buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
			buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
			buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
			buf += " u8 proto_id;\n\n"
			buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
			if proto_ident == "FC":
				buf += " case SCSI_PROTOCOL_FCP:\n"
				buf += " default:\n"
				buf += " proto_id = fc_get_fabric_proto_ident(se_tpg);\n"
				buf += " break;\n"
			elif proto_ident == "SAS":
				buf += " case SCSI_PROTOCOL_SAS:\n"
				buf += " default:\n"
				buf += " proto_id = sas_get_fabric_proto_ident(se_tpg);\n"
				buf += " break;\n"
			elif proto_ident == "iSCSI":
				buf += " case SCSI_PROTOCOL_ISCSI:\n"
				buf += " default:\n"
				buf += " proto_id = iscsi_get_fabric_proto_ident(se_tpg);\n"
				buf += " break;\n"
			buf += " }\n\n"
			buf += " return proto_id;\n"
			buf += "}\n\n"
			bufi += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *);\n"
		if re.search('get_wwn', fo):
			buf += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *se_tpg)\n"
			buf += "{\n"
			buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
			buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
			buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n\n"
			buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_name[0];\n"
			buf += "}\n\n"
			bufi += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *);\n"
		if re.search('get_tag', fo):
			buf += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *se_tpg)\n"
			buf += "{\n"
			buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
			buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
			buf += " return tpg->" + fabric_mod_port + "_tpgt;\n"
			buf += "}\n\n"
			bufi += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *);\n"
		if re.search('get_default_depth', fo):
			buf += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *se_tpg)\n"
			buf += "{\n"
			buf += " return 1;\n"
			buf += "}\n\n"
			bufi += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *);\n"
		if re.search('get_pr_transport_id\)\(', fo):
			buf += "u32 " + fabric_mod_name + "_get_pr_transport_id(\n"
			buf += " struct se_portal_group *se_tpg,\n"
			buf += " struct se_node_acl *se_nacl,\n"
			buf += " struct t10_pr_registration *pr_reg,\n"
			buf += " int *format_code,\n"
			buf += " unsigned char *buf)\n"
			buf += "{\n"
			buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
			buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
			buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
			buf += " int ret = 0;\n\n"
			buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
			if proto_ident == "FC":
				buf += " case SCSI_PROTOCOL_FCP:\n"
				buf += " default:\n"
				buf += " ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
				buf += " format_code, buf);\n"
				buf += " break;\n"
			elif proto_ident == "SAS":
				buf += " case SCSI_PROTOCOL_SAS:\n"
				buf += " default:\n"
				buf += " ret = sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
				buf += " format_code, buf);\n"
				buf += " break;\n"
			elif proto_ident == "iSCSI":
				buf += " case SCSI_PROTOCOL_ISCSI:\n"
				buf += " default:\n"
				buf += " ret = iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
				buf += " format_code, buf);\n"
				buf += " break;\n"
			buf += " }\n\n"
			buf += " return ret;\n"
			buf += "}\n\n"
			bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id(struct se_portal_group *,\n"
			bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
			bufi += " int *, unsigned char *);\n"
		if re.search('get_pr_transport_id_len\)\(', fo):
			buf += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(\n"
			buf += " struct se_portal_group *se_tpg,\n"
			buf += " struct se_node_acl *se_nacl,\n"
			buf += " struct t10_pr_registration *pr_reg,\n"
			buf += " int *format_code)\n"
			buf += "{\n"
			buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
			buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
			buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
			buf += " int ret = 0;\n\n"
			buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
			if proto_ident == "FC":
				buf += " case SCSI_PROTOCOL_FCP:\n"
				buf += " default:\n"
				buf += " ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
				buf += " format_code);\n"
				buf += " break;\n"
			elif proto_ident == "SAS":
				buf += " case SCSI_PROTOCOL_SAS:\n"
				buf += " default:\n"
				buf += " ret = sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
				buf += " format_code);\n"
				buf += " break;\n"
			elif proto_ident == "iSCSI":
				buf += " case SCSI_PROTOCOL_ISCSI:\n"
				buf += " default:\n"
				buf += " ret = iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
				buf += " format_code);\n"
				buf += " break;\n"
			buf += " }\n\n"
			buf += " return ret;\n"
			buf += "}\n\n"
			bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(struct se_portal_group *,\n"
			bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
			bufi += " int *);\n"
		if re.search('parse_pr_out_transport_id\)\(', fo):
			buf += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(\n"
			buf += " struct se_portal_group *se_tpg,\n"
			buf += " const char *buf,\n"
			buf += " u32 *out_tid_len,\n"
			buf += " char **port_nexus_ptr)\n"
			buf += "{\n"
			buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
			buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
			buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
			buf += " char *tid = NULL;\n\n"
			buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
			if proto_ident == "FC":
				buf += " case SCSI_PROTOCOL_FCP:\n"
				buf += " default:\n"
				buf += " tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
				buf += " port_nexus_ptr);\n"
			elif proto_ident == "SAS":
				buf += " case SCSI_PROTOCOL_SAS:\n"
				buf += " default:\n"
				buf += " tid = sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
				buf += " port_nexus_ptr);\n"
			elif proto_ident == "iSCSI":
				buf += " case SCSI_PROTOCOL_ISCSI:\n"
				buf += " default:\n"
				buf += " tid = iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
				buf += " port_nexus_ptr);\n"
			buf += " }\n\n"
			buf += " return tid;\n"
			buf += "}\n\n"
			bufi += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(struct se_portal_group *,\n"
			bufi += " const char *, u32 *, char **);\n"
		if re.search('alloc_fabric_acl\)\(', fo):
			buf += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *se_tpg)\n"
			buf += "{\n"
			buf += " struct " + fabric_mod_name + "_nacl *nacl;\n\n"
			buf += " nacl = kzalloc(sizeof(struct " + fabric_mod_name + "_nacl), GFP_KERNEL);\n"
			buf += " if (!nacl) {\n"
			buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_nacl\\n\");\n"
			buf += " return NULL;\n"
			buf += " }\n\n"
			buf += " return &nacl->se_node_acl;\n"
			buf += "}\n\n"
			bufi += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *);\n"
		if re.search('release_fabric_acl\)\(', fo):
			buf += "void " + fabric_mod_name + "_release_fabric_acl(\n"
			buf += " struct se_portal_group *se_tpg,\n"
			buf += " struct se_node_acl *se_nacl)\n"
			buf += "{\n"
			buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_nacl,\n"
			buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
			buf += " kfree(nacl);\n"
			buf += "}\n\n"
			bufi += "void " + fabric_mod_name + "_release_fabric_acl(struct se_portal_group *,\n"
			bufi += " struct se_node_acl *);\n"
		if re.search('tpg_get_inst_index\)\(', fo):
			buf += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *se_tpg)\n"
			buf += "{\n"
			buf += " return 1;\n"
			buf += "}\n\n"
			bufi += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *);\n"
		if re.search('\*release_cmd\)\(', fo):
			buf += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *se_cmd)\n"
			buf += "{\n"
			buf += " return;\n"
			buf += "}\n\n"
			bufi += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *);\n"
		if re.search('shutdown_session\)\(', fo):
			buf += "int " + fabric_mod_name + "_shutdown_session(struct se_session *se_sess)\n"
			buf += "{\n"
			buf += " return 0;\n"
			buf += "}\n\n"
			bufi += "int " + fabric_mod_name + "_shutdown_session(struct se_session *);\n"
		if re.search('close_session\)\(', fo):
			buf += "void " + fabric_mod_name + "_close_session(struct se_session *se_sess)\n"
			buf += "{\n"
			buf += " return;\n"
			buf += "}\n\n"
			bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n"
		if re.search('stop_session\)\(', fo):
			buf += "void " + fabric_mod_name + "_stop_session(struct se_session *se_sess, int sess_sleep , int conn_sleep)\n"
			buf += "{\n"
			buf += " return;\n"
			buf += "}\n\n"
			bufi += "void " + fabric_mod_name + "_stop_session(struct se_session *, int, int);\n"
		if re.search('fall_back_to_erl0\)\(', fo):
			buf += "void " + fabric_mod_name + "_reset_nexus(struct se_session *se_sess)\n"
			buf += "{\n"
			buf += " return;\n"
			buf += "}\n\n"
			bufi += "void " + fabric_mod_name + "_reset_nexus(struct se_session *);\n"
		if re.search('sess_logged_in\)\(', fo):
			buf += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *se_sess)\n"
			buf += "{\n"
			buf += " return 0;\n"
			buf += "}\n\n"
			bufi += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *);\n"
		if re.search('sess_get_index\)\(', fo):
			buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n"
			buf += "{\n"
			buf += " return 0;\n"
			buf += "}\n\n"
			bufi += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *);\n"
		if re.search('write_pending\)\(', fo):
			buf += "int " + fabric_mod_name + "_write_pending(struct se_cmd *se_cmd)\n"
			buf += "{\n"
			buf += " return 0;\n"
			buf += "}\n\n"
			bufi += "int " + fabric_mod_name + "_write_pending(struct se_cmd *);\n"
		if re.search('write_pending_status\)\(', fo):
			buf += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *se_cmd)\n"
			buf += "{\n"
			buf += " return 0;\n"
			buf += "}\n\n"
			bufi += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *);\n"
		if re.search('set_default_node_attributes\)\(', fo):
			buf += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *nacl)\n"
			buf += "{\n"
			buf += " return;\n"
			buf += "}\n\n"
			bufi += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *);\n"
		if re.search('get_task_tag\)\(', fo):
			buf += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *se_cmd)\n"
			buf += "{\n"
			buf += " return 0;\n"
			buf += "}\n\n"
			bufi += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *);\n"
		if re.search('get_cmd_state\)\(', fo):
			buf += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *se_cmd)\n"
			buf += "{\n"
			buf += " return 0;\n"
			buf += "}\n\n"
			bufi += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *);\n"
		if re.search('queue_data_in\)\(', fo):
			buf += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *se_cmd)\n"
			buf += "{\n"
			buf += " return 0;\n"
			buf += "}\n\n"
			bufi += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *);\n"
		if re.search('queue_status\)\(', fo):
			buf += "int " + fabric_mod_name + "_queue_status(struct se_cmd *se_cmd)\n"
			buf += "{\n"
			buf += " return 0;\n"
			buf += "}\n\n"
			bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n"
		if re.search('queue_tm_rsp\)\(', fo):
			buf += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
			buf += "{\n"
			buf += " return 0;\n"
			buf += "}\n\n"
			bufi += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"
		if re.search('get_fabric_sense_len\)\(', fo):
			buf += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void)\n"
			buf += "{\n"
			buf += " return 0;\n"
			buf += "}\n\n"
			bufi += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void);\n"
		if re.search('set_fabric_sense_len\)\(', fo):
			buf += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_length)\n"
			buf += "{\n"
			buf += " return 0;\n"
			buf += "}\n\n"
			bufi += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *, u32);\n"
		if re.search('is_state_remove\)\(', fo):
			buf += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *se_cmd)\n"
			buf += "{\n"
			buf += " return 0;\n"
			buf += "}\n\n"
			bufi += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *);\n"
	# NOTE(review): file.write() returns None on Python 2, so these error
	# branches are dead code there (and would mis-fire on Python 3, where
	# write() returns the character count); preserved as-is.
	ret = p.write(buf)
	if ret:
		tcm_mod_err("Unable to write f: " + f)
	p.close()
	ret = pi.write(bufi)
	if ret:
		tcm_mod_err("Unable to write fi: " + fi)
	pi.close()
	return
def tcm_mod_build_kbuild(fabric_mod_dir_var, fabric_mod_name):
	"""Generate the new fabric module's kbuild Makefile.

	Writes fabric_mod_dir_var/Makefile with the <name>-objs list
	(_fabric.o and _configfs.o) and the obj-$(CONFIG_<NAME>) entry.

	Fix vs. original: file.write() returns None on Python 2 (the char
	count on Python 3) and signals failure by raising, so the old
	"if ret: tcm_mod_err(...)" and "if not p:" checks were dead/wrong;
	both are removed and errors propagate as exceptions.
	"""
	f = fabric_mod_dir_var + "/Makefile"
	print("Writing file: " + f)
	buf = fabric_mod_name + "-objs := " + fabric_mod_name + "_fabric.o \\\n"
	buf += " " + fabric_mod_name + "_configfs.o\n"
	buf += "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name + ".o\n"
	# with-statement guarantees the handle is closed even if write() raises.
	with open(f, 'w') as p:
		p.write(buf)
	return
def tcm_mod_build_kconfig(fabric_mod_dir_var, fabric_mod_name):
	"""Generate the new fabric module's Kconfig entry.

	Writes fabric_mod_dir_var/Kconfig with a tristate CONFIG_<NAME>
	option depending on TARGET_CORE && CONFIGFS_FS.

	Fix vs. original: file.write() returns None on Python 2 (the char
	count on Python 3) and signals failure by raising, so the old
	"if ret: tcm_mod_err(...)" and "if not p:" checks were dead/wrong;
	both are removed and errors propagate as exceptions.
	"""
	f = fabric_mod_dir_var + "/Kconfig"
	print("Writing file: " + f)
	buf = "config " + fabric_mod_name.upper() + "\n"
	buf += " tristate \"" + fabric_mod_name.upper() + " fabric module\"\n"
	buf += " depends on TARGET_CORE && CONFIGFS_FS\n"
	buf += " default n\n"
	buf += " ---help---\n"
	buf += " Say Y here to enable the " + fabric_mod_name.upper() + " fabric module\n"
	# with-statement guarantees the handle is closed even if write() raises.
	with open(f, 'w') as p:
		p.write(buf)
	return
def tcm_mod_add_kbuild(tcm_dir, fabric_mod_name):
	"""Append the new module's obj-$(CONFIG_...) line to the kernel
	tree's drivers/target/Makefile."""
	makefile_path = tcm_dir + "/drivers/target/Makefile"
	entry = ("obj-$(CONFIG_" + fabric_mod_name.upper() + ") += "
		 + fabric_mod_name.lower() + "/\n")
	with open(makefile_path, 'a') as makefile:
		makefile.write(entry)
	return
def tcm_mod_add_kconfig(tcm_dir, fabric_mod_name):
	"""Append a source line for the new module's Kconfig to the kernel
	tree's drivers/target/Kconfig."""
	kconfig_path = tcm_dir + "/drivers/target/Kconfig"
	entry = ("source \"drivers/target/" + fabric_mod_name.lower()
		 + "/Kconfig\"\n")
	with open(kconfig_path, 'a') as kconfig:
		kconfig.write(entry)
	return
def main(modname, proto_ident):
# proto_ident = "FC"
# proto_ident = "SAS"
# proto_ident = "iSCSI"
tcm_dir = os.getcwd();
tcm_dir += "/../../"
print "tcm_dir: " + tcm_dir
fabric_mod_name = modname
fabric_mod_dir = tcm_dir + "drivers/target/" + fabric_mod_name
print "Set fabric_mod_name: " + fabric_mod_name
print "Set fabric_mod_dir: " + fabric_mod_dir
print "Using proto_ident: " + proto_ident
if proto_ident != "FC" and proto_ident != "SAS" and proto_ident != "iSCSI":
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
ret = tcm_mod_create_module_subdir(fabric_mod_dir)
if ret:
print "tcm_mod_create_module_subdir() failed because module already exists!"
sys.exit(1)
tcm_mod_build_base_includes(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_scan_fabric_ops(tcm_dir)
tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_configfs(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Makefile..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kbuild(tcm_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Kconfig..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kconfig(tcm_dir, fabric_mod_name)
return
# Command-line interface: both options below are mandatory.
parser = optparse.OptionParser()
parser.add_option('-m', '--modulename', help='Module name', dest='modname',
                  action='store', nargs=1, type='string')
parser.add_option('-p', '--protoident', help='Protocol Ident', dest='protoident',
                  action='store', nargs=1, type='string')
(opts, args) = parser.parse_args()
# Bail out with usage help if either mandatory option is missing.
mandatories = ['modname', 'protoident']
for m in mandatories:
    if not opts.__dict__[m]:
        print "mandatory option is missing\n"
        parser.print_help()
        exit(-1)
if __name__ == "__main__":
    main(str(opts.modname), opts.protoident)
| gpl-2.0 |
ahmedaljazzar/edx-platform | lms/djangoapps/commerce/utils.py | 2 | 15675 | """Utilities to assist with commerce tasks."""
import json
import logging
from urllib import urlencode
from urlparse import urljoin
import requests
import waffle
from django.conf import settings
from django.contrib.auth import get_user_model
from django.urls import reverse
from django.utils.translation import ugettext as _
from opaque_keys.edx.keys import CourseKey
from course_modes.models import CourseMode
from openedx.core.djangoapps.commerce.utils import ecommerce_api_client, is_commerce_service_configured
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
from openedx.core.djangoapps.theming import helpers as theming_helpers
from student.models import CourseEnrollment
from .models import CommerceConfiguration
log = logging.getLogger(__name__)
def is_account_activation_requirement_disabled():
    """
    Report whether the django-waffle switch that disables the account
    activation requirement is currently active.

    Returns:
        Boolean value representing switch status
    """
    default_switch = settings.DISABLE_ACCOUNT_ACTIVATION_REQUIREMENT_SWITCH
    name = configuration_helpers.get_value(
        'DISABLE_ACCOUNT_ACTIVATION_REQUIREMENT_SWITCH', default_switch)
    return waffle.switch_is_active(name)
class EcommerceService(object):
    """ Helper class for ecommerce service integration. """
    def __init__(self):
        # Current site-wide commerce configuration (checkout flags, paths).
        self.config = CommerceConfiguration.current()
    @property
    def ecommerce_url_root(self):
        """ Retrieve Ecommerce service public url root. """
        return configuration_helpers.get_value('ECOMMERCE_PUBLIC_URL_ROOT', settings.ECOMMERCE_PUBLIC_URL_ROOT)
    def get_absolute_ecommerce_url(self, ecommerce_page_url):
        """ Return the absolute URL to the ecommerce page.
        Args:
            ecommerce_page_url (str): Relative path to the ecommerce page.
        Returns:
            Absolute path to the ecommerce page.
        """
        return urljoin(self.ecommerce_url_root, ecommerce_page_url)
    def get_order_dashboard_url(self):
        """ Return the URL to the ecommerce dashboard orders page.
        Returns:
            String: order dashboard url.
        """
        return self.get_absolute_ecommerce_url(CommerceConfiguration.DEFAULT_ORDER_DASHBOARD_URL)
    def get_receipt_page_url(self, order_number):
        """
        Gets the URL for the Order Receipt page hosted by the ecommerce service.
        Args:
            order_number (str): Order number.
        Returns:
            Receipt page for the specified Order.
        """
        # The default receipt path expects the order number appended directly.
        return self.get_absolute_ecommerce_url(CommerceConfiguration.DEFAULT_RECEIPT_PAGE_URL + order_number)
    def is_enabled(self, user):
        """
        Determines the availability of the EcommerceService based on user activation and service configuration.
        Note: If the user is anonymous we bypass the user activation gate and only look at the service config.
        Returns:
            Boolean
        """
        user_is_active = user.is_active or is_account_activation_requirement_disabled()
        # Anonymous users are allowed through regardless of activation status.
        allow_user = user_is_active or user.is_anonymous
        return allow_user and self.config.checkout_on_ecommerce_service
    def payment_page_url(self):
        """ Return the URL for the checkout page.
        Example:
            http://localhost:8002/basket/add/
        """
        return self.get_absolute_ecommerce_url(self.config.basket_checkout_page)
    def get_checkout_page_url(self, *skus, **kwargs):
        """ Construct the URL to the ecommerce checkout page and include products.
        Args:
            skus (list): List of SKUs associated with products to be added to basket
            program_uuid (string): The UUID of the program, if applicable
            catalog (string): The enterprise catalog UUID, if applicable
        Returns:
            Absolute path to the ecommerce checkout page showing basket that contains specified products.
        Example:
            http://localhost:8002/basket/add/?sku=5H3HG5&sku=57FHHD
            http://localhost:8002/basket/add/?sku=5H3HG5&sku=57FHHD&bundle=3bdf1dd1-49be-4a15-9145-38901f578c5a
        """
        program_uuid = kwargs.get('program_uuid')
        enterprise_catalog_uuid = kwargs.get('catalog')
        # doseq=True expands the sku list into repeated "sku=" parameters.
        query_params = {'sku': skus}
        if enterprise_catalog_uuid:
            query_params.update({'catalog': enterprise_catalog_uuid})
        url = '{checkout_page_path}?{query_params}'.format(
            checkout_page_path=self.get_absolute_ecommerce_url(self.config.basket_checkout_page),
            query_params=urlencode(query_params, doseq=True),
        )
        if program_uuid:
            url = '{url}&bundle={program_uuid}'.format(
                url=url,
                program_uuid=program_uuid
            )
        return url
    def upgrade_url(self, user, course_key):
        """
        Returns the URL for the user to upgrade, or None if not applicable.
        """
        enrollment = CourseEnrollment.get_enrollment(user, course_key)
        verified_mode = enrollment.verified_mode if enrollment else None
        if verified_mode:
            # Checkout goes through the ecommerce service when it is enabled
            # for this user; otherwise fall back to the LMS upgrade flow.
            if self.is_enabled(user):
                return self.get_checkout_page_url(verified_mode.sku)
            else:
                return reverse('verify_student_upgrade_and_verify', args=(course_key,))
        return None
def refund_entitlement(course_entitlement):
    """
    Attempt a refund of a course entitlement. Verify the User before calling this refund method

    Args:
        course_entitlement: the CourseEntitlement to refund.

    Returns:
        bool: True if the Refund is successfully processed.
    """
    user_model = get_user_model()
    enrollee = course_entitlement.user
    entitlement_uuid = str(course_entitlement.uuid)
    if not is_commerce_service_configured():
        log.error(
            'Ecommerce service is not configured, cannot refund for user [%s], course entitlement [%s].',
            enrollee.id,
            entitlement_uuid
        )
        return False
    # Refund requests are issued on behalf of the ecommerce worker account.
    service_user = user_model.objects.get(username=settings.ECOMMERCE_SERVICE_WORKER_USERNAME)
    api_client = ecommerce_api_client(service_user)
    log.info(
        'Attempting to create a refund for user [%s], course entitlement [%s]...',
        enrollee.id,
        entitlement_uuid
    )
    try:
        refund_ids = api_client.refunds.post(
            {
                'order_number': course_entitlement.order_number,
                'username': enrollee.username,
                'entitlement_uuid': entitlement_uuid,
            }
        )
    except Exception as exc:  # pylint: disable=broad-except
        # Catch any possible exceptions from the Ecommerce service to ensure we fail gracefully
        log.exception(
            "Unexpected exception while attempting to initiate refund for user [%s], "
            "course entitlement [%s] message: [%s]",
            enrollee.id,
            course_entitlement.uuid,
            str(exc)
        )
        return False
    if refund_ids:
        log.info(
            'Refund successfully opened for user [%s], course entitlement [%s]: %r',
            enrollee.id,
            entitlement_uuid,
            refund_ids,
        )
        # Entitlement refunds always notify support when approval is required.
        return _process_refund(
            refund_ids=refund_ids,
            api_client=api_client,
            mode=course_entitlement.mode,
            user=enrollee,
            always_notify=True,
        )
    else:
        # FIX: Logger.warn is a deprecated alias of Logger.warning.
        log.warning('No refund opened for user [%s], course entitlement [%s]', enrollee.id, entitlement_uuid)
        return False
def refund_seat(course_enrollment, change_mode=False):
    """
    Attempt to initiate a refund for any orders associated with the seat being unenrolled,
    using the commerce service.
    Arguments:
        course_enrollment (CourseEnrollment): a student enrollment
        change_mode (Boolean): change the course mode to free mode or not
    Returns:
        A list of the external service's IDs for any refunds that were initiated
        (may be empty).
    Raises:
        exceptions.SlumberBaseException: for any unhandled HTTP error during communication with the E-Commerce Service.
        exceptions.Timeout: if the attempt to reach the commerce service timed out.
    """
    User = get_user_model()  # pylint:disable=invalid-name
    course_key_str = unicode(course_enrollment.course_id)
    enrollee = course_enrollment.user
    # Refund requests are issued on behalf of the ecommerce worker account.
    service_user = User.objects.get(username=settings.ECOMMERCE_SERVICE_WORKER_USERNAME)
    api_client = ecommerce_api_client(service_user)
    log.info('Attempting to create a refund for user [%s], course [%s]...', enrollee.id, course_key_str)
    refund_ids = api_client.refunds.post({'course_id': course_key_str, 'username': enrollee.username})
    if refund_ids:
        log.info('Refund successfully opened for user [%s], course [%s]: %r', enrollee.id, course_key_str, refund_ids)
        _process_refund(
            refund_ids=refund_ids,
            api_client=api_client,
            mode=course_enrollment.mode,
            user=enrollee,
        )
        # Optionally downgrade the enrollment to the course's auto-enroll
        # (free) mode, keeping it inactive and skipping a second refund.
        if change_mode and CourseMode.can_auto_enroll(course_id=CourseKey.from_string(course_key_str)):
            course_enrollment.update_enrollment(mode=CourseMode.auto_enroll_mode(course_id=course_key_str),
                                                is_active=False, skip_refund=True)
            course_enrollment.save()
    else:
        log.info('No refund opened for user [%s], course [%s]', enrollee.id, course_key_str)
    return refund_ids
def _process_refund(refund_ids, api_client, mode, user, always_notify=False):
    """
    Helper method to process a refund for a given course_product. This method assumes that the User has already
    been unenrolled.
    Arguments:
        refund_ids: List of refund ids to be processed
        api_client: The API Client used in the processing of refunds
        mode: The mode that the refund should be processed for
        user: The user that the refund is being processed for
        always_notify (bool): This will enable always notifying support with Zendesk tickets when
            an approval is required
    Returns:
        bool: True if the refund process was successful, False if there are any Errors that are not handled
    """
    config = CommerceConfiguration.current()
    if config.enable_automatic_refund_approval:
        # Refunds we could not auto-approve fall through to manual handling.
        refunds_requiring_approval = []
        for refund_id in refund_ids:
            try:
                # NOTE: The following assumes that the user has already been unenrolled.
                # We are then able to approve payment. Additionally, this ensures we don't tie up an
                # additional web worker when the E-Commerce Service tries to unenroll the learner.
                api_client.refunds(refund_id).process.put({'action': 'approve_payment_only'})
                log.info('Refund [%d] successfully approved.', refund_id)
            except:  # pylint: disable=bare-except
                # Push the refund to Support to process
                log.exception('Failed to automatically approve refund [%d]!', refund_id)
                refunds_requiring_approval.append(refund_id)
    else:
        refunds_requiring_approval = refund_ids
    if refunds_requiring_approval:
        # XCOM-371: this is a temporary measure to suppress refund-related email
        # notifications to students and support for free enrollments. This
        # condition should be removed when the CourseEnrollment.refundable() logic
        # is updated to be more correct, or when we implement better handling (and
        # notifications) in Otto for handling reversal of $0 transactions.
        if mode != 'verified' and not always_notify:
            # 'verified' is the only enrollment mode that should presently
            # result in opening a refund request.
            log.info(
                'Skipping refund support notification for non-verified mode for user [%s], mode: [%s]',
                user.id,
                mode,
            )
        else:
            try:
                return _send_refund_notification(user, refunds_requiring_approval)
            except:  # pylint: disable=bare-except
                # Unable to send notification to Support, do not break as this method is used by Signals
                log.warning('Could not send support notification for refund.', exc_info=True)
                return False
    return True
def _send_refund_notification(user, refund_ids):
    """
    Notify the support team of the refund request.
    Returns:
        bool: True if we are able to send the notification. In this case that means we were able to create
        a ZenDesk ticket
    """
    if theming_helpers.is_request_in_themed_site():
        # this is not presently supported with the external service.
        raise NotImplementedError("Unable to send refund processing emails to support teams.")
    # Build the information for the ZenDesk ticket
    subject = _("[Refund] User-Requested Refund")
    body = _generate_refund_notification_body(user, refund_ids)
    requester = user.profile.name or user.username
    return create_zendesk_ticket(requester, user.email, subject, body, ['auto_refund'])
def _generate_refund_notification_body(student, refund_ids):
    """ Returns a refund notification message body. """
    root = configuration_helpers.get_value(
        'ECOMMERCE_PUBLIC_URL_ROOT', settings.ECOMMERCE_PUBLIC_URL_ROOT,
    )
    links = []
    for refund_id in refund_ids:
        links.append(urljoin(root, '/dashboard/refunds/{}/'.format(refund_id)))
    intro = _(
        'A refund request has been initiated for {username} ({email}). '
        'To process this request, please visit the link(s) below.'
    ).format(username=student.username, email=student.email)
    # emails contained in this message could contain unicode characters so encode as such
    return u'{msg}\n\n{urls}'.format(msg=intro, urls='\n'.join(links))
def create_zendesk_ticket(requester_name, requester_email, subject, body, tags=None):
    """
    Create a Zendesk ticket via API.
    Returns:
        bool: False if we are unable to create the ticket for any reason
    """
    if not (settings.ZENDESK_URL and settings.ZENDESK_USER and settings.ZENDESK_API_KEY):
        log.error('Zendesk is not configured. Cannot create a ticket.')
        return False
    # Build the tag set without mutating the caller's list; always tag 'LMS'.
    ticket_tags = set(tags or [])
    ticket_tags.add('LMS')
    payload = json.dumps({
        'ticket': {
            'requester': {
                'name': requester_name,
                'email': unicode(requester_email)
            },
            'subject': subject,
            'comment': {'body': body},
            'tags': list(ticket_tags)
        }
    })
    # Set the request parameters
    endpoint = urljoin(settings.ZENDESK_URL, '/api/v2/tickets.json')
    auth = ('{}/token'.format(settings.ZENDESK_USER), settings.ZENDESK_API_KEY)
    try:
        response = requests.post(endpoint, data=payload, auth=auth,
                                 headers={'content-type': 'application/json'})
        # Zendesk answers 201 (Created) on success; anything else is a failure.
        if response.status_code != 201:
            log.error('Failed to create ticket. Status: [%d], Body: [%s]', response.status_code, response.content)
            return False
        else:
            log.debug('Successfully created ticket.')
    except Exception:  # pylint: disable=broad-except
        log.exception('Failed to create ticket.')
        return False
    return True
| agpl-3.0 |
NMGRL/pychron | pychron/hardware/core/motion/motion_designer.py | 2 | 6691 | # ===============================================================================
# Copyright 2011 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from traits.api import HasTraits, Range, Instance, Property
from traitsui.api import View, Item, Group
# ============= standard library imports ========================
from numpy import linspace
# ============= local library imports ==========================
from pychron.graph.graph import Graph
from pychron.hardware.core.motion.motion_profiler import MotionProfiler
class MotionDesigner(HasTraits):
    """Interactive designer for trapezoidal motion profiles.

    Uses MotionProfiler to correct the requested velocity/acceleration for a
    given travel distance, then plots the velocity and position traces on a
    two-plot Graph canvas.
    """
    canvas = Instance(Graph)
    # Public traits are Properties backed by shadow attributes so that the
    # setters can trigger recalculation of the motion profile.
    acceleration = Property(Range(0, 8., 7.62), depends_on='_acceleration')
    _acceleration = Range(0, 8., 7.62)
    velocity = Property(Range(0, 8., 3.81), depends_on='_velocity')
    _velocity = Range(0, 8., 3.81)
    distance = Property(Range(0, 10., 5), depends_on='_distance')
    _distance = Range(0, 10., 5)

    def _set_velocity(self, v):
        self._velocity = v

    def _set_distance(self, d):
        # Changing the distance recomputes corrected motion parameters and
        # refreshes both profile plots.
        self._distance = d
        mp = MotionProfiler()
        cv, ac, dc = mp.calculate_corrected_parameters(self.distance, self.velocity,
                                                       self.acceleration, self.acceleration)
        times, dist = mp.calculate_transit_parameters(self.distance, cv, ac, dc)
        self._acceleration = ac
        self._velocity = cv
        self.plot_velocity_profile(times, cv, 0)
        self.plot_position_profile(*times)

    def _set_acceleration(self, a):
        self._acceleration = a

    def _get_velocity(self):
        return self._velocity

    def _get_distance(self):
        return self._distance

    def _get_acceleration(self):
        # BUG FIX: Property getters are called with no extra argument; the
        # previous signature (self, a) raised TypeError on trait access.
        return self._acceleration

    def replot(self):
        """Clear the canvas and redraw both profiles from the current traits."""
        g = self.canvas
        g.clear()
        g.new_plot(title='Velocity')
        g.new_plot(title='Position')
        atime, dtime, vtime = self.velocity_profile(0)
        self.plot_position_profile(atime, dtime, vtime, 1)

    def plot_position_profile(self, atime, dtime, vtime, plotid=1):
        """Plot position vs. time across the accel / constant-velocity /
        decel phases. (``plotid`` retained for call compatibility; series are
        added to the current plot.)"""
        g = self.canvas
        x = [0]
        y = [0]
        # acceleration phase: p = 1/2 * a * t^2
        for i in linspace(0, atime, 50):
            x.append(i)
            p = 0.5 * self.acceleration * i ** 2
            y.append(p)
        # constant-velocity phase adds v * t to the accumulated position
        yo = p + vtime * self.velocity
        # deceleration phase (decel rate equals accel rate here)
        for i in linspace(0, dtime, 50):
            x.append(atime + vtime + i)
            p = yo + self.velocity * i - 0.5 * self.acceleration * i ** 2
            y.append(p)
        g.new_series(x, y, render_style='connectedpoints')

    def velocity_profile(self, plotid):
        """Compute transit parameters for the current settings, plot the
        velocity trace and return (accel_time, decel_time, const_time)."""
        d = self.distance
        m = MotionProfiler()
        times, dists = m.calculate_transit_parameters(d, self.velocity,
                                                      self.acceleration,
                                                      self.acceleration)
        self.plot_velocity_profile(times, self.velocity, plotid)
        return times

    def plot_velocity_profile(self, times, v, plotid):
        """Plot the trapezoidal velocity trace for the given phase times."""
        g = self.canvas
        atime, dtime, vtime = times
        # Trapezoid corners: ramp up, cruise, ramp down to zero.
        x = [0]
        y = [0]
        x.append(atime)
        y.append(v)
        x.append(atime + vtime)
        y.append(v)
        totaltime = atime + dtime + vtime
        x.append(totaltime)
        y.append(0)
        g.new_series(x, y, plotid=plotid, render_style='connectedpoints')
        g.set_y_limits(plotid=plotid, max_=self.velocity + 5)
        return atime, dtime, vtime

    # ============= views ===================================
    def traits_view(self):
        cgrp = Group(
            Item('acceleration'),
            Item('velocity'),
            Item('distance'),
        )
        v = View(
            cgrp,
            Item('canvas', show_label=False,
                 style='custom'),
            resizable=True,
            width=800,
            height=700
        )
        return v

    def _canvas_default(self):
        g = Graph()
        return g
if __name__ == '__main__':
    from pychron.core.helpers.logger_setup import logging_setup
    # Configure logging before instantiating any pychron components.
    logging_setup('motionprofiler')
    m = MotionDesigner()
    m.replot()
    m.configure_traits()
# ============= EOF ====================================
| apache-2.0 |
mgaffney/avro | lang/c/jansson/doc/ext/refcounting.py | 4 | 1706 | """
refcounting
~~~~~~~~~~~
Reference count annotations for C API functions. Has the same
result as the sphinx.ext.refcounting extension but works for all
functions regardless of the signature, and the reference counting
information is written inline with the documentation instead of a
separate file.
Adds a new directive "refcounting". The directive has no content
and one required positional parameter:: "new" or "borrow".
Example:
.. cfunction:: json_t *json_object(void)
.. refcounting:: new
<description of the json_object function>
:copyright: Copyright 2009 Petri Lehtinen <petri@digip.org>
:license: MIT, see LICENSE for details.
"""
from docutils import nodes
class refcounting(nodes.emphasis):
    """Inline docutils node carrying reference-counting information."""
def visit(self, node):
    # LaTeX/text writers render refcounting nodes exactly like emphasis.
    self.visit_emphasis(node)
def depart(self, node):
    # Counterpart of visit(): close the emphasis markup.
    self.depart_emphasis(node)
def html_visit(self, node):
    # HTML writer: open an <em class="refcount"> so CSS can style the note.
    self.body.append(self.starttag(node, 'em', '', CLASS='refcount'))
def html_depart(self, node):
    # Close the <em> element opened by html_visit().
    self.body.append('</em>')
def refcounting_directive(name, arguments, options, content, lineno,
                          content_offset, block_text, state, state_machine):
    """Handle the ``refcounting`` directive.

    The single required argument must be ``new`` or ``borrow``; the directive
    is replaced by a short inline annotation describing the return value.

    Raises:
        ValueError: if the argument is neither ``new`` nor ``borrow``.
    """
    if arguments[0] == 'borrow':
        text = 'Return value: Borrowed reference.'
    elif arguments[0] == 'new':
        text = 'Return value: New reference.'
    else:
        # BUG FIX: 'Error' was an undefined name here, so invalid arguments
        # raised NameError; raise a concrete exception type instead.
        raise ValueError('Valid arguments: new, borrow')
    return [refcounting(text, text)]
def setup(app):
    # Sphinx extension entry point: register the custom node with per-format
    # renderers, then the "refcounting" directive (1 required argument,
    # no optional arguments, no final-argument whitespace).
    app.add_node(refcounting,
                 html=(html_visit, html_depart),
                 latex=(visit, depart),
                 text=(visit, depart))
    app.add_directive('refcounting', refcounting_directive, 0, (1, 0, 0))
| apache-2.0 |
OpenSourcePolicyCenter/webapp-public | webapp/apps/dynamic/models.py | 2 | 1792 | import datetime
from django.db import models
from django.core.urlresolvers import reverse
from ..core.models import CoreRun
from django.utils.timezone import make_aware
from ..taxbrain.models import TaxBrainRun
from ..taxbrain.behaviors import DataSourceable
class DynamicElasticitySaveInputs(DataSourceable, models.Model):
    """
    This model contains all the parameters for the dynamic elasticity
    wrt GDP dynamic macro model and tax result
    """
    # Elasticity of GDP w.r.t. average marginal tax rates
    elastic_gdp = models.CharField(default="0.0", blank=True, null=True,
                                   max_length=50)
    # Starting Year of the reform calculation
    first_year = models.IntegerField(default=None, null=True)
    # Creation timestamp; the fixed 2015-01-01 default is a placeholder for
    # rows that predate automatic timestamping.
    creation_date = models.DateTimeField(
        default=make_aware(datetime.datetime(2015, 1, 1))
    )
    # Micro-simulation run these macro inputs were derived from; SET_NULL
    # keeps this row if the referenced TaxBrainRun is deleted.
    micro_run = models.ForeignKey(TaxBrainRun, blank=True, null=True,
                                  on_delete=models.SET_NULL)
    class Meta:
        permissions = (
            ("view_inputs", "Allowed to view Taxbrain."),
        )
class TaxBrainElastRun(CoreRun):
    """A single macro-elasticity model run and URLs to its result pages."""
    # Inputs that produced this run; reverse accessor is ``inputs.outputs``.
    inputs = models.OneToOneField(DynamicElasticitySaveInputs,
                                  related_name='outputs')
    def get_absolute_url(self):
        # Detail page showing this run's outputs.
        kwargs = {
            'pk': self.pk
        }
        return reverse('elast_output_detail', kwargs=kwargs)
    def get_absolute_edit_url(self):
        # Page for editing this run's dynamic-elasticity inputs.
        kwargs = {
            'pk': self.pk
        }
        return reverse('edit_dynamic_elastic', kwargs=kwargs)
    def get_absolute_download_url(self):
        # Endpoint for downloading this run's output files.
        kwargs = {
            'pk': self.pk
        }
        return reverse('elast_download_outputs', kwargs=kwargs)
    def zip_filename(self):
        # Name of the zip archive offered for download.
        return 'taxbrain_macro_elastic.zip'
| mit |
jeffkit/goldencage | goldencage/tests.py | 1 | 31570 | # encoding=utf-8
from django.test import TestCase
from django.test import Client
from django.core.urlresolvers import reverse
from django.conf import settings
from django.core.cache import cache
from django.contrib.auth.tests.utils import skipIfCustomUser
from django.contrib.auth.models import User
from django.test.utils import override_settings
import hashlib
import time
from mock import Mock
import simplejson as json
import random
from hashlib import sha1
from goldencage import views
from goldencage import config
from goldencage.models import task_done
from goldencage.models import appwalllog_done
from goldencage.models import payment_done
from goldencage.models import apply_coupon
from goldencage.models import AppWallLog
from goldencage.models import Charge
from goldencage.models import ChargePlan
from goldencage.models import Task
from goldencage.models import Order
from goldencage.models import Coupon
from goldencage.models import Exchange
@skipIfCustomUser
class CouponModelTest(TestCase):
"""测试优惠券生成与验证。
生成:
- 如果有次数限制
如果完成了的次数已达到限制,返回空
- 无次数限制或次数未到。
有未使用券,则直接使用
无未使用券,生成新的。
验证:
- 无券,返回False
- 有券,但已完成,返回False
- 有券,未完成,返回True,同时发出signal
"""
def test_generate_normal(self):
coupon = Coupon(name='test', cost=10, limit=1,
key='test')
coupon.save()
user = User.objects.create_user('jeff', 'jeff@toraysoft.com', '123')
exc = coupon.generate(user)
self.assertIsNotNone(exc)
def test_generate_duplidate(self):
coupon = Coupon(name='test', cost=10, limit=1,
key='test')
coupon.save()
user = User.objects.create_user('jeff', 'jeff@toraysoft.com', '123')
vera = User.objects.create_user('vera', 'jeff@toraysoft.com', '123')
exc = Exchange(coupon=coupon, user=user, cost=10, status='WAITING',
exchange_code='1233')
exc.save()
e = coupon.generate(vera, default=1233)
self.assertNotEquals(e.exchange_code, '1233')
def test_generate_limit(self):
coupon = Coupon(name='test', cost=10, limit=1,
key='test')
coupon.save()
user = User.objects.create_user('jeff', 'jeff@toraysoft.com', '123')
exc = Exchange(coupon=coupon, user=user, cost=10, status='DONE',
exchange_code='1233')
exc.save()
e = coupon.generate(user)
self.assertIsNone(e)
def test_generate_reuse(self):
coupon = Coupon(name='test', cost=10, limit=1,
key='test')
coupon.save()
user = User.objects.create_user('jeff', 'jeff@toraysoft.com', '123')
exc = Exchange(coupon=coupon, user=user, cost=10, status='WAITING',
exchange_code='1233')
exc.save()
e = coupon.generate(user)
self.assertIsNotNone(e)
self.assertEqual('1233', e.exchange_code)
def test_valid_notfound(self):
coupon = Coupon(name='test', cost=10, limit=1,
key='test')
coupon.save()
result = coupon.validate('1233')
self.assertFalse(result)
def test_valid_duplicate(self):
coupon = Coupon(name='test', cost=10, limit=1,
key='test')
coupon.save()
user = User.objects.create_user('jeff', 'jeff@toraysoft.com', '123')
exc = Exchange(coupon=coupon, user=user, cost=10, status='DONE',
exchange_code='1233')
exc.save()
result = coupon.validate('1233')
self.assertFalse(result)
def test_valid_normal(self):
coupon = Coupon(name='test', cost=20, limit=1,
key='test')
coupon.save()
user = User.objects.create_user('jeff', 'jeff@toraysoft.com', '123')
exc = Exchange(coupon=coupon, user=user, cost=10, status='WAITING',
exchange_code='1233')
exc.save()
apply_coupon.send = Mock()
result = coupon.validate('1233')
self.assertEqual(result.status, 'DONE')
apply_coupon.send.assert_called_with(sender=Coupon, instance=coupon,
cost=20, user=user)
class OrderModelTest(TestCase):
    """Tests for order-id encoding/decoding with an optional settings prefix."""
    def test_get_real_id_without_prefix(self):
        # Without GOLDENCAGE_ORDER_ID_PREFIX the id passes through unchanged.
        self.assertEqual(999999999, Order.get_real_id(999999999))
    @override_settings(GOLDENCAGE_ORDER_ID_PREFIX=9)
    def test_get_real_id_prefix(self):
        # With prefix 9, 900000999 decodes back to the raw id 999.
        self.assertEqual(999, Order.get_real_id(900000999))
    def test_get_order_id(self):
        # Without a configured prefix the generated id equals the pk.
        order = Order()
        order.id = 100
        gid = order.gen_order_id()
        self.assertEqual(100, gid)
    @override_settings(GOLDENCAGE_ORDER_ID_PREFIX=9)
    def test_gen_order_id_prefix(self):
        # The prefix digit is prepended with zero-padding to a fixed width.
        order = Order()
        order.id = 100
        gid = order.gen_order_id()
        self.assertEqual(900000100, gid)
    @override_settings(GOLDENCAGE_ORDER_ID_PREFIX=9)
    def test_gen_order_id_prefix_repeat(self):
        # A pk containing repeated digits still encodes unambiguously.
        order = Order()
        order.id = 999
        gid = order.gen_order_id()
        self.assertEqual(900000999, gid)
@skipIfCustomUser
class TaskModelTest(TestCase):
    """Tests for Task.make_log cost assignment."""
    def test_make_log_random(self):
        # A task with both cost and cost_max awards a random cost within
        # [cost, cost_max].
        user = User.objects.create_user('jeff', 'jeff@toraysoft.com', '123')
        task = Task(name='check in', key='check_in',
                    cost=10, cost_max=100)
        task.save()
        log = task.make_log(user)
        # FIX: use a unittest assertion instead of a bare assert so failures
        # are reported through the test runner (and survive python -O).
        self.assertTrue(10 <= log.cost <= 100)
    def test_make_log_infinity(self):
        # Without cost_max the fixed cost is awarded on every log.
        user = User.objects.create_user('jeff', 'jeff@toraysoft.com', '123')
        task = Task(name='check in', key='check_in',
                    cost=10)
        task.save()
        log = task.make_log(user)
        self.assertEqual(10, log.cost)
        log = task.make_log(user)
        self.assertEqual(10, log.cost)
@skipIfCustomUser
class AppWallCallbackTest(TestCase):
def test_waps_callback(self):
user = User.objects.create_user('jeff', 'jeff@toraysoft.com', '123')
data = {'adv_id': '1', 'app_id': 'theme',
'key': user.pk, 'udid': 'myudid',
'openudid': 'myopenid', 'bill': '2.0',
'points': 200, 'ad_name': 'music talk'
}
appwalllog_done.send = Mock()
c = Client()
rsp = c.get(reverse('wall_cb', args=['waps']), data)
self.assertEqual(rsp.status_code, 200)
appwalllog_done.send.assert_called_with(cost=200, user=user,
sender=AppWallLog)
def test_waps_callback_duplicate(self):
user = User.objects.create_user('jeff', 'jeff@toraysoft.com', '123')
data = {'adv_id': '1', 'app_id': 'theme',
'key': user.pk, 'udid': 'myudid',
'openudid': 'myopenid', 'bill': '2.0',
'points': 200, 'ad_name': 'music talk'
}
c = Client()
rsp = c.get(reverse('wall_cb', args=['waps']), data)
self.assertEqual(rsp.status_code, 200)
dt = json.loads(rsp.content)
self.assertTrue(dt['success'])
rsp = c.get(reverse('wall_cb', args=['waps']), data)
self.assertEqual(rsp.status_code, 200)
dt = json.loads(rsp.content)
self.assertFalse(dt['success'])
def test_waps_callback_invalid_ip(self):
c = Client(REMOTE_ADDR='192.168.0.1')
rsp = c.get(reverse('wall_cb', args=['waps']))
self.assertEqual(rsp.status_code, 405)
def create_youmi_ios_data(self, user):
ts = int(time.time())
return {'order': 'NO.1', 'app': 'my appid',
'adid': '1', 'user': user.pk,
'device': 'mydevice', 'chn': 0,
'price': '4.9', 'points': 90,
'time': ts, 'sig': 'xdref', 'ad': 'musictalk'
}
def test_youmi_ios_callback(self):
user = User.objects.create_user('jeff', 'jeff@toraysoft.com', '123')
data = self.create_youmi_ios_data(user)
keys = data.keys()
keys.sort()
appwalllog_done.send = Mock()
src = ''.join(['%s=%s' % (k, unicode(data[k]).encode('utf-8'))
for k in keys])
src += settings.YOUMI_CALLBACK_SECRET
md5 = hashlib.md5()
md5.update(src.encode('utf-8'))
sign = md5.hexdigest()
data['sign'] = sign
c = Client()
rsp = c.get(reverse('wall_cb', args=['youmi_ios']), data)
self.assertEqual(rsp.status_code, 200)
appwalllog_done.send.assert_called_with(sender=AppWallLog,
cost=90, user=user)
def test_youmi_ios_missing_sign(self):
c = Client()
rsp = c.get(reverse('wall_cb', args=['youmi_ios']))
self.assertEqual(rsp.status_code, 403)
def test_youmi_ios_invalidate_sign(self):
user = User.objects.create_user('jeff', 'jeff@toraysoft.com', '123')
data = self.create_youmi_ios_data(user)
data['sign'] = 'not a valid sign'
c = Client()
rsp = c.get(reverse('wall_cb', args=['youmi_ios']), data)
self.assertEqual(rsp.status_code, 403)
def test_youmi_ios_duplicate(self):
"""同一个订单提交两次"""
user = User.objects.create_user('jeff', 'jeff@toraysoft.com', '123')
data = self.create_youmi_ios_data(user)
keys = data.keys()
keys.sort()
src = ''.join(['%s=%s' % (k, unicode(data[k]).encode('utf-8'))
for k in keys])
src += settings.YOUMI_CALLBACK_SECRET
md5 = hashlib.md5()
md5.update(src.encode('utf-8'))
sign = md5.hexdigest()
data['sign'] = sign
c = Client()
rsp = c.get(reverse('wall_cb', args=['youmi_ios']), data)
self.assertEqual(rsp.status_code, 200)
# user = User.custom_objects.get(name=user.name)
# self.assertEqual(user.balance, 90)
rsp = c.get(reverse('wall_cb', args=['youmi_ios']), data)
self.assertEqual(rsp.status_code, 403)
def create_dianjoy_adr_data(self, user):
ts = int(time.time())
return {'snuid': user.pk, 'device_id': 'my device',
'app_id': 'helper', 'currency': 100,
'app_ratio': 1, 'time_stamp': ts,
'ad_name': '医生', 'pack_name': 'com.toraysoft.music',
'trade_type': 1,
}
def test_dianjoy_adr_invalid_token(self):
    """Dianjoy Android callback with an invalid token is rejected (403)."""
    user = User.objects.create_user('jeff', 'jeff@toraysoft.com', '123')
    payload = self.create_dianjoy_adr_data(user)
    payload['token'] = 'not a valid token'
    url = reverse('wall_cb', args=['dianjoy_adr'])
    response = Client().get(url, payload)
    self.assertEqual(response.status_code, 403)
def test_dianjoy_adr_success(self):
    """Dianjoy Android callback with a valid token.

    The first delivery is accepted (body '200'); redelivering the same
    payload still answers 200 but with a duplicate notice and must not
    credit the user again.
    """
    # NOTE(review): this local Mock shadows the module-level signal name but
    # is never connected to the dispatcher, so the assert_called checks
    # below do not actually verify signal delivery -- confirm intent.
    appwalllog_done = Mock()
    user = User.objects.create_user('jeff', 'jeff@toraysoft.com', '123')
    data = self.create_dianjoy_adr_data(user)
    # Token = md5(time_stamp + shared secret).
    src = str(data['time_stamp']) + \
        settings.GOLDENCAGE_DIANJOY_ANDROID_SECRET
    md5 = hashlib.md5()
    md5.update(src.encode('utf-8'))
    sign = md5.hexdigest()
    data['token'] = sign
    c = Client()
    rsp = c.get(reverse('wall_cb', args=['dianjoy_adr']), data)
    self.assertEqual(rsp.status_code, 200)
    self.assertEqual(rsp.content, '200')
    appwalllog_done.assert_called()
    # Replay the same order: accepted but flagged as a duplicate.
    appwalllog_done = Mock()
    rsp = c.get(reverse('wall_cb', args=['dianjoy_adr']), data)
    self.assertEqual(rsp.status_code, 200)
    self.assertEqual(rsp.content, 'OK, But duplicate item')
    self.assertFalse(appwalllog_done.called)
def create_domob_data(self, user):
    """Build a domob offer-wall callback payload for *user*."""
    return {
        'user': user.pk,
        'orderid': 'orderid',
        'pubid': 'pubid',
        'ad': 'ad',
        'adid': 112,
        'device': 'device',
        'channel': 1,
        'price': '11',
        'point': 10,
        'ts': int(time.time()),
        'pkg': 'pkg',
        'action': 1,
        'action_name': '签到-1',
    }
def test_domob_adr(self):
    """Domob Android callback: a valid sign is accepted, a replay is
    flagged as a duplicate, and a corrupted sign is rejected."""
    # NOTE(review): this local Mock is not wired to the signal dispatcher,
    # so the assert_called checks are not meaningful -- confirm intent.
    appwalllog_done = Mock()
    user = User.objects.create_user('jeff', 'jeff@toraysoft.com', '123')
    params = self.create_domob_data(user)
    # Sign = md5 over the key-sorted "key=value" concatenation plus the
    # platform private key (Python 2 iteritems).
    param_list = sorted(params.iteritems(), key=lambda d: d[0])
    sign = ''
    for param in param_list:
        sign += (str(param[0]) + '=' + str(param[1]))
    sign += str(settings.GOLDENCAGE_DOMOB_PRIVATE_KEY_ANDROID)
    m = hashlib.md5()
    m.update(sign)
    sign = m.hexdigest()
    params['sign'] = sign
    c = Client()
    rsp = c.get(reverse('wall_cb', args=['domob_adr']), params)
    self.assertEqual(rsp.status_code, 200)
    appwalllog_done.assert_called()
    # Replay of the same order: accepted but marked as duplicate.
    rsp = c.get(reverse('wall_cb', args=['domob_adr']), params)
    self.assertEqual(rsp.status_code, 200)
    self.assertFalse(appwalllog_done.called)
    self.assertEqual(rsp.content, 'OK, But Duplicated item')
    # A corrupted signature must be rejected outright.
    params['sign'] = 'haha'
    rsp = c.get(reverse('wall_cb', args=['domob_adr']), params)
    self.assertEqual(rsp.status_code, 403)
    self.assertFalse(appwalllog_done.called)
def test_domob_ios(self):
    """Domob callback signed with the iOS private key is accepted."""
    appwalllog_done = Mock()
    user = User.objects.create_user('jeff', 'jeff@toraysoft.com', '123')
    params = self.create_domob_data(user)
    param_list = sorted(params.iteritems(), key=lambda d: d[0])
    sign = ''
    for param in param_list:
        sign += (str(param[0]) + '=' + str(param[1]))
    sign += str(settings.GOLDENCAGE_DOMOB_PRIVATE_KEY_IOS)
    m = hashlib.md5()
    m.update(sign)
    sign = m.hexdigest()
    params['sign'] = sign
    c = Client()
    # NOTE(review): this posts to the 'domob_adr' endpoint although the
    # payload is signed with the iOS key -- confirm the URL is intended.
    rsp = c.get(reverse('wall_cb', args=['domob_adr']), params)
    self.assertEqual(rsp.status_code, 200)
    appwalllog_done.assert_called()
@skipIfCustomUser
class AlipayCallbackTest(TestCase):
def setUp(self):
self.user = User.objects.create_user('jeff',
'jeff@toraysoft.com', '123')
self.plan = ChargePlan(name=u'plan1', code='plan1',
value=3000, cost=750, coupon=50)
self.plan.save()
def create_payment_data(self):
order = Order(user=self.user, plan=self.plan, value=3000)
order.save()
self.order = order
return {'notify_time': '', 'notify_type': 'trade_status_sync',
'notify_id': 'csdfo834jr', 'sign_type': 'RSA',
'sign': 'no sign this time',
'out_trade_no': order.pk, 'subject': u'多啦A梦',
'payment_type': 1, 'trade_no': '2014112323e',
'trade_status': 'TRADE_FINISHED', 'seller_id': '2088xx',
'seller_email': 'toraysoft@gmail.com', 'buyer_id': '2088yy',
'buyer_email': 'bbmyth@gmail.com', 'total_fee': 30,
'quantity': 1, 'price': 30, 'body': u'不错的叮当主题哦',
'gmt_create': '', 'gmt_payment': '',
'is_total_fee_adjust': 'N', 'use_coupon': 'N', 'discount': '0'}
def test_alipay_callback(self):
# 正常流程, 第一次状态为等待付款,第二次为交易完成
data = self.create_payment_data()
c = Client()
data['trade_status'] = 'WAIT_BUYER_PAY'
payment_done.send = Mock()
task_done.send = Mock()
views.verify_notify_id = Mock(return_value=True)
views.verify_alipay_signature = Mock(return_value=True)
cache.set = Mock(return_value=None)
rsp = c.get(reverse('alipay_cb'), data)
self.assertEqual('success', rsp.content)
self.assertEqual(payment_done.send.call_count, 0)
self.assertEqual(task_done.send.call_count, 0)
data['trade_status'] = 'TRADE_FINISHED'
rsp = c.get(reverse('alipay_cb'), data)
self.assertEqual('success', rsp.content)
cost = int(round(config.EXCHANGE_RATE * 30))
payment_done.send.assert_called_with(sender=Charge,
cost=cost, user=self.user,
plan=self.plan, order=self.order)
task_done.send.assert_called_with(sender=Task, cost=50,
user=self.user)
def test_alipay_callback_sign_error(self):
# 签名错误
data = self.create_payment_data()
c = Client()
views.verify_notify_id = Mock(return_value=True)
views.verify_alipay_signature = Mock(return_value=False)
rsp = c.get(reverse('alipay_cb'), data)
self.assertEqual('error', rsp.content)
def test_alipay_callback_invalidate_request(self):
# 非来自支付宝的请求
data = self.create_payment_data()
c = Client()
views.verify_notify_id = Mock(return_value=False)
views.verify_alipay_signature = Mock(return_value=True)
rsp = c.get(reverse('alipay_cb'), data)
self.assertEqual('error', rsp.content)
def test_alipay_notifyid_duplicated(self):
# 重复收到同一个通知。通知ID同样。
data = self.create_payment_data()
views.verify_notify_id = Mock(return_value=True)
views.verify_alipay_signature = Mock(return_value=True)
cache.get = Mock(return_value=None)
cache.set = Mock()
payment_done.send = Mock()
c = Client()
rsp = c.get(reverse('alipay_cb'), data)
self.assertEqual('success', rsp.content)
payment_done.send.assert_called_with(sender=Charge, cost=750,
user=self.user,
plan=self.plan, order=self.order)
cache.get = Mock(return_value='123')
payment_done.send = Mock()
rsp = c.get(reverse('alipay_cb'), data)
self.assertEqual('error', rsp.content)
self.assertTrue(cache.get.assert_called)
self.assertEqual(0, payment_done.send.call_count)
def test_alipay_callback_status_revert(self):
# 同一个帐单,状态以先后不同的顺序回调。
data = self.create_payment_data()
data['trade_status'] = 'TRADE_FINISHED'
views.verify_notify_id = Mock(return_value=True)
views.verify_alipay_signature = Mock(return_value=True)
cache.set = Mock(return_value=None)
payment_done.send = Mock()
c = Client()
rsp = c.get(reverse('alipay_cb'), data)
self.assertEqual('success', rsp.content)
self.assertEqual(1, payment_done.send.call_count)
payment_done.send = Mock()
data['trade_status'] = 'WAIT_BUYER_PAY'
data['notify_id'] = 'another_notify'
rsp = c.get(reverse('alipay_cb'), data)
self.assertEqual(0, payment_done.send.call_count)
self.assertEqual('success', rsp.content)
self.assertEqual(2, cache.set.call_count)
def test_alipay_callback_duplicated(self):
# 同一个帐单,相同状态重复发送,将不会充值成功。
data = self.create_payment_data()
data['trade_status'] = 'WAIT_BUYER_PAY'
views.verify_notify_id = Mock(return_value=True)
views.verify_alipay_signature = Mock(return_value=True)
cache.set = Mock()
payment_done.send = Mock()
c = Client()
rsp = c.get(reverse('alipay_cb'), data)
self.assertEqual('success', rsp.content)
data['notify_id'] = 'another_notify'
rsp = c.get(reverse('alipay_cb'), data)
self.assertEqual('error', rsp.content)
self.assertEqual(1, cache.set.call_count)
self.assertEqual(0, payment_done.send.call_count)
def test_signature(self):
""" 测试之前,要去settings拷贝一个支付宝公钥
或者不对这个做单元测试
"""
sign = (u"DoqHII4KFb5QRp5J/bAQPMI/1nJwHf8IcVHDZvvNR5CHCEmAkelExygYooWi"
"yWchcBd2WHULCNtPKqFEWQALTynzUAkeF64zq9nyq8nzrVulwcKGnu+l"
"ja6Sg+2EILb3o8RuFcPOL/YAD5y1FxjJBUM33Z+LDcWgb/+eSMDiTQk=")
params = {
u"seller_email": u"randotech@126.com",
u"gmt_close": u"2014-09-02 11:37:03",
u"sign": sign,
u"subject": u"资助20元,赠送520金币",
u"is_total_fee_adjust": u"N",
u"gmt_create": u"2014-09-02 11:37:02",
u"out_trade_no": u"117800",
u"sign_type": u"RSA",
u"price": u"20.00",
u"buyer_email": u"mayuze13999087456@126.com",
u"discount": u"0.00",
u"trade_status": u"TRADE_FINISHED",
u"gmt_payment": u"2014-09-02 11:37:03",
u"trade_no": u"2014090200701660",
u"seller_id": u"2088311247579029",
u"use_coupon": u"N",
u"payment_type": u"1",
u"total_fee": u"20.00",
u"notify_time": u"2014-09-02 11:37:41",
u"buyer_id": u"2088502310925605",
u"notify_id": u"be431b210180989044cc985639b2a8635c",
u"notify_type": u"trade_status_sync",
u"quantity": u"1"
}
print 'views %s' % views.verify_alipay_signature
result = views.verify_alipay_signature('RSA', sign, params)
self.assertEqual(True, result)
@skipIfCustomUser
class AlipaySignTest(TestCase):
def setUp(self):
pass
def test_alipay_sign(self):
# 测试用key
settings.ALIPAY_PRIVATE_KEY = (
'-----BEGIN RSA PRIVATE KEY-----\n'
'MIICXAIBAAKBgQCxgCa64qPZ5IKudC+YdEDi2eyLbAtub2h1aBMmHj3hyc1Vdzjh'
'HyUUt2rgJ7fQAnjNbypzOOWRjAuSsDhB3HfAdle7pJGU5HhVZEpVdNvvdErOMPj1'
'9IXjTtSc2kBej3E4ETZB0CAbAo6vGzqN8B33NXwxJ6TE3rO/aPAI0SCnUQIDAQAB'
'AoGAKWPKpDWJI5wHZQqutowVPVC3ueMd30iXQRldrbvLjkTyXoWIe+Y5TVVf1Jku'
'YZDR/oV3jpqr3X6cjD4PQDxap+D/246GK+a+eDQDLfleb2RtKF1bl/6jqVcbHtnR'
'kL0MNbYLkuneigVRCetAcGWRxv+BVVP9DYUBjAUq5GZyqAECQQDaFt64w0lj2Nq2'
'Zb/izenEHX7d5QfsXL3tI1Mhxvzc2CznoTEQgMWgq8ayHd1KUW3KqtZqlrddxYYP'
'OIAwHIQRAkEA0FsNqYpgU4VlzGzGrWuVq/JDdKYmWOjrk0UbIpKZtIZvvE5S06IV'
'KJx2fnKh31riLhIJIqoewcaBVmKCV2QvQQJAfAf1su6dloOGH6XOc5bYFAkSVfAj'
'iXFVMsCcTuF0fcUUBMfPt6sEulP3NOV3LQUSg+iU+RmuP05O5+kiPjp5gQJBALuG'
'iBhkw+fIM2Q3LuYc43v7svzFIdR55rUIyLBoM9EIAn8AG4oA4nxHvlp2f/yQRuvi'
'Lbi2VrJfID+Ir/lJ4UECQCgEcFtaNfdZMkQ7Icsw2xynaSJ/osQbpxcOwq4itZ56'
'xs80ciaAm/uEY7lKiLMmMrjLLD9PBqsrTHa3bMIFaPw='
'\n-----END RSA PRIVATE KEY-----')
words = ('partner="2088311247579029"&seller_id="randotech@126.com"&'
'out_trade_no="P5IRN0A7B8P1BR7"&subject="珍珠项链"&'
'body="[2元包邮]韩版 韩国 流行饰品太阳花小巧雏菊 珍珠项链2M15"&'
'total_fee="10.00"¬ify_url="http%3A%2F%2Fwwww.xxx.com"&'
'service="mobile.securitypay.pay"&_input_charset="utf-8"&'
'payment_type="1"&return_url="www.xxx.com"&it_b_pay="1d"&'
'show_url="www.xxx.com"')
data = {'words': words}
c = Client()
rsp = c.post(reverse('alipaysign'), data)
print rsp.content
class WechatTestCase(TestCase):
    """Tests for the wechat text-message webhook (coupon exchange)."""

    def request_content(self, xml):
        """POST *xml* to the wechat webhook with a valid access signature
        and return the response body.

        The signature is sha1 over the sorted concatenation of
        [token, timestamp, nonce], matching wechat's server verification.
        """
        cli = Client()
        token = getattr(settings, 'GOLDENCAGE_WECHAT_TOKEN', '')
        timestamp = str(time.time())
        nonce = str(random.randint(1000, 9999))
        sign_ele = [token, timestamp, nonce]
        sign_ele.sort()
        signature = sha1(''.join(sign_ele)).hexdigest()
        params = {'timestamp': timestamp,
                  'nonce': nonce,
                  'signature': signature,
                  'echostr': '234'}
        query_string = '&'.join(['%s=%s' % (k, v) for k, v in params.items()])
        rsp = cli.post('/gc/wechat/?' + query_string,
                       data=xml, content_type='text/xml').content
        return rsp

    def test_success(self):
        """Coupon exchange via a wechat text message.

        An unknown exchange code is refused; once a WAITING exchange
        exists the code is granted exactly once; a replay of the same
        code is refused again.
        """
        coupon = Coupon(name='test', cost=10, limit=1,
                        key='bb', exchange_style='wechat')
        coupon.save()
        user = User.objects.create_user('jeff', 'jeff@toraysoft.com', '123')
        # 'bb1233' does not exist yet -> rejected.
        xml = """<xml>
<ToUserName><![CDATA[techparty]]></ToUserName>
<FromUserName><![CDATA[o_BfQjrOWghP2cM0cN7K0kkR54fA]]></FromUserName>
<CreateTime>1400131860</CreateTime>
<MsgType><![CDATA[text]]></MsgType>
<Content><![CDATA[bb1233]]></Content>
<MsgId>1234567890123456</MsgId>
</xml>"""
        rsp = self.request_content(xml)
        self.assertIn('无效的兑换码,或已被兑换过。', rsp)
        # Create the matching WAITING exchange -> the same code now works.
        exc = Exchange(coupon=coupon, user=user, cost=10, status='WAITING',
                       exchange_code='1233')
        exc.save()
        xml = """<xml>
<ToUserName><![CDATA[techparty]]></ToUserName>
<FromUserName><![CDATA[o_BfQjrOWghP2cM0cN7K0kkR54fA]]></FromUserName>
<CreateTime>1400131860</CreateTime>
<MsgType><![CDATA[text]]></MsgType>
<Content><![CDATA[bb1233]]></Content>
<MsgId>1234567890123456</MsgId>
</xml>"""
        rsp = self.request_content(xml)
        self.assertIn('您已获得了10金币', rsp)
        # Replaying the already-consumed code -> rejected again.
        xml = """<xml>
<ToUserName><![CDATA[techparty]]></ToUserName>
<FromUserName><![CDATA[o_BfQjrOWghP2cM0cN7K0kkR54fA]]></FromUserName>
<CreateTime>1400131860</CreateTime>
<MsgType><![CDATA[text]]></MsgType>
<Content><![CDATA[bb1233]]></Content>
<MsgId>1234567890123456</MsgId>
</xml>"""
        rsp = self.request_content(xml)
        self.assertIn('无效的兑换码,或已被兑换过。', rsp)
@skipIfCustomUser
class WechatpayTest(TestCase):
def setUp(self):
self.user = User.objects.create_user('jeff',
'jeff@toraysoft.com', '123')
self.plan = ChargePlan(name=u'plan1', code='plan1',
value=30, cost=750, coupon=50)
self.plan.save()
def test_gen_package(self):
cli = Client()
package = {'bank_type': 'WX', 'body': '千足 金箍棒',
'fee_type': '1', 'input_charset': 'UTF-8',
'getOutTradNo': '81282373272',
'spbill_create_ip': '192.168.1.1', 'total_fee': '1'}
data = {'traceid': 'ikaoiaos', 'package': package}
data = json.dumps(data)
print 'data = %s' % data
rsp = cli.post('/gc/wechatpaypackage/',
data=data, content_type='application/json')
print rsp.content
def test_xml_to_dict(self):
from .views import _wechatpay_xml_to_dict
raw_str = ("""
<xml>
<OpenId><![CDATA[111222]]></OpenId>
<AppId><![CDATA[wwwwb4f85f3a797777]]></AppId>
<IsSubscribe>1</IsSubscribe>
<TimeStamp> 1369743511</TimeStamp>
<NonceStr><![CDATA[jALldRTHAFd5Tgs5]]></NonceStr>
<AppSignature><![CDATA[bafe07f060fdb4b5ff756f973aecffa]]>
</AppSignature>
<SignMethod><![CDATA[sha1]]></SignMethod >
</xml>""")
dict_ret = _wechatpay_xml_to_dict(raw_str)
print dict_ret
def test_wechatpay_get_info(self):
plan = ChargePlan()
plan.name = u'商品商品'
plan.value = 10000
plan.cost = 100
plan.save()
from goldencage.views import wechat_pay_get_access_token
from goldencage.views import wechatpay_get_info
content = wechat_pay_get_access_token()
access_token = content.get('access_token')
if not access_token:
print content
data, err = wechatpay_get_info(
access_token, plan.id, '123321', '127.0.0.1', 'traceiddd')
def test_wechatpay_notify(self):
order = Order(id=1115, user=self.user, plan=self.plan, value=30)
order.save()
body = """
<xml><OpenId><![CDATA[oaCDJju5TzPSv0ZT_GP5nLsPAQfY]]></OpenId>
<AppId><![CDATA[wx6745aaa6e2878f99]]></AppId>
<IsSubscribe>0</IsSubscribe>
<TimeStamp>1418365289</TimeStamp>
<NonceStr><![CDATA[kLI9t3MWRx4RYZVu]]></NonceStr>
<AppSignature><![CDATA[16d03d4341d62a3d635c7593cc84eb1554c36205]]></AppSignature>
<SignMethod><![CDATA[sha1]]></SignMethod>
</xml>
"""
params = 'bank_billno=201412126100895338&bank_type=3006&discount=0&fee_type=1&input_charset=UTF-8¬ify_id=epFRTtDSAK6AGztmmEb5cOpCQCzg06fiAj8D9w6l_0VbjHy2_6NnDpKIs5un-g5TJTsCCDC1ZA8jFy3WY2VV1nWNYehhK-Tg&out_trade_no=1115&partner=1222813501&product_fee=1&sign=6265C0C62683BE1F5F7C6D688A25CD00&sign_type=MD5&time_end=20141212142129&total_fee=1&trade_mode=1&trade_state=0&transaction_id=1222813501201412126039873136&transport_fee=0'
url = '/gc/wechatcb/?%s' % params
cli = Client()
rsp = cli.post(url, data=body, content_type='application/xml')
print '+++++++++++++++++'
print rsp.content
def test_wechatpay_mp_get_info(self):
print '+++++++++++++++++'
from goldencage.views import wechatpay_mp_get_info
prepay_id, errmsg = wechatpay_mp_get_info(
self.plan.id,
out_trade_no='123',
client_ip='127.0.0.1',
openid='oFTfqjmMVWKo7GM0vuFhpJHWDjh4',
trade_type='JSAPI'
)
print prepay_id
print errmsg
def test_wechat_mp_pay_verify(self):
from goldencage.views import wechat_mp_pay_verify
req_dict = {'openid': 'oFTfqjiGdMbL-6I04rqcU_PNziyg', 'trade_type': 'JSAPI', 'total_fee': '1', 'return_code': 'SUCCESS', 'nonce_str': 'lu5lr4a272iy5', 'is_subscribe': 'Y', 'fee_type': 'CNY', 'bank_type': 'CMB_CREDIT', 'mch_id':
'1229194702', 'out_trade_no': '277250', 'transaction_id': '1003760227201502030012096138', 'time_end': '20150203223430', 'appid': 'wx02dce087b6279278', 'sign': 'FF3BBBE7E99D6043510F85FCFC322B08', 'cash_fee': '1', 'result_code': 'SUCCESS'}
wechat_mp_pay_verify(req_dict)
body = """
<xml><appid><![CDATA[wx02dce087b7279278]]></appid>
<bank_type><![CDATA[CMB_CREDIT]]></bank_type>
<cash_fee><![CDATA[1]]></cash_fee>
<fee_type><![CDATA[CNY]]></fee_type>
<is_subscribe><![CDATA[Y]]></is_subscribe>
<mch_id><![CDATA[12213494702]]></mch_id>
<nonce_str><![CDATA[lu5lr4a272iy5]]></nonce_str>
<openid><![CDATA[oFTfqjiGdMbL-6I04rqcU_PNziyg]]></openid>
<out_trade_no><![CDATA[277250]]></out_trade_no>
<result_code><![CDATA[SUCCESS]]></result_code>
<return_code><![CDATA[SUCCESS]]></return_code>
<sign><![CDATA[FF3BBBE7E99D6043510F85FCFC322B08]]></sign>
<time_end><![CDATA[20150203223430]]></time_end>
<total_fee>1</total_fee>
<trade_type><![CDATA[JSAPI]]></trade_type>
<transaction_id><![CDATA[1003760227201502030012096138]]></transaction_id>
</xml>
"""
url = '/gc/wechatmpcb/'
cli = Client()
rsp = cli.post(url, data=body, content_type='application/xml')
print '+++++++++++++++++'
print rsp.content
| bsd-3-clause |
wetneb/django | django/template/backends/jinja2.py | 14 | 2120 | # Since this package contains a "django" module, this is required on Python 2.
from __future__ import absolute_import
import sys
import jinja2
from django.conf import settings
from django.template import TemplateDoesNotExist, TemplateSyntaxError
from django.utils import six
from django.utils.module_loading import import_string
from .base import BaseEngine
from .utils import csrf_input_lazy, csrf_token_lazy
class Jinja2(BaseEngine):
    """Django template backend wrapping a jinja2 Environment."""

    app_dirname = 'jinja2'

    def __init__(self, params):
        """Build the jinja2 Environment from the TEMPLATES setting.

        OPTIONS may name a custom 'environment' class (dotted path); the
        remaining options are passed through, with Django-friendly
        defaults filled in for anything not given explicitly.
        """
        params = params.copy()
        options = params.pop('OPTIONS').copy()
        super(Jinja2, self).__init__(params)

        environment_path = options.pop('environment', 'jinja2.Environment')
        environment_cls = import_string(environment_path)

        defaults = {
            'autoescape': True,
            'loader': jinja2.FileSystemLoader(self.template_dirs),
            'auto_reload': settings.DEBUG,
            'undefined': (jinja2.DebugUndefined if settings.DEBUG
                          else jinja2.Undefined),
        }
        for key, value in defaults.items():
            options.setdefault(key, value)

        self.env = environment_cls(**options)

    def from_string(self, template_code):
        """Compile *template_code* and wrap it in Django's Template API."""
        return Template(self.env.from_string(template_code))

    def get_template(self, template_name):
        """Load a template by name, translating jinja2's exceptions into
        the Django equivalents while preserving the original traceback."""
        try:
            template = self.env.get_template(template_name)
        except jinja2.TemplateNotFound as exc:
            six.reraise(TemplateDoesNotExist, TemplateDoesNotExist(exc.args),
                        sys.exc_info()[2])
        except jinja2.TemplateSyntaxError as exc:
            six.reraise(TemplateSyntaxError, TemplateSyntaxError(exc.args),
                        sys.exc_info()[2])
        else:
            return Template(template)
class Template(object):
    """Adapter exposing a jinja2 template through Django's render API."""

    def __init__(self, template):
        self.template = template

    def render(self, context=None, request=None):
        """Render with *context*; when *request* is given, also expose it
        plus lazy CSRF helpers to the template."""
        if context is None:
            context = {}
        if request is not None:
            context.update({
                'request': request,
                'csrf_input': csrf_input_lazy(request),
                'csrf_token': csrf_token_lazy(request),
            })
        return self.template.render(**context)
| bsd-3-clause |
buckinha/gravity | SWMv2_1.py | 1 | 17498 | """SWM, A Simple Wildfire-inspired MDP model. Version 2.1"""
"""
SWMv2.1
New Features:
-An additional weather variable ("moisture") which works against the spread of large fires and which has
a separate threshold.
-The policy function now includes the current state variables "timber" and "vulnerability" as parameters
in addition to the original. The full policy is now [CONS, HEAT, MOISTURE, TIMBER, VULNERABILITY, HABITAT]
-habitat value: a seperate reward structure with different criteria than the main "reward"
-Using feature transformations so that the logistic sees only features which have mean ~= 0 and STD ~= 0.5
-OPTIONALLY using feature vector length adjustment so that the crossproduct inside of the logistic policy
function is in the same range, regardless of how many values are in the vector
"""
import random, math, MDP
import numpy as np
from feature_transformation import feature_transformation as feature_trans
def simulate(timesteps, policy=[0,0,0,0,0,0], random_seed=0, model_parameters={}, SILENT=False, PROBABILISTIC_CHOICES=True):
    """Simulate one SWM v2.1 pathway and return its summary dictionary.

    Args:
        timesteps: number of simulation steps to run (coerced to int)
        policy: logistic policy weights [CONS, HEAT, MOISTURE, TIMBER,
            VULNERABILITY, HABITAT], or a shorthand string understood by
            sanitize_policy ('LB', 'SA', 'CT')
        random_seed: RNG seed; also recorded as the pathway's ID number
        model_parameters: optional dict overriding reward/transition constants
        SILENT: when False, print a short console summary at the end
        PROBABILISTIC_CHOICES: when True, the suppress/let-burn choice is
            drawn against the policy probability; otherwise it is a hard
            threshold at 0.5

    Returns:
        dict of pathway statistics and model constants; the per-step record
        list is attached under the "States" key.
    """
    random.seed(random_seed)

    #range of the randomly drawn, uniformally distributed "event" values
    # that correspond to "heat" and "moisture"
    event_max = 1.0
    event_min = 0.0

    #state variable bounds
    vuln_max = 1.0
    vuln_min = 0.02
    timber_max = 1.0
    timber_min = 0.0

    #timber multiplier: scales the rewards while leaving the raw timber
    # values between 0 and 1
    timber_multiplier = 10.0

    timesteps = int(timesteps)

    #sanitize policy (accepts lists or shorthand strings)
    pol = sanitize_policy(policy)

    #REWARD STRUCTURE
    #cost of suppression in a mild event
    supp_cost_mild = 9
    if "Suppression Cost - Mild Event" in model_parameters.keys(): supp_cost_mild = model_parameters["Suppression Cost - Mild Event"]

    #cost of suppresion in a severe event
    supp_cost_severe = 13
    if "Suppression Cost - Severe Event" in model_parameters.keys(): supp_cost_severe = model_parameters["Suppression Cost - Severe Event"]

    #cost of a severe fire on the next timestep
    burn_cost = 40
    if "Severe Burn Cost" in model_parameters.keys(): burn_cost = model_parameters["Severe Burn Cost"]

    #TRANSITION VARIABLES
    vuln_change_after_suppression = 0.01
    vuln_change_after_mild = -0.01
    vuln_change_after_severe = -0.015
    if "Vulnerability Change After Suppression" in model_parameters.keys(): vuln_change_after_suppression = model_parameters["Vulnerability Change After Suppression"]
    if "Vulnerability Change After Mild" in model_parameters.keys(): vuln_change_after_mild = model_parameters["Vulnerability Change After Mild"]
    if "Vulnerability Change After Severe" in model_parameters.keys(): vuln_change_after_severe = model_parameters["Vulnerability Change After Severe"]

    timber_change_after_suppression = 0.01
    timber_change_after_mild = 0.01
    timber_change_after_severe = -0.5
    if "Timber Value Change After Suppression" in model_parameters.keys(): timber_change_after_suppression = model_parameters["Timber Value Change After Suppression"]
    if "Timber Value Change After Mild" in model_parameters.keys(): timber_change_after_mild = model_parameters["Timber Value Change After Mild"]
    if "Timber Value Change After Severe" in model_parameters.keys(): timber_change_after_severe = model_parameters["Timber Value Change After Severe"]

    #habitat transition variables: habitat improves only while the time
    # since the last mild and last severe fire both sit in these windows
    habitat_mild_maximum = 15
    habitat_mild_minimum = 0
    habitat_severe_maximum = 40
    habitat_severe_minimum = 10
    habitat_loss_if_no_mild = 0.2
    habitat_loss_if_no_severe = 0.2
    habitat_gain = 0.1

    if "Probabilistic Choices" in model_parameters.keys():
        if model_parameters["Probabilistic Choices"] == "True":
            PROBABILISTIC_CHOICES = True
        else:
            PROBABILISTIC_CHOICES = False

    starting_Vulnerability = random.uniform(0.2,0.8)
    # BUG FIX: this previously read the wrong key ("Starting Condition")
    # into an unused variable (starting_condition), so the
    # "Starting Vulnerability" override was silently ignored.
    if "Starting Vulnerability" in model_parameters.keys(): starting_Vulnerability = model_parameters["Starting Vulnerability"]

    starting_timber = random.uniform(0.2,0.8)
    if "Starting Timber Value" in model_parameters.keys(): starting_timber = model_parameters["Starting Timber Value"]

    starting_habitat = random.uniform(0.2,0.8)
    if "Starting Habitat Value" in model_parameters.keys(): starting_habitat = model_parameters["Starting Habitat Value"]

    #setting 'enums'
    MILD=0
    SEVERE=1

    #starting simulations
    states = [None] * timesteps

    #initial state
    current_vulnerability = starting_Vulnerability
    current_timber = starting_timber
    current_habitat = starting_habitat
    time_since_severe = 0
    time_since_mild = 0

    #instantiating the feature list now, to save cycles
    curent_features = [0.0, 0.0, 0.0, 0.0, 0.0]

    for i in range(timesteps):

        #generate the two weather variables for this event
        heat = random.uniform(event_min, event_max)
        moisture = random.uniform(event_min, event_max)

        #severity is meant to be a hidden, "black box" variable inside the
        # MDP and is not available to the logistic function as a parameter
        severity = MILD
        if heat >= (1 - current_vulnerability):
            #if moisture < ((event_max - event_min) * 0.3):
            if True:
                severity = SEVERE

        #logistic function for the policy choice
        curent_features = [heat, moisture, current_timber, current_vulnerability, current_habitat]
        policy_value = policy_function(curent_features, pol)

        #rolling a value to compare to the policy 'probability'
        choice_roll = random.uniform(0,1)

        #assume let-burn for the moment...
        choice = False
        choice_prob = 1.0 - policy_value

        #check for suppress, and update values if necessary
        if PROBABILISTIC_CHOICES:
            if choice_roll < policy_value:
                choice = True
                choice_prob = policy_value
        else:
            if policy_value >= 0.5:
                choice = True
                choice_prob = policy_value

        ### CALCULATE PRIMARY REWARD ###
        supp_cost = 0
        burn_penalty = 0
        if choice:
            #suppression was chosen
            if severity == MILD:
                supp_cost = supp_cost_mild
            elif severity == SEVERE:
                supp_cost = supp_cost_severe
        else:
            #suppress was NOT chosen
            if severity == SEVERE:
                #an unsuppressed severe fire incurs the burn cost: this is
                # modeling the timber values lost in a large fire.
                burn_penalty = burn_cost

        current_reward = (timber_multiplier * current_timber) - supp_cost - burn_penalty

        #Record state information
        states[i] = {
            "Vulnerability": current_vulnerability,
            "Timber": current_timber,
            "Heat": heat,
            "Moisture": moisture,
            "Choice": choice,
            "Choice Probability": choice_prob,
            "Policy Value": policy_value,
            "Reward": current_reward,
            "Habitat": 10 * current_habitat,
            "Time Step": i
        }

        ### TRANSITION ###
        if not choice:
            #no suppression
            if severity == SEVERE:
                current_vulnerability += vuln_change_after_severe
                current_timber += timber_change_after_severe
                #reset the severe timer, increment the mild one
                time_since_severe = 0
                time_since_mild += 1
            elif severity == MILD:
                current_vulnerability += vuln_change_after_mild
                current_timber += timber_change_after_mild
                #reset mild, increment severe
                time_since_mild = 0
                time_since_severe += 1
        else:
            #suppression
            current_vulnerability += vuln_change_after_suppression
            current_timber += timber_change_after_suppression
            #increment both timers (suppression means no fire of either kind)
            time_since_mild += 1
            time_since_severe += 1

        #check for habitat changes.
        #Note to self: suppression effects are already taken into account above
        if ( (time_since_mild <= habitat_mild_maximum) and
             (time_since_mild >= habitat_mild_minimum) and
             (time_since_severe <= habitat_severe_maximum) and
             (time_since_severe >= habitat_severe_minimum) ):
            #this fire regime is happy on all counts
            current_habitat += habitat_gain
        else:
            #this fire regime is unhappy in some way.
            if (time_since_mild > habitat_mild_maximum) or (time_since_mild < habitat_mild_minimum):
                current_habitat -= habitat_loss_if_no_mild
            if (time_since_severe > habitat_severe_maximum) or (time_since_severe < habitat_severe_minimum):
                current_habitat -= habitat_loss_if_no_severe

        #Enforce state variable bounds
        if current_vulnerability > vuln_max: current_vulnerability = vuln_max
        if current_vulnerability < vuln_min: current_vulnerability = vuln_min
        if current_timber > timber_max: current_timber = timber_max
        if current_timber < timber_min: current_timber = timber_min
        if current_habitat > 1: current_habitat = 1
        if current_habitat < 0: current_habitat = 0

    #finished simulations, gather pathway statistics
    vals = []
    hab = []
    suppressions = 0.0
    joint_prob = 1.0
    prob_sum = 0.0
    for i in range(timesteps):
        if states[i]["Choice"]: suppressions += 1
        joint_prob *= states[i]["Choice Probability"]
        prob_sum += states[i]["Choice Probability"]
        vals.append(states[i]["Reward"])
        hab.append(states[i]["Habitat"])

    ave_prob = prob_sum / timesteps

    summary = {
        "Average State Value": round(np.mean(vals),3),
        "Total Pathway Value": round(np.sum(vals),3),
        "STD State Value": round(np.std(vals),1),
        "Average Habitat Value": round(np.mean(hab),1),
        "Suppressions": suppressions,
        "Suppression Rate": round((float(suppressions)/timesteps),2),
        "Joint Probability": joint_prob,
        "Average Probability": round(ave_prob, 3),
        "ID Number": random_seed,
        "Timesteps": timesteps,
        "Generation Policy": policy,
        "Version": "2.1",
        "Vulnerability Min": vuln_min,
        "Vulnerability Max": vuln_max,
        "Vulnerability Change After Suppression": vuln_change_after_suppression,
        "Vulnerability Change After Mild": vuln_change_after_mild,
        "Vulnerability Change After Severe": vuln_change_after_severe,
        "Timber Value Min": timber_min,
        "Timber Value Max": timber_max,
        "Timber Value Change After Suppression": timber_change_after_suppression,
        "Timber Value Change After Mild": timber_change_after_mild,
        "Timber Value Change After Severe": timber_change_after_severe,
        "Suppression Cost - Mild": supp_cost_mild,
        "Suppression Cost - Severe": supp_cost_severe,
        "Severe Burn Cost": burn_cost
    }

    if not SILENT:
        print("")
        print("Simulation Complete - Pathway " + str(random_seed))
        print("Average State Value: " + str(round(np.mean(vals),1)) + "  STD: " + str(round(np.std(vals),1)))
        print("Average Habitat Value: " + str(round(np.mean(hab),1)) )
        print("Suppressions: " + str(suppressions))
        print("Suppression Rate: " + str(round((float(suppressions)/timesteps),2)))
        print("Joint Probability:" + str(joint_prob))
        print("Average Probability: " + str(round(ave_prob, 3)))
        print("")

    summary["States"] = states
    return summary
def policy_function(feature_vector, policy, vector_length_scaling=False):
    """Evaluate the logistic policy for one feature vector.

    The raw features are passed through feature_transformation, a constant
    term (1.0) is prepended, and the dot product with ``policy`` is pushed
    through a logistic function.  When ``vector_length_scaling`` is set,
    the dot product is shrunk by 1/sqrt(len(policy)) so its magnitude is
    comparable across policies of different lengths.
    """
    features = [1.0] + feature_trans(feature_vector)

    # dot product of the (constant-prefixed) features with the weights
    cross_product = np.sum([features[i] * policy[i]
                            for i in range(len(policy))])

    if vector_length_scaling:
        cross_product = cross_product * (1.0 / np.sqrt(len(policy)))

    # logistic squashing into (0, 1)
    return 1.0 / (1.0 + np.exp(-1.0 * cross_product))
def sanitize_policy(policy):
    """Return a policy coefficient list of length at least 6.

    Accepts either a list of coefficients (zero-padded up to length 6 when
    short; extra values are kept) or one of the shorthand strings:
    'LB' (let-burn), 'SA' (suppress-all), 'CT' (coin-toss).  Any other
    string (e.g. "MIXED_CT") falls back to CT.
    """
    if isinstance(policy, list):
        pol = policy[:]
        if len(pol) < 6:
            # BUG FIX: the original padded a fresh empty list instead of the
            # copy, discarding every supplied coefficient and returning all
            # zeros; keep the given values and pad with zeros.
            pol += [0] * (6 - len(pol))
    else:
        # it's not a list, so interpret the shorthand string
        if policy == 'LB':   pol = [-20,0,0,0,0,0]
        elif policy == 'SA': pol = [ 20,0,0,0,0,0]
        elif policy == 'CT': pol = [  0,0,0,0,0,0]
        else: pol = [0,0,0,0,0,0] #CT is the catch-all for strings like "MIXED_CT"
    return pol
def convert_to_MDP_pathway(SWMv2_pathway, VALUE_ON_HABITAT=False, percentage_habitat=0):
    """Convert a SWMv2 pathway dict into a generic MDP.MDP_Pathway object.

    NOTE: this consumes the input -- "States" is popped off the dict and
    the remainder is attached to the result as metadata.

    Reward selection (in order of precedence): VALUE_ON_HABITAT uses the
    habitat value alone; a percentage_habitat in (0, 1] blends habitat and
    the monetary reward; the default is the plain reward.
    """
    #create a new MDP pathway object, with policy length = 6
    new_MDP_pw = MDP.MDP_Pathway(6)
    new_MDP_pw.ID_number = SWMv2_pathway["ID Number"]
    new_MDP_pw.net_value = SWMv2_pathway["Total Pathway Value"]
    new_MDP_pw.actions_1_taken = SWMv2_pathway["Suppressions"]
    new_MDP_pw.actions_0_taken = SWMv2_pathway["Timesteps"] - SWMv2_pathway["Suppressions"]
    new_MDP_pw.generation_joint_prob = SWMv2_pathway["Joint Probability"]
    new_MDP_pw.set_generation_policy_parameters(SWMv2_pathway["Generation Policy"][:])

    for i in range(len(SWMv2_pathway["States"])):
        event = MDP.MDP_Event(i)

        state = SWMv2_pathway["States"][i]
        heat = state["Heat"]
        moisture = state["Moisture"]
        timber = state["Timber"]
        vulnerability = state["Vulnerability"]
        habitat = state["Habitat"]

        #the leading 1 is the constant term for the logistic policy
        event.state = [1, heat, moisture, timber, vulnerability, habitat]
        event.state_length = 6
        event.action = state["Choice"]
        event.decision_prob = state["Choice Probability"]
        event.action_prob = state["Policy Value"]

        #setting value. VALUE_ON_HABITAT takes precedence, followed by
        # percentage_habitat; otherwise use the plain reward.
        if VALUE_ON_HABITAT:
            event.rewards = state["Habitat"]
        elif percentage_habitat > 0:
            part_hab = state["Habitat"] * percentage_habitat
            # BUG FIX: the original multiplied "Habitat" again here, so the
            # blend always collapsed to the plain habitat value; blend the
            # monetary reward in as intended.
            part_val = state["Reward"] * (1 - percentage_habitat)
            event.rewards = part_hab + part_val
        else:
            event.rewards = state["Reward"]

        new_MDP_pw.events.append(event)

    #everything needed for the MDP object has been filled in, so now
    # remove the states and keep the rest of the dictionary as metadata
    SWMv2_pathway.pop("States", None)
    new_MDP_pw.metadata = SWMv2_pathway
    return new_MDP_pw
| mpl-2.0 |
thdtjsdn/geonode | geonode/documents/forms.py | 17 | 5716 | import json
import os
import re
from django import forms
from django.utils.translation import ugettext_lazy as _
from django.contrib.contenttypes.models import ContentType
from django.conf import settings
from django.forms import HiddenInput, TextInput
from modeltranslation.forms import TranslationModelForm
from geonode.documents.models import Document
from geonode.maps.models import Map
from geonode.layers.models import Layer
from geonode.base.forms import ResourceBaseForm
class DocumentForm(ResourceBaseForm):
    """Form for editing a :class:`Document`, including an optional link to
    a related resource (a ``Layer`` or a ``Map``) stored as a generic
    ``content_type``/``object_id`` pair on the instance.
    """
    resource = forms.ChoiceField(label='Link to')

    def __init__(self, *args, **kwargs):
        super(DocumentForm, self).__init__(*args, **kwargs)
        # Build the "Link to" choices from every Layer and Map, sorted by
        # title.  Each choice value encodes the content type id and object
        # id so save() can resolve the generic foreign key later.
        rbases = list(Layer.objects.all())
        rbases += list(Map.objects.all())
        rbases.sort(key=lambda x: x.title)
        rbases_choices = []
        rbases_choices.append(['no_link', '---------'])
        for obj in rbases:
            type_id = ContentType.objects.get_for_model(obj.__class__).id
            obj_id = obj.id
            form_value = "type:%s-id:%s" % (type_id, obj_id)
            display_text = '%s (%s)' % (obj.title, obj.polymorphic_ctype.model)
            rbases_choices.append([form_value, display_text])
        self.fields['resource'].choices = rbases_choices
        # Pre-select the currently linked resource, if any.
        if self.instance.content_type:
            self.fields['resource'].initial = 'type:%s-id:%s' % (
                self.instance.content_type.id, self.instance.object_id)

    def save(self, *args, **kwargs):
        """Persist the document, resolving the selected resource choice
        (if any) into ``content_type``/``object_id`` on the instance.
        """
        contenttype_id = None
        contenttype = None
        object_id = None
        resource = self.cleaned_data['resource']
        if resource != 'no_link':
            # Raw string so the \d escapes reach the regex engine verbatim;
            # a plain string triggers an invalid-escape-sequence warning on
            # modern Python.
            matches = re.match(r"type:(\d+)-id:(\d+)", resource).groups()
            contenttype_id = matches[0]
            object_id = matches[1]
            contenttype = ContentType.objects.get(id=contenttype_id)
        self.cleaned_data['content_type'] = contenttype_id
        self.cleaned_data['object_id'] = object_id
        self.instance.object_id = object_id
        self.instance.content_type = contenttype
        return super(DocumentForm, self).save(*args, **kwargs)

    class Meta(ResourceBaseForm.Meta):
        model = Document
        exclude = ResourceBaseForm.Meta.exclude + (
            'content_type',
            'object_id',
            'doc_file',
            'extension',
            'doc_type',
            'doc_url')
class DocumentDescriptionForm(forms.Form):
    """Plain form capturing a document's title, abstract and keywords."""

    title = forms.CharField(max_length=300)
    abstract = forms.CharField(max_length=1000, widget=forms.Textarea,
                               required=False)
    keywords = forms.CharField(max_length=500, required=False)
class DocumentReplaceForm(forms.ModelForm):
    """
    The form used to replace a document.
    """

    class Meta:
        model = Document
        fields = ['doc_file', 'doc_url']

    def clean(self):
        """
        Ensures exactly one of doc_file / doc_url is populated.
        """
        cleaned_data = super(DocumentReplaceForm, self).clean()
        doc_file = cleaned_data.get('doc_file')
        doc_url = cleaned_data.get('doc_url')
        if doc_file and doc_url:
            raise forms.ValidationError(
                _("A document cannot have both a file and a url."))
        if not doc_file and not doc_url:
            raise forms.ValidationError(_("Document must be a file or url."))
        return cleaned_data

    def clean_doc_file(self):
        """
        Ensures the doc_file has an allowed extension.
        """
        doc_file = self.cleaned_data.get('doc_file')
        if doc_file:
            extension = os.path.splitext(doc_file.name)[1].lower()[1:]
            if extension not in settings.ALLOWED_DOCUMENT_TYPES:
                raise forms.ValidationError(_("This file type is not allowed"))
        return doc_file
class DocumentCreateForm(TranslationModelForm):
    """
    The document upload form.
    """
    permissions = forms.CharField(
        required=True,
        widget=HiddenInput(
            attrs={
                'name': 'permissions',
                'id': 'permissions'}))
    resource = forms.CharField(
        label=_("Link to"),
        required=False,
        widget=TextInput(
            attrs={
                'name': 'title__contains',
                'id': 'resource'}))

    class Meta:
        model = Document
        fields = ['title', 'doc_file', 'doc_url']
        widgets = {
            'name': HiddenInput(attrs={'cols': 80, 'rows': 20}),
        }

    def clean_permissions(self):
        """
        Ensures the permissions field holds valid JSON.
        """
        try:
            return json.loads(self.cleaned_data['permissions'])
        except ValueError:
            raise forms.ValidationError(_("Permissions must be valid JSON."))

    def clean(self):
        """
        Ensures exactly one of doc_file / doc_url is populated.
        """
        cleaned_data = super(DocumentCreateForm, self).clean()
        doc_file = cleaned_data.get('doc_file')
        doc_url = cleaned_data.get('doc_url')
        if doc_file and doc_url:
            raise forms.ValidationError(
                _("A document cannot have both a file and a url."))
        if not doc_file and not doc_url:
            raise forms.ValidationError(_("Document must be a file or url."))
        return cleaned_data

    def clean_doc_file(self):
        """
        Ensures the doc_file has an allowed extension.
        """
        doc_file = self.cleaned_data.get('doc_file')
        if doc_file:
            extension = os.path.splitext(doc_file.name)[1].lower()[1:]
            if extension not in settings.ALLOWED_DOCUMENT_TYPES:
                raise forms.ValidationError(_("This file type is not allowed"))
        return doc_file
| gpl-3.0 |
sprockets/sprockets.handlers.status | sprockets/handlers/status/__init__.py | 1 | 2601 | """
sprockets.handlers.status
A small handler for reporting application status
"""
import pkg_resources
from tornado import web
__version__ = '0.1.2'

# Status values used by StatusHandler: MAINTENANCE maps to HTTP 503,
# OK maps to 200 and anything else maps to 500.
UNKNOWN = 'unknown'
MAINTENANCE = 'maintenance'
OK = 'ok'

# Application name reported in the status payload; set via set_application().
APPLICATION = UNKNOWN
def set_application(name):
    """Record the application name that the status endpoint reports.

    :param str name: The application name

    """
    global APPLICATION
    APPLICATION = name
class StatusHandler(web.RequestHandler):
    """Implement a status handler endpoint that can be used to get
    information about the current service.

    """
    def get(self, *args, **kwargs):
        """Tornado RequestHandler GET request endpoint for reporting status

        :param list args: positional args
        :param dict kwargs: keyword args

        """
        self.set_status(self._status_response_code())
        self.write(self._status_response())

    def _application_status(self):
        """Extend this method to return application status dynamically.

        If the value returns ``maintenance`` a ``503`` status code will be
        returned for any request to the handler. If the value contains
        anything but ``ok``, a ``500`` status code will be returned by the
        handler.

        :rtype: str

        """
        # Default to the module-level OK constant (not a literal) so the
        # value stays consistent with the comparisons performed in
        # _status_response_code().
        return getattr(self.application, 'status', OK)

    @staticmethod
    def _package_version():
        """Return the installed distribution version for APPLICATION, or
        UNKNOWN when the application name is unset or not installed.

        :rtype: str

        """
        if APPLICATION == UNKNOWN:
            return UNKNOWN
        try:
            return pkg_resources.get_distribution(APPLICATION).version
        except pkg_resources.DistributionNotFound:
            # The application name does not match an installed package.
            return UNKNOWN

    def _status_response(self):
        """Return the status payload. Extend this method if you would like
        to inject additional status information into the response.

        For example:

        .. code:: python

            class MyStatusHandle(StatusHandler):

                def status(self):
                    response = super(MyStatusHandler, self).status()
                    response['foo'] = 'bar'
                    return response

        :rtype: dict

        """
        return {
            'application': APPLICATION,
            'status': self._application_status(),
            'version': self._package_version()
        }

    def _status_response_code(self):
        """Return the HTTP status code for the request based upon the
        application status.

        :rtype: int

        """
        status = self._application_status()
        if status == OK:
            return 200
        elif status == MAINTENANCE:
            return 503
        return 500
| bsd-3-clause |
greatyao/dumputils | dumputils/theserver.py | 1 | 4166 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2015
from __future__ import absolute_import, division, print_function, with_statement
import logging
import sqlite3
import os
import sys
from gevent.server import StreamServer
from dumputils import common
from .common import send_data_safe, recv_data_safe, daemon_start
from .message import Message
from .encrypt import Encryptor
SQLiteDB3 = "dumpsrv.db3"
def open_db(path):
    """Open the SQLite database that tracks completed uploads.

    Connects to *path* (creating the file if needed) and ensures the
    ``records`` table exists.  An in-memory database is used as a
    last-resort fallback if the file cannot be opened.

    :param str path: path of the SQLite database file
    :rtype: sqlite3.Connection
    """
    # NOTE(review): the original connected first and only then tested
    # os.path.exists(path) -- but sqlite3.connect() creates the file as a
    # side effect, so the in-memory branch was dead code and, had it ever
    # run, would have leaked the first connection.  Fall back to memory
    # only when the file genuinely cannot be opened.
    try:
        # check_same_thread=False because the connection is shared by all
        # client handlers.
        conn = sqlite3.connect(path, check_same_thread=False)
    except sqlite3.Error:
        conn = sqlite3.connect(':memory:')
    try:
        conn.execute('create table records (id integer primary key, host text, filename text, status int)')
    except Exception:
        # Table already exists; nothing to do.
        pass
    return conn
def record_exist(conn, host, filename):
    """Return True if a *completed* upload record exists for the pair.

    :param conn: open sqlite3 connection to the records database
    :param str host: client address the file came from
    :param str filename: uploaded file name
    :rtype: bool
    """
    c = conn.cursor()
    try:
        # Parameterized query: host/filename arrive over the network, so
        # string interpolation here was an SQL injection vector.
        c.execute("select id, status from records "
                  "where host=? and filename=?", (host, filename))
        r = c.fetchone()
        if r is None:
            return False
        if r[1] == 1:
            return True
        return False
    finally:
        c.close()
def insert_record(conn, host, filename):
    """Record a completed upload for (host, filename), skipping duplicates.

    :param conn: open sqlite3 connection to the records database
    :param str host: client address the file came from
    :param str filename: uploaded file name
    :returns: True on success or when the record already exists,
        False when the insert fails
    """
    if record_exist(conn, host, filename):
        return True
    c = conn.cursor()
    try:
        # Parameterized insert: host/filename arrive over the network, so
        # string interpolation here was an SQL injection vector.
        c.execute("insert into records values(NULL, ?, ?, ?)",
                  (host, filename, 1))
        conn.commit()
        return True
    except Exception as ex:
        logging.warn('failed to insert into record: %s' % (ex))
        return False
    finally:
        c.close()
db = open_db(SQLiteDB3)
def client_handle(socket, address):
    """Serve one client connection for its whole lifetime.

    All frames are encrypted with a per-connection key derived from the
    client's "host:port" address string.  The first frame must be a
    successful CMD_LOGIN; afterwards the client may ask whether a file was
    already uploaded (CMD_QUERY), stream a file (CMD_UPLOAD_META /
    CMD_UPLOAD / CMD_UPLOAD_END) or log out (CMD_LOGOUT).  Uploaded files
    are written into a directory named after the client host, and completed
    uploads are recorded in the module-level ``db``.

    :param socket: connected socket for this client
    :param tuple address: (host, port) of the peer
    """
    logging.info('New connection from %s:%s' % address)
    host = address[0]
    port = address[1]
    # The per-connection encryption key is the client's own "host:port".
    key = '%s:%s' % address
    encryptor = Encryptor(key, common.METHOD)
    # Handshake: first frame must be a successful CMD_LOGIN.
    ret, cmd, resp, body = recv_data_safe(socket, encryptor=encryptor)
    if cmd != common.CMD_LOGIN or ret != 0:
        logging.warn('maybe invalid client, drop it')
        return
    send_data_safe(socket, 0, cmd, resp, None)
    # Per-host upload directory; "already exists" is fine.
    try:
        os.mkdir(host)
    except:
        pass
    # Upload state: fd/fn track the file currently being received,
    # size/recv_size track expected vs. received byte counts (-1 = idle).
    fd = None
    fn = ''
    size = recv_size = -1
    while True:
        ret, cmd, resp, body = recv_data_safe(socket, encryptor=encryptor)
        if ret == common.ERR_INVALID:
            # Malformed frame: skip it and keep the connection alive.
            continue
        elif ret == common.ERR_CONNLOST:
            logging.warn("client disconnected")
            break
        if cmd == common.CMD_QUERY:
            # Client asks whether this file was already fully uploaded.
            resp = record_exist(db, host, body)
            flag = send_data_safe(socket, 0, cmd, resp, encryptor=encryptor)
        elif cmd == common.CMD_UPLOAD_META:
            # Start of a new upload: body carries file name and total size.
            msg = Message(body)
            fn = msg.get_string()
            size = msg.get_int()
            recv_size = 0
            logging.info('client %s uploading \'%s\', size=%d' %(host, fn, size))
            fd = open(host + '/' + fn, 'wb')
            flag = send_data_safe(socket, 0, cmd, resp, encryptor=encryptor)
        elif cmd == common.CMD_UPLOAD:
            # File data chunk; ret is accumulated as the payload length.
            recv_size += ret
            fd.write(body)
        elif cmd == common.CMD_UPLOAD_END:
            logging.info('client %s complete uploading \'%s\'' %(host, fn))
            fd.close()
            # Record the upload only if the byte count matches the
            # announced size.
            if size >= 0 and recv_size == size:
                insert_record(db, host, fn)
            size = recv_size = -1
        elif cmd == common.CMD_LOGOUT:
            socket.close()
            logging.info("client logout")
            return
        else:
            # Unknown command: ignore and keep serving.
            pass
if __name__ == '__main__':
    # Listen port: defaults to 23456, overridable by the first CLI argument.
    port = 23456
    if len(sys.argv) >= 2:
        port = int(sys.argv[1])
    # Reset any handlers installed by imported modules before configuring.
    logging.getLogger('').handlers = []
    logging.basicConfig(level = logging.INFO,
                        format='%(asctime)s %(levelname)-8s %(message)s',
                        datefmt='%Y-%m-%d %H:%M:%S')
    daemon_start('dumpserver.log')
    # to make the server use SSL, pass certfile and keyfile arguments to the constructor
    server = StreamServer(('0.0.0.0', port), client_handle)
    # to start the server asynchronously, use its start() method;
    # we use blocking serve_forever() here because we have no other jobs
    logging.info('Starting dump server on port %d' %(port))
    server.serve_forever()
| apache-2.0 |
luckylavish/zamboni | mkt/webapps/migrations/0001_initial.py | 13 | 48611 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import mkt.constants.applications
import mkt.site.models
import mkt.users.models
import mkt.translations.models
import django_extensions.db.fields.json
import django.db.models.deletion
import mkt.translations.fields
import mkt.webapps.models
from django.conf import settings
import mkt.constants.regions
class Migration(migrations.Migration):
dependencies = [
('translations', '__first__'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('versions', '0001_initial'),
('tags', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='AddonDeviceType',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('device_type', models.PositiveIntegerField(default=1, choices=[(1, mkt.constants.applications.DEVICE_DESKTOP), (2, mkt.constants.applications.DEVICE_MOBILE), (3, mkt.constants.applications.DEVICE_TABLET), (4, mkt.constants.applications.DEVICE_GAIA)])),
],
options={
'db_table': 'addons_devicetypes',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='AddonExcludedRegion',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('region', models.PositiveIntegerField(db_index=True, choices=[(1, mkt.constants.regions.RESTOFWORLD), (63, mkt.constants.regions.AND), (241, mkt.constants.regions.ARE), (58, mkt.constants.regions.AFG), (67, mkt.constants.regions.ATG), (65, mkt.constants.regions.AIA), (60, mkt.constants.regions.ALB), (68, mkt.constants.regions.ARM), (64, mkt.constants.regions.AGO), (66, mkt.constants.regions.ATA), (20, mkt.constants.regions.ARG), (62, mkt.constants.regions.ASM), (71, mkt.constants.regions.AUT), (70, mkt.constants.regions.AUS), (69, mkt.constants.regions.ABW), (59, mkt.constants.regions.ALA), (72, mkt.constants.regions.AZE), (84, mkt.constants.regions.BIH), (75, mkt.constants.regions.BRB), (31, mkt.constants.regions.BGD), (77, mkt.constants.regions.BEL), (89, mkt.constants.regions.BFA), (88, mkt.constants.regions.BGR), (74, mkt.constants.regions.BHR), (90, mkt.constants.regions.BDI), (79, mkt.constants.regions.BEN), (253, mkt.constants.regions.BLM), (80, mkt.constants.regions.BMU), (87, mkt.constants.regions.BRN), (82, mkt.constants.regions.BOL), (252, mkt.constants.regions.BES), (7, mkt.constants.regions.BRA), (73, mkt.constants.regions.BHS), (81, mkt.constants.regions.BTN), (85, mkt.constants.regions.BVT), (45, mkt.constants.regions.BWA), (76, mkt.constants.regions.BLR), (78, mkt.constants.regions.BLZ), (92, mkt.constants.regions.CAN), (97, mkt.constants.regions.CCK), (100, mkt.constants.regions.COD), (54, mkt.constants.regions.CAF), (99, mkt.constants.regions.COG), (226, mkt.constants.regions.CHE), (40, mkt.constants.regions.CIV), (101, mkt.constants.regions.COK), (23, mkt.constants.regions.CHL), (42, mkt.constants.regions.CMR), (21, mkt.constants.regions.CHN), (9, mkt.constants.regions.COL), (27, mkt.constants.regions.CRI), (103, mkt.constants.regions.CUB), (93, mkt.constants.regions.CPV), (254, mkt.constants.regions.CUW), (96, mkt.constants.regions.CXR), (105, mkt.constants.regions.CYP), (34, mkt.constants.regions.CZE), (14, mkt.constants.regions.DEU), (107, 
mkt.constants.regions.DJI), (106, mkt.constants.regions.DNK), (108, mkt.constants.regions.DMA), (109, mkt.constants.regions.DOM), (61, mkt.constants.regions.DZA), (26, mkt.constants.regions.ECU), (112, mkt.constants.regions.EST), (43, mkt.constants.regions.EGY), (248, mkt.constants.regions.ESH), (111, mkt.constants.regions.ERI), (8, mkt.constants.regions.ESP), (113, mkt.constants.regions.ETH), (117, mkt.constants.regions.FIN), (116, mkt.constants.regions.FJI), (114, mkt.constants.regions.FLK), (168, mkt.constants.regions.FSM), (115, mkt.constants.regions.FRO), (30, mkt.constants.regions.FRA), (121, mkt.constants.regions.GAB), (127, mkt.constants.regions.GRD), (123, mkt.constants.regions.GEO), (118, mkt.constants.regions.GUF), (130, mkt.constants.regions.GGY), (124, mkt.constants.regions.GHA), (125, mkt.constants.regions.GIB), (126, mkt.constants.regions.GRL), (122, mkt.constants.regions.GMB), (55, mkt.constants.regions.GIN), (128, mkt.constants.regions.GLP), (110, mkt.constants.regions.GNQ), (17, mkt.constants.regions.GRC), (218, mkt.constants.regions.SGS), (25, mkt.constants.regions.GTM), (129, mkt.constants.regions.GUM), (46, mkt.constants.regions.GNB), (131, mkt.constants.regions.GUY), (136, mkt.constants.regions.HKG), (133, mkt.constants.regions.HMD), (135, mkt.constants.regions.HND), (102, mkt.constants.regions.HRV), (132, mkt.constants.regions.HTI), (13, mkt.constants.regions.HUN), (138, mkt.constants.regions.IDN), (140, mkt.constants.regions.IRL), (142, mkt.constants.regions.ISR), (141, mkt.constants.regions.IMN), (32, mkt.constants.regions.IND), (86, mkt.constants.regions.IOT), (139, mkt.constants.regions.IRQ), (137, mkt.constants.regions.ISL), (22, mkt.constants.regions.ITA), (144, mkt.constants.regions.JEY), (143, mkt.constants.regions.JAM), (51, mkt.constants.regions.JOR), (33, mkt.constants.regions.JPN), (56, mkt.constants.regions.KEN), (149, mkt.constants.regions.KGZ), (91, mkt.constants.regions.KHM), (146, mkt.constants.regions.KIR), (98, 
mkt.constants.regions.COM), (201, mkt.constants.regions.KNA), (147, mkt.constants.regions.KOR), (148, mkt.constants.regions.KWT), (94, mkt.constants.regions.CYM), (145, mkt.constants.regions.KAZ), (150, mkt.constants.regions.LAO), (152, mkt.constants.regions.LBN), (202, mkt.constants.regions.LCA), (156, mkt.constants.regions.LIE), (220, mkt.constants.regions.LKA), (154, mkt.constants.regions.LBR), (153, mkt.constants.regions.LSO), (38, mkt.constants.regions.LTU), (157, mkt.constants.regions.LUX), (151, mkt.constants.regions.LVA), (155, mkt.constants.regions.LBY), (173, mkt.constants.regions.MAR), (170, mkt.constants.regions.MCO), (169, mkt.constants.regions.MDA), (15, mkt.constants.regions.MNE), (255, mkt.constants.regions.MAF), (49, mkt.constants.regions.MDG), (164, mkt.constants.regions.MHL), (159, mkt.constants.regions.MKD), (48, mkt.constants.regions.MLI), (53, mkt.constants.regions.MMR), (171, mkt.constants.regions.MNG), (158, mkt.constants.regions.MAC), (184, mkt.constants.regions.MNP), (165, mkt.constants.regions.MTQ), (166, mkt.constants.regions.MRT), (172, mkt.constants.regions.MSR), (163, mkt.constants.regions.MLT), (50, mkt.constants.regions.MUS), (162, mkt.constants.regions.MDV), (160, mkt.constants.regions.MWI), (12, mkt.constants.regions.MEX), (161, mkt.constants.regions.MYS), (174, mkt.constants.regions.MOZ), (175, mkt.constants.regions.NAM), (179, mkt.constants.regions.NCL), (52, mkt.constants.regions.NER), (183, mkt.constants.regions.NFK), (181, mkt.constants.regions.NGA), (29, mkt.constants.regions.NIC), (178, mkt.constants.regions.NLD), (185, mkt.constants.regions.NOR), (177, mkt.constants.regions.NPL), (176, mkt.constants.regions.NRU), (182, mkt.constants.regions.NIU), (180, mkt.constants.regions.NZL), (186, mkt.constants.regions.OMN), (28, mkt.constants.regions.PAN), (18, mkt.constants.regions.PER), (119, mkt.constants.regions.PYF), (190, mkt.constants.regions.PNG), (35, mkt.constants.regions.PHL), (187, mkt.constants.regions.PAK), (11, 
mkt.constants.regions.POL), (204, mkt.constants.regions.SPM), (192, mkt.constants.regions.PCN), (194, mkt.constants.regions.PRI), (189, mkt.constants.regions.PSE), (193, mkt.constants.regions.PRT), (188, mkt.constants.regions.PLW), (191, mkt.constants.regions.PRY), (195, mkt.constants.regions.QAT), (196, mkt.constants.regions.REU), (197, mkt.constants.regions.ROU), (16, mkt.constants.regions.SRB), (36, mkt.constants.regions.RUS), (198, mkt.constants.regions.RWA), (209, mkt.constants.regions.SAU), (216, mkt.constants.regions.SLB), (210, mkt.constants.regions.SYC), (221, mkt.constants.regions.SDN), (225, mkt.constants.regions.SWE), (212, mkt.constants.regions.SGP), (200, mkt.constants.regions.SHN), (215, mkt.constants.regions.SVN), (223, mkt.constants.regions.SJM), (214, mkt.constants.regions.SVK), (211, mkt.constants.regions.SLE), (207, mkt.constants.regions.SMR), (41, mkt.constants.regions.SEN), (217, mkt.constants.regions.SOM), (222, mkt.constants.regions.SUR), (219, mkt.constants.regions.SSD), (208, mkt.constants.regions.STP), (24, mkt.constants.regions.SLV), (256, mkt.constants.regions.SXM), (227, mkt.constants.regions.SYR), (224, mkt.constants.regions.SWZ), (237, mkt.constants.regions.TCA), (95, mkt.constants.regions.TCD), (120, mkt.constants.regions.ATF), (231, mkt.constants.regions.TGO), (229, mkt.constants.regions.THA), (228, mkt.constants.regions.TJK), (232, mkt.constants.regions.TKL), (230, mkt.constants.regions.TLS), (236, mkt.constants.regions.TKM), (39, mkt.constants.regions.TUN), (233, mkt.constants.regions.TON), (235, mkt.constants.regions.TUR), (234, mkt.constants.regions.TTO), (238, mkt.constants.regions.TUV), (57, mkt.constants.regions.TWN), (44, mkt.constants.regions.TZA), (240, mkt.constants.regions.UKR), (239, mkt.constants.regions.UGA), (4, mkt.constants.regions.GBR), (257, mkt.constants.regions.UMI), (2, mkt.constants.regions.USA), (19, mkt.constants.regions.URY), (243, mkt.constants.regions.UZB), (134, mkt.constants.regions.VAT), (205, 
mkt.constants.regions.VCT), (10, mkt.constants.regions.VEN), (245, mkt.constants.regions.VGB), (246, mkt.constants.regions.VIR), (244, mkt.constants.regions.VNM), (47, mkt.constants.regions.VUT), (247, mkt.constants.regions.WLF), (206, mkt.constants.regions.WSM), (249, mkt.constants.regions.YEM), (167, mkt.constants.regions.MYT), (37, mkt.constants.regions.ZAF), (250, mkt.constants.regions.ZMB), (251, mkt.constants.regions.ZWE)])),
],
options={
'db_table': 'addons_excluded_regions',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='AddonUpsell',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
],
options={
'db_table': 'addon_upsell',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='AddonUser',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('role', models.PositiveSmallIntegerField(default=5, choices=[(5, 'Owner'), (4, 'Developer'), (1, 'Viewer'), (6, 'Support')])),
('listed', models.BooleanField(default=True, verbose_name='Listed')),
('position', models.IntegerField(default=0)),
],
options={
'db_table': 'addons_users',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='AppFeatures',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('has_apps', models.BooleanField(default=False, help_text='App Management API')),
('has_packaged_apps', models.BooleanField(default=False, help_text='Packaged Apps Install API')),
('has_pay', models.BooleanField(default=False, help_text='Web Payment')),
('has_activity', models.BooleanField(default=False, help_text='Web Activities')),
('has_light_events', models.BooleanField(default=False, help_text='Ambient Light Sensor')),
('has_archive', models.BooleanField(default=False, help_text='Archive')),
('has_battery', models.BooleanField(default=False, help_text='Battery')),
('has_bluetooth', models.BooleanField(default=False, help_text='Bluetooth')),
('has_contacts', models.BooleanField(default=False, help_text='Contacts')),
('has_device_storage', models.BooleanField(default=False, help_text='Device Storage')),
('has_indexeddb', models.BooleanField(default=False, help_text='IndexedDB')),
('has_geolocation', models.BooleanField(default=False, help_text='Geolocation')),
('has_idle', models.BooleanField(default=False, help_text='Idle')),
('has_network_info', models.BooleanField(default=False, help_text='Network Information')),
('has_network_stats', models.BooleanField(default=False, help_text='Network Stats')),
('has_proximity', models.BooleanField(default=False, help_text='Proximity')),
('has_push', models.BooleanField(default=False, help_text='Simple Push')),
('has_orientation', models.BooleanField(default=False, help_text='Screen Orientation')),
('has_time_clock', models.BooleanField(default=False, help_text='Time/Clock')),
('has_vibrate', models.BooleanField(default=False, help_text='Vibration')),
('has_fm', models.BooleanField(default=False, help_text='WebFM')),
('has_sms', models.BooleanField(default=False, help_text='WebSMS')),
('has_touch', models.BooleanField(default=False, help_text='Touch')),
('has_qhd', models.BooleanField(default=False, help_text='Smartphone-Sized Displays (qHD)')),
('has_mp3', models.BooleanField(default=False, help_text='MP3')),
('has_audio', models.BooleanField(default=False, help_text='Audio')),
('has_webaudio', models.BooleanField(default=False, help_text='Web Audio')),
('has_video_h264', models.BooleanField(default=False, help_text='H.264')),
('has_video_webm', models.BooleanField(default=False, help_text='WebM')),
('has_fullscreen', models.BooleanField(default=False, help_text='Full Screen')),
('has_gamepad', models.BooleanField(default=False, help_text='Gamepad')),
('has_quota', models.BooleanField(default=False, help_text='Quota Management')),
('has_camera', models.BooleanField(default=False, help_text='Camera')),
('has_mic', models.BooleanField(default=False, help_text='Microphone')),
('has_screen_capture', models.BooleanField(default=False, help_text='Screen Capture')),
('has_webrtc_media', models.BooleanField(default=False, help_text='WebRTC MediaStream')),
('has_webrtc_data', models.BooleanField(default=False, help_text='WebRTC DataChannel')),
('has_webrtc_peer', models.BooleanField(default=False, help_text='WebRTC PeerConnection')),
('has_speech_syn', models.BooleanField(default=False, help_text='Web Speech Synthesis')),
('has_speech_rec', models.BooleanField(default=False, help_text='Web Speech Recognition')),
('has_pointer_lock', models.BooleanField(default=False, help_text='Pointer Lock')),
('has_notification', models.BooleanField(default=False, help_text='Notifications')),
('has_alarm', models.BooleanField(default=False, help_text='Alarms')),
('has_systemxhr', models.BooleanField(default=False, help_text='SystemXHR')),
('has_tcpsocket', models.BooleanField(default=False, help_text='TCP Sockets')),
('has_thirdparty_keyboard_support', models.BooleanField(default=False, help_text='Third-Party Keyboard Support')),
('has_network_info_multiple', models.BooleanField(default=False, help_text='Multiple Network Information')),
('has_mobileid', models.BooleanField(default=False, help_text='Mobile ID')),
('has_precompile_asmjs', models.BooleanField(default=False, help_text='Asm.js Precompilation')),
('has_hardware_512mb_ram', models.BooleanField(default=False, help_text='512MB RAM Device')),
('has_hardware_1gb_ram', models.BooleanField(default=False, help_text='1GB RAM Device')),
('has_nfc', models.BooleanField(default=False, help_text='NFC')),
('has_openmobileacl', models.BooleanField(default=False, help_text='OpenMobile ACL')),
('version', models.OneToOneField(related_name='features', to='versions.Version')),
],
options={
'db_table': 'addons_features',
},
bases=(models.Model, mkt.site.models.DynamicBoolFieldsMixin),
),
migrations.CreateModel(
name='AppManifest',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('manifest', models.TextField()),
('version', models.OneToOneField(related_name='manifest_json', to='versions.Version')),
],
options={
'db_table': 'app_manifest',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='BlockedSlug',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('name', models.CharField(default=b'', unique=True, max_length=255)),
],
options={
'db_table': 'addons_blocked_slug',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='ContentRating',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('ratings_body', models.PositiveIntegerField(choices=[(0, b'CLASSIND'), (1, 'Generic'), (2, b'USK'), (3, b'ESRB'), (4, b'PEGI')])),
('rating', models.PositiveIntegerField()),
],
options={
'db_table': 'webapps_contentrating',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Geodata',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('restricted', models.BooleanField(default=False)),
('popular_region', models.CharField(max_length=10, null=True)),
('banner_regions', mkt.webapps.models.RegionListField(null=True)),
('region_de_usk_exclude', models.BooleanField(default=False)),
('region_cn_status', models.PositiveIntegerField(default=2, help_text=b'China approval status', db_index=True, choices=[(0, 'Incomplete'), (16, 'Unlisted'), (2, 'Pending approval'), (4, 'Published'), (5, 'Banned from Marketplace'), (11, 'Deleted'), (12, 'Rejected'), (13, 'Approved but private'), (15, 'Blocked')])),
('region_cn_nominated', models.DateTimeField(help_text=b'China nomination date', null=True)),
('region_br_iarc_exclude', models.BooleanField(default=False)),
('region_de_iarc_exclude', models.BooleanField(default=False)),
],
options={
'db_table': 'webapps_geodata',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='IARCInfo',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('submission_id', models.PositiveIntegerField()),
('security_code', models.CharField(max_length=10)),
],
options={
'db_table': 'webapps_iarc_info',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Installed',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('uuid', models.CharField(unique=True, max_length=255, db_index=True)),
('premium_type', models.PositiveIntegerField(default=None, null=True, choices=[(0, 'Free'), (1, 'Premium'), (2, 'Premium with in-app payments'), (3, 'Free with in-app payments'), (4, "I'll use my own system for in-app payments")])),
('install_type', models.PositiveIntegerField(default=0, db_index=True, choices=[(0, 'User'), (1, 'Reviewer'), (2, 'Developer')])),
],
options={
'db_table': 'users_install',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Installs',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('value', models.FloatField(default=0.0)),
('region', models.PositiveIntegerField(default=0, db_index=True)),
],
options={
'db_table': 'addons_installs',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Preview',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('filetype', models.CharField(max_length=25)),
('thumbtype', models.CharField(max_length=25)),
('position', models.IntegerField(default=0)),
('sizes', django_extensions.db.fields.json.JSONField(max_length=25)),
],
options={
'ordering': ('position', 'created'),
'db_table': 'previews',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='RatingDescriptors',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('has_esrb_mild_blood', models.BooleanField(default=False, help_text='Mild Blood')),
('has_esrb_mild_cartoon_violence', models.BooleanField(default=False, help_text='Mild Cartoon Violence')),
('has_classind_sex', models.BooleanField(default=False, help_text='Sexo')),
('has_classind_nudity', models.BooleanField(default=False, help_text='Nudez')),
('has_pegi_shares_location', models.BooleanField(default=False, help_text='Location data sharing')),
('has_pegi_digital_purchases', models.BooleanField(default=False, help_text='In-app purchase option')),
('has_esrb_mild_sexual_content', models.BooleanField(default=False, help_text='Mild Sexual Content')),
('has_pegi_sex_content', models.BooleanField(default=False, help_text='Sex')),
('has_esrb_drug_tobacco_use', models.BooleanField(default=False, help_text='Use of Drug and Tobacco')),
('has_classind_drugs', models.BooleanField(default=False, help_text='Drogas')),
('has_esrb_alcohol_tobacco_use', models.BooleanField(default=False, help_text='Use of Alcohol and Tobacco')),
('has_generic_violence', models.BooleanField(default=False, help_text='Violence')),
('has_generic_drugs', models.BooleanField(default=False, help_text='Drugs')),
('has_pegi_violence', models.BooleanField(default=False, help_text='Violence')),
('has_esrb_intense_violence', models.BooleanField(default=False, help_text='Intense Violence')),
('has_esrb_mild_fantasy_violence', models.BooleanField(default=False, help_text='Mild Fantasy Violence')),
('has_pegi_shares_info', models.BooleanField(default=False, help_text='Personal data sharing')),
('has_classind_criminal_acts', models.BooleanField(default=False, help_text='Atos Crim\xednosos')),
('has_esrb_sex_violence', models.BooleanField(default=False, help_text='Sexual Violence')),
('has_usk_alcohol', models.BooleanField(default=False, help_text='Alkoholkonsum')),
('has_esrb_mature_humor', models.BooleanField(default=False, help_text='Mature Humor')),
('has_usk_discrimination', models.BooleanField(default=False, help_text='Diskriminierung')),
('has_usk_lang', models.BooleanField(default=False, help_text='Explizite Sprache')),
('has_usk_scary', models.BooleanField(default=False, help_text='\xc4ngstigende Inhalte')),
('has_esrb_lang', models.BooleanField(default=False, help_text='Language')),
('has_esrb_violence', models.BooleanField(default=False, help_text='Violence')),
('has_esrb_mild_violence', models.BooleanField(default=False, help_text='Mild Violence')),
('has_esrb_mild_suggestive_themes', models.BooleanField(default=False, help_text='Mild Suggestive Themes ')),
('has_esrb_lyrics', models.BooleanField(default=False, help_text='Lyrics')),
('has_usk_violence', models.BooleanField(default=False, help_text='Gewalt')),
('has_esrb_blood_gore', models.BooleanField(default=False, help_text='Blood and Gore')),
('has_esrb_suggestive', models.BooleanField(default=False, help_text='Suggestive Themes')),
('has_esrb_sex_themes', models.BooleanField(default=False, help_text='Sexual Themes')),
('has_usk_sex_content', models.BooleanField(default=False, help_text='Erotik/Sexuelle Inhalte')),
('has_pegi_drugs', models.BooleanField(default=False, help_text='Drugs')),
('has_esrb_crime_instruct', models.BooleanField(default=False, help_text='Criminal Instruction')),
('has_pegi_online', models.BooleanField(default=False, help_text='Online')),
('has_usk_explicit_violence', models.BooleanField(default=False, help_text='Explizite Gewalt')),
('has_esrb_tobacco_ref', models.BooleanField(default=False, help_text='Tobacco Reference')),
('has_esrb_mild_lang', models.BooleanField(default=False, help_text='Mild Language')),
('has_esrb_tobacco_use', models.BooleanField(default=False, help_text='Use of Tobacco')),
('has_classind_violence', models.BooleanField(default=False, help_text='Viol\xeancia')),
('has_esrb_mild_sexual_themes', models.BooleanField(default=False, help_text='Mild Sexual Themes ')),
('has_usk_some_swearing', models.BooleanField(default=False, help_text='Gelegentliches Fluchen')),
('has_usk_abstract_violence', models.BooleanField(default=False, help_text='Abstrakte Gewalt')),
('has_pegi_gambling', models.BooleanField(default=False, help_text='Gambling')),
('has_esrb_drug_tobacco_ref', models.BooleanField(default=False, help_text='Drug and Tobacco Reference')),
('has_esrb_alcohol_use', models.BooleanField(default=False, help_text='Use of Alcohol')),
('has_esrb_alcohol_tobacco_ref', models.BooleanField(default=False, help_text='Alcohol and Tobacco Reference')),
('has_esrb_crude_humor', models.BooleanField(default=False, help_text='Crude Humor')),
('has_usk_nudity', models.BooleanField(default=False, help_text='Nacktheit/Erotik')),
('has_generic_sex_content', models.BooleanField(default=False, help_text='Sex')),
('has_esrb_animated_blood', models.BooleanField(default=False, help_text='Animated Blood')),
('has_generic_gambling', models.BooleanField(default=False, help_text='Gambling')),
('has_generic_discrimination', models.BooleanField(default=False, help_text='Discrimination')),
('has_generic_scary', models.BooleanField(default=False, help_text='Fear')),
('has_esrb_mild_lyrics', models.BooleanField(default=False, help_text='Mild Lyrics')),
('has_usk_sex_violence', models.BooleanField(default=False, help_text='Sexuelle Gewalt')),
('has_esrb_drug_alcohol_use', models.BooleanField(default=False, help_text='Use of Drug and Alcohol')),
('has_usk_horror', models.BooleanField(default=False, help_text='Grusel/Horror')),
('has_esrb_alcohol_ref', models.BooleanField(default=False, help_text='Alcohol Reference')),
('has_classind_sex_explicit', models.BooleanField(default=False, help_text='Sexo Expl\xedcito')),
('has_esrb_violence_ref', models.BooleanField(default=False, help_text='Violent References')),
('has_pegi_discrimination', models.BooleanField(default=False, help_text='Discrimination')),
('has_usk_some_scares', models.BooleanField(default=False, help_text='Seltene Schreckmomente')),
('has_usk_sex_ref', models.BooleanField(default=False, help_text='Sexuelle Andeutungen')),
('has_usk_drugs', models.BooleanField(default=False, help_text='Drogen')),
('has_esrb_nudity', models.BooleanField(default=False, help_text='Nudity')),
('has_pegi_lang', models.BooleanField(default=False, help_text='Language')),
('has_esrb_fantasy_violence', models.BooleanField(default=False, help_text='Fantasy Violence')),
('has_esrb_drug_use', models.BooleanField(default=False, help_text='Use of Drugs')),
('has_esrb_cartoon_violence', models.BooleanField(default=False, help_text='Cartoon Violence')),
('has_esrb_real_gambling', models.BooleanField(default=False, help_text='Real Gambling')),
('has_classind_drugs_legal', models.BooleanField(default=False, help_text='Drogas L\xedcitas')),
('has_esrb_comic_mischief', models.BooleanField(default=False, help_text='Comic Mischief')),
('has_classind_lang', models.BooleanField(default=False, help_text='Linguagem Impr\xf3pria')),
('has_classind_violence_extreme', models.BooleanField(default=False, help_text='Viol\xeancia Extrema')),
('has_esrb_drug_alcohol_ref', models.BooleanField(default=False, help_text='Drug and Alcohol Reference')),
('has_esrb_drug_alcohol_tobacco_ref', models.BooleanField(default=False, help_text='Drug, Alcohol and Tobacco Reference')),
('has_usk_drug_use', models.BooleanField(default=False, help_text='Drogenkonsum')),
('has_esrb_partial_nudity', models.BooleanField(default=False, help_text='Partial Nudity')),
('has_classind_sex_content', models.BooleanField(default=False, help_text='Conte\xfado Sexual')),
('has_esrb_strong_lyrics', models.BooleanField(default=False, help_text='Strong Lyrics')),
('has_esrb_strong_lang', models.BooleanField(default=False, help_text='Strong Language')),
('has_pegi_users_interact', models.BooleanField(default=False, help_text='Social interaction functionality')),
('has_classind_drugs_illegal', models.BooleanField(default=False, help_text='Drogas Il\xedcitas')),
('has_esrb_drug_alcohol_tobacco_use', models.BooleanField(default=False, help_text='Use of Drug, Alcohol and Tobacco')),
('has_usk_sex_violence_ref', models.BooleanField(default=False, help_text='Andeutungen Sexueller Gewalt')),
('has_esrb_sex_content', models.BooleanField(default=False, help_text='Sexual Content')),
('has_esrb_sim_gambling', models.BooleanField(default=False, help_text='Simulated Gambling')),
('has_esrb_blood', models.BooleanField(default=False, help_text='Blood')),
('has_esrb_crime', models.BooleanField(default=False, help_text='Crime')),
('has_esrb_strong_sex_content', models.BooleanField(default=False, help_text='Strong Sexual Content')),
('has_generic_lang', models.BooleanField(default=False, help_text='Language')),
('has_pegi_horror', models.BooleanField(default=False, help_text='Horror')),
('has_pegi_scary', models.BooleanField(default=False, help_text='Fear')),
('has_esrb_hate_speech', models.BooleanField(default=False, help_text='Hate Speech')),
('has_esrb_scary', models.BooleanField(default=False, help_text='Scary Themes')),
('has_esrb_drug_ref', models.BooleanField(default=False, help_text='Drug Reference')),
('has_classind_shocking', models.BooleanField(default=False, help_text='Conte\xfado Impactante')),
('has_generic_online', models.BooleanField(default=False, help_text='Online')),
('has_usk_tobacco', models.BooleanField(default=False, help_text='Tabakkonsum')),
],
options={
'db_table': 'webapps_rating_descriptors',
},
bases=(models.Model, mkt.site.models.DynamicBoolFieldsMixin),
),
migrations.CreateModel(
name='RatingInteractives',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('has_digital_purchases', models.BooleanField(default=False, help_text=b'Digital Purchases')),
('has_shares_location', models.BooleanField(default=False, help_text=b'Shares Location')),
('has_users_interact', models.BooleanField(default=False, help_text=b'Users Interact')),
('has_shares_info', models.BooleanField(default=False, help_text=b'Shares Info')),
],
options={
'db_table': 'webapps_rating_interactives',
},
bases=(models.Model, mkt.site.models.DynamicBoolFieldsMixin),
),
migrations.CreateModel(
name='Trending',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('value', models.FloatField(default=0.0)),
('region', models.PositiveIntegerField(default=0, db_index=True)),
],
options={
'db_table': 'addons_trending',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Webapp',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('guid', models.CharField(max_length=255, unique=True, null=True)),
('app_slug', models.CharField(max_length=30, unique=True, null=True, blank=True)),
('default_locale', models.CharField(default=b'en-US', max_length=10, db_column=b'defaultlocale')),
('status', models.PositiveSmallIntegerField(default=0, db_index=True, choices=[(0, 'Incomplete'), (16, 'Unlisted'), (2, 'Pending approval'), (4, 'Published'), (5, 'Banned from Marketplace'), (11, 'Deleted'), (12, 'Rejected'), (13, 'Approved but private'), (15, 'Blocked')])),
('highest_status', models.PositiveSmallIntegerField(default=0, help_text=b'An upper limit for what an author can change.', db_column=b'higheststatus', choices=[(0, 'Incomplete'), (16, 'Unlisted'), (2, 'Pending approval'), (4, 'Published'), (5, 'Banned from Marketplace'), (11, 'Deleted'), (12, 'Rejected'), (13, 'Approved but private'), (15, 'Blocked')])),
('icon_type', models.CharField(max_length=25, db_column=b'icontype', blank=True)),
('icon_hash', models.CharField(max_length=8, null=True, blank=True)),
('average_rating', models.FloatField(default=0, db_column=b'averagerating')),
('bayesian_rating', models.FloatField(default=0, db_column=b'bayesianrating', db_index=True)),
('total_reviews', models.PositiveIntegerField(default=0, db_column=b'totalreviews')),
('last_updated', models.DateTimeField(help_text=b'Last time this add-on had a file/version update', null=True, db_index=True)),
('disabled_by_user', models.BooleanField(default=False, db_index=True, db_column=b'inactive')),
('promo_img_hash', models.CharField(max_length=8, null=True, blank=True)),
('public_stats', models.BooleanField(default=False, db_column=b'publicstats')),
('categories', django_extensions.db.fields.json.JSONField()),
('premium_type', models.PositiveSmallIntegerField(default=0, choices=[(0, 'Free'), (1, 'Premium'), (2, 'Premium with in-app payments'), (3, 'Free with in-app payments'), (4, "I'll use my own system for in-app payments")])),
('manifest_url', models.URLField(max_length=255, null=True, blank=True)),
('app_domain', models.CharField(db_index=True, max_length=255, null=True, blank=True)),
('publish_type', models.PositiveSmallIntegerField(default=0)),
('mozilla_contact', models.EmailField(max_length=75, blank=True)),
('vip_app', models.BooleanField(default=False)),
('priority_review', models.BooleanField(default=False)),
('is_packaged', models.BooleanField(default=False, db_index=True)),
('enable_new_regions', models.BooleanField(default=True, db_index=True)),
('iarc_purged', models.BooleanField(default=False)),
('solitude_public_id', models.CharField(max_length=255, null=True, blank=True)),
('is_offline', models.BooleanField(default=False)),
('_current_version', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.SET_NULL, db_column=b'current_version', to='versions.Version', null=True)),
('_latest_version', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.SET_NULL, db_column=b'latest_version', to='versions.Version', null=True)),
('authors', models.ManyToManyField(related_name='addons', through='webapps.AddonUser', to=settings.AUTH_USER_MODEL)),
('description', mkt.translations.fields.PurifiedField(related_name='Webapp_description_set+', null=True, on_delete=django.db.models.deletion.SET_NULL, db_column=b'description', to_field=b'id', blank=True, to=mkt.translations.models.PurifiedTranslation, short=False, require_locale=True, unique=True)),
('homepage', mkt.translations.fields.TranslatedField(related_name='Webapp_homepage_set+', null=True, on_delete=django.db.models.deletion.SET_NULL, db_column=b'homepage', to_field=b'id', blank=True, to=mkt.translations.models.Translation, short=True, require_locale=True, unique=True)),
('name', mkt.translations.fields.TranslatedField(related_name='Webapp_name_set+', null=True, on_delete=django.db.models.deletion.SET_NULL, db_column=b'name', default=None, to_field=b'id', to=mkt.translations.models.Translation, short=True, blank=True, require_locale=True, unique=True)),
('privacy_policy', mkt.translations.fields.PurifiedField(related_name='Webapp_privacy_policy_set+', null=True, on_delete=django.db.models.deletion.SET_NULL, db_column=b'privacypolicy', to_field=b'id', blank=True, to=mkt.translations.models.PurifiedTranslation, short=True, require_locale=True, unique=True)),
('support_email', mkt.translations.fields.TranslatedField(related_name='Webapp_support_email_set+', null=True, on_delete=django.db.models.deletion.SET_NULL, db_column=b'supportemail', to_field=b'id', blank=True, to=mkt.translations.models.Translation, short=True, require_locale=True, unique=True)),
('support_url', mkt.translations.fields.TranslatedField(related_name='Webapp_support_url_set+', null=True, on_delete=django.db.models.deletion.SET_NULL, db_column=b'supporturl', to_field=b'id', blank=True, to=mkt.translations.models.Translation, short=True, require_locale=True, unique=True)),
('tags', models.ManyToManyField(to='tags.Tag')),
],
options={
'db_table': 'addons',
},
bases=(mkt.webapps.models.UUIDModelMixin, mkt.site.models.OnChangeMixin, models.Model),
),
migrations.AddField(
model_name='trending',
name='addon',
field=models.ForeignKey(related_name='trending', to='webapps.Webapp'),
preserve_default=True,
),
migrations.AlterUniqueTogether(
name='trending',
unique_together=set([('addon', 'region')]),
),
migrations.AddField(
model_name='ratinginteractives',
name='addon',
field=models.OneToOneField(related_name='rating_interactives', to='webapps.Webapp'),
preserve_default=True,
),
migrations.AddField(
model_name='ratingdescriptors',
name='addon',
field=models.OneToOneField(related_name='rating_descriptors', to='webapps.Webapp'),
preserve_default=True,
),
migrations.AddField(
model_name='preview',
name='addon',
field=models.ForeignKey(related_name='previews', to='webapps.Webapp'),
preserve_default=True,
),
migrations.AddField(
model_name='preview',
name='caption',
field=mkt.translations.fields.TranslatedField(related_name='Preview_caption_set+', null=True, on_delete=django.db.models.deletion.SET_NULL, db_column=b'caption', to_field=b'id', blank=True, to=mkt.translations.models.Translation, short=True, require_locale=True, unique=True),
preserve_default=True,
),
migrations.AlterIndexTogether(
name='preview',
index_together=set([('addon', 'position', 'created')]),
),
migrations.AddField(
model_name='installs',
name='addon',
field=models.ForeignKey(related_name='popularity', to='webapps.Webapp'),
preserve_default=True,
),
migrations.AlterUniqueTogether(
name='installs',
unique_together=set([('addon', 'region')]),
),
migrations.AddField(
model_name='installed',
name='addon',
field=models.ForeignKey(related_name='installed', to='webapps.Webapp'),
preserve_default=True,
),
migrations.AddField(
model_name='installed',
name='user',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
preserve_default=True,
),
migrations.AlterUniqueTogether(
name='installed',
unique_together=set([('addon', 'user', 'install_type')]),
),
migrations.AddField(
model_name='iarcinfo',
name='addon',
field=models.OneToOneField(related_name='iarc_info', to='webapps.Webapp'),
preserve_default=True,
),
migrations.AddField(
model_name='geodata',
name='addon',
field=models.OneToOneField(related_name='_geodata', to='webapps.Webapp'),
preserve_default=True,
),
migrations.AddField(
model_name='geodata',
name='banner_message',
field=mkt.translations.fields.PurifiedField(related_name='Geodata_banner_message_set+', null=True, on_delete=django.db.models.deletion.SET_NULL, db_column=b'banner_message', to_field=b'id', blank=True, to=mkt.translations.models.PurifiedTranslation, short=True, require_locale=True, unique=True),
preserve_default=True,
),
migrations.AddField(
model_name='contentrating',
name='addon',
field=models.ForeignKey(related_name='content_ratings', to='webapps.Webapp'),
preserve_default=True,
),
migrations.AlterUniqueTogether(
name='contentrating',
unique_together=set([('addon', 'ratings_body')]),
),
migrations.AddField(
model_name='addonuser',
name='addon',
field=models.ForeignKey(to='webapps.Webapp'),
preserve_default=True,
),
migrations.AddField(
model_name='addonuser',
name='user',
field=mkt.users.models.UserForeignKey(to=settings.AUTH_USER_MODEL),
preserve_default=True,
),
migrations.AlterUniqueTogether(
name='addonuser',
unique_together=set([('addon', 'user')]),
),
migrations.AlterIndexTogether(
name='addonuser',
index_together=set([('addon', 'user', 'listed'), ('addon', 'listed')]),
),
migrations.AddField(
model_name='addonupsell',
name='free',
field=models.ForeignKey(related_name='_upsell_from', to='webapps.Webapp'),
preserve_default=True,
),
migrations.AddField(
model_name='addonupsell',
name='premium',
field=models.ForeignKey(related_name='_upsell_to', to='webapps.Webapp'),
preserve_default=True,
),
migrations.AlterUniqueTogether(
name='addonupsell',
unique_together=set([('free', 'premium')]),
),
migrations.AddField(
model_name='addonexcludedregion',
name='addon',
field=models.ForeignKey(related_name='addonexcludedregion', to='webapps.Webapp'),
preserve_default=True,
),
migrations.AlterUniqueTogether(
name='addonexcludedregion',
unique_together=set([('addon', 'region')]),
),
migrations.AddField(
model_name='addondevicetype',
name='addon',
field=models.ForeignKey(to='webapps.Webapp'),
preserve_default=True,
),
migrations.AlterUniqueTogether(
name='addondevicetype',
unique_together=set([('addon', 'device_type')]),
),
]
| bsd-3-clause |
dochang/ansible | test/units/errors/test_errors.py | 62 | 4015 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.tests import unittest
from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject
from ansible.errors import AnsibleError
from ansible.compat.tests import BUILTINS
from ansible.compat.tests.mock import mock_open, patch
class TestErrors(unittest.TestCase):
    """Exercise AnsibleError message formatting with and without a source YAML object."""

    def setUp(self):
        # One plain ASCII message and one carrying multi-byte UTF-8 data.
        self.message = 'This is the error message'
        self.unicode_message = 'This is an error with \xf0\x9f\x98\xa8 in it'
        self.obj = AnsibleBaseYAMLObject()

    def tearDown(self):
        # No fixture state requires explicit teardown.
        pass

    def test_basic_error(self):
        # With no YAML object attached, the message passes through untouched.
        err = AnsibleError(self.message)
        self.assertEqual(err.message, self.message)
        self.assertEqual(repr(err), self.message)

    def test_basic_unicode_error(self):
        # Non-ASCII bytes in the message must survive unmodified.
        err = AnsibleError(self.unicode_message)
        self.assertEqual(err.message, self.unicode_message)
        self.assertEqual(repr(err), self.unicode_message)

    @patch.object(AnsibleError, '_get_error_lines_from_file')
    def test_error_with_object(self, mock_method):
        # When a YAML object with position info is supplied, the message
        # gains file/line/column context plus the offending source line.
        self.obj.ansible_pos = ('foo.yml', 1, 1)
        mock_method.return_value = ('this is line 1\n', '')
        err = AnsibleError(self.message, self.obj)
        self.assertEqual(
            err.message,
            "This is the error message\n\nThe error appears to have been in 'foo.yml': line 1, column 1, but may\nbe elsewhere in the file depending on the exact syntax problem.\n\nThe offending line appears to be:\n\n\nthis is line 1\n^ here\n")

    def test_get_error_lines_from_file(self):
        mocked_open = mock_open()
        mocked_open.return_value.readlines.return_value = ['this is line 1\n']
        with patch('%s.open' % BUILTINS, mocked_open):
            # this line will be found in the file
            self.obj.ansible_pos = ('foo.yml', 1, 1)
            err = AnsibleError(self.message, self.obj)
            self.assertEqual(
                err.message,
                "This is the error message\n\nThe error appears to have been in 'foo.yml': line 1, column 1, but may\nbe elsewhere in the file depending on the exact syntax problem.\n\nThe offending line appears to be:\n\n\nthis is line 1\n^ here\n")

            # this line will not be found, as it is out of the index range
            self.obj.ansible_pos = ('foo.yml', 2, 1)
            err = AnsibleError(self.message, self.obj)
            self.assertEqual(
                err.message,
                "This is the error message\n\nThe error appears to have been in 'foo.yml': line 2, column 1, but may\nbe elsewhere in the file depending on the exact syntax problem.\n\n(specified line no longer in file, maybe it changed?)")

        mocked_open = mock_open()
        mocked_open.return_value.readlines.return_value = ['this line has unicode \xf0\x9f\x98\xa8 in it!\n']
        with patch('%s.open' % BUILTINS, mocked_open):
            # this line will be found in the file
            self.obj.ansible_pos = ('foo.yml', 1, 1)
            err = AnsibleError(self.unicode_message, self.obj)
            self.assertEqual(
                err.message,
                "This is an error with \xf0\x9f\x98\xa8 in it\n\nThe error appears to have been in 'foo.yml': line 1, column 1, but may\nbe elsewhere in the file depending on the exact syntax problem.\n\nThe offending line appears to be:\n\n\nthis line has unicode \xf0\x9f\x98\xa8 in it!\n^ here\n")
| gpl-3.0 |
ryandougherty/mwa-capstone | MWA_Tools/build/matplotlib/doc/mpl_examples/api/patch_collection.py | 3 | 1229 | import matplotlib
from matplotlib.patches import Circle, Wedge, Polygon
from matplotlib.collections import PatchCollection
import pylab
fig=pylab.figure()
ax=fig.add_subplot(111)
resolution = 50 # the number of vertices
N = 3
x = pylab.rand(N)
y = pylab.rand(N)
radii = 0.1*pylab.rand(N)
patches = []
for x1,y1,r in zip(x, y, radii):
circle = Circle((x1,y1), r)
patches.append(circle)
x = pylab.rand(N)
y = pylab.rand(N)
radii = 0.1*pylab.rand(N)
theta1 = 360.0*pylab.rand(N)
theta2 = 360.0*pylab.rand(N)
for x1,y1,r,t1,t2 in zip(x, y, radii, theta1, theta2):
wedge = Wedge((x1,y1), r, t1, t2)
patches.append(wedge)
# Some limiting conditions on Wedge
patches += [
Wedge((.3,.7), .1, 0, 360), # Full circle
Wedge((.7,.8), .2, 0, 360, width=0.05), # Full ring
Wedge((.8,.3), .2, 0, 45), # Full sector
Wedge((.8,.3), .2, 45, 90, width=0.10), # Ring sector
]
for i in range(N):
polygon = Polygon(pylab.rand(N,2), True)
patches.append(polygon)
colors = 100*pylab.rand(len(patches))
p = PatchCollection(patches, cmap=matplotlib.cm.jet, alpha=0.4)
p.set_array(pylab.array(colors))
ax.add_collection(p)
pylab.colorbar(p)
pylab.show()
| gpl-2.0 |
darolt/ndnSIMQoS | bindings/python/ns3modulegen_core_customizations.py | 53 | 19290 | import re
from pybindgen.typehandlers import base as typehandlers
from pybindgen import ReturnValue, Parameter
from pybindgen.cppmethod import CustomCppMethodWrapper, CustomCppConstructorWrapper
from pybindgen.typehandlers.codesink import MemoryCodeSink
from pybindgen.typehandlers import ctypeparser
from pybindgen import cppclass
import warnings
from pybindgen.typehandlers.base import CodeGenerationError
import sys
class SmartPointerTransformation(typehandlers.TypeTransformation):
    """
    A "type transformation" supporting NS-3 smart pointers.

    Parameters such as ``Ptr<Foo> foo`` are transformed into something like
    ``Parameter.new("Foo*", "foo", transfer_ownership=False)``; return values
    such as ``Ptr<Foo>`` become ``ReturnValue.new("Foo*",
    caller_owns_return=False)``.  Because the underlying objects are
    reference counted, PyBindGen then does the right thing.
    """

    def __init__(self):
        super(SmartPointerTransformation, self).__init__()
        # Matches Ptr<T> with an optional (possibly fully-qualified)
        # ns3 namespace prefix.
        self.rx = re.compile(r'(ns3::|::ns3::|)Ptr<([^>]+)>\s*$')

    def _get_untransformed_type_traits(self, name):
        # Return (pointer_ctype, is_const) for a Ptr<...> type name,
        # or (None, False) when the name is not a smart pointer.
        match = self.rx.match(name)
        if match is None:
            return None, False
        inner = match.group(2).strip()
        is_const = False
        if inner.startswith('const '):
            inner = inner[len('const '):]
            is_const = True
        if inner.endswith(' const'):
            inner = inner[:-len(' const')]
            is_const = True
        pointer_name = inner + ' *'
        if pointer_name.startswith('::'):
            pointer_name = pointer_name[2:]
        return pointer_name, is_const

    def get_untransformed_name(self, name):
        pointer_name, _ = self._get_untransformed_type_traits(name)
        return pointer_name

    def create_type_handler(self, type_handler, *args, **kwargs):
        # Smart pointers are reference counted, so neither side takes
        # ownership when crossing the language boundary.
        if issubclass(type_handler, Parameter):
            kwargs['transfer_ownership'] = False
        elif issubclass(type_handler, ReturnValue):
            kwargs['caller_owns_return'] = False
        else:
            raise AssertionError

        ## fix the ctype, add ns3:: namespace
        orig_ctype, is_const = self._get_untransformed_type_traits(args[0])
        template = 'ns3::Ptr< %s const >' if is_const else 'ns3::Ptr< %s >'
        correct_ctype = template % orig_ctype[:-2]
        args = (correct_ctype,) + tuple(args[1:])

        handler = type_handler(*args, **kwargs)
        # NOTE: 'set_tranformation' is pybindgen's actual (misspelled) API name.
        handler.set_tranformation(self, orig_ctype)
        return handler

    def untransform(self, type_handler, declarations, code_block, expression):
        # Unwrap the Ptr<> into a raw pointer via PeekPointer.
        return 'const_cast<%s> (ns3::PeekPointer (%s))' % (type_handler.untransformed_ctype, expression)

    def transform(self, type_handler, declarations, code_block, expression):
        # Re-wrap a raw pointer expression back into a Ptr<>.
        assert type_handler.untransformed_ctype[-1] == '*'
        return 'ns3::Ptr< %s > (%s)' % (type_handler.untransformed_ctype[:-1], expression)
## Register the smart-pointer transformation with pybindgen's global type
## matchers so it is consulted for both return values and parameters.
## The temporary is deleted afterwards to keep the module namespace clean.
transf = SmartPointerTransformation()
typehandlers.return_type_matcher.register_transformation(transf)
typehandlers.param_type_matcher.register_transformation(transf)
del transf
class ArgvParam(Parameter):
    """
    Converts a python list-of-strings argument to a pair of 'int argc,
    char *argv[]' arguments to pass into C.

    One Python argument becomes two C function arguments -> it's a miracle!

    Note: this parameter type handler is not registered by any name;
    must be used explicitly.
    """
    # Only the Python -> C direction is supported.
    DIRECTIONS = [Parameter.DIRECTION_IN]
    # Empty CTYPES: the handler is never auto-matched by type name.
    CTYPES = []
    def convert_c_to_python(self, wrapper):
        # The reverse (C -> Python) direction is deliberately unsupported.
        raise NotImplementedError
    def convert_python_to_c(self, wrapper):
        # Declare the intermediate C variables used by the generated wrapper:
        # the incoming PyObject* list, the derived argc/argv pair, and a
        # loop index for walking the list items.
        py_name = wrapper.declarations.declare_variable('PyObject*', 'py_' + self.name)
        argc_var = wrapper.declarations.declare_variable('int', 'argc')
        name = wrapper.declarations.declare_variable('char**', self.name)
        idx = wrapper.declarations.declare_variable('Py_ssize_t', 'idx')
        # 'O!' enforces that the argument is a PyList at parse time.
        wrapper.parse_params.add_parameter('O!', ['&PyList_Type', '&'+py_name], self.name)
        #wrapper.before_call.write_error_check('!PyList_Check(%s)' % py_name) # XXX
        # Allocate the argv array sized to the list, and ensure it is freed
        # after the wrapped call completes.
        wrapper.before_call.write_code("%s = (char **) malloc(sizeof(char*)*PyList_Size(%s));"
                                       % (name, py_name))
        wrapper.before_call.add_cleanup_code('free(%s);' % name)
        # Emit a C loop copying each list item's string pointer into argv.
        # NOTE: the generated code uses PyString_* (Python 2 C API).
        wrapper.before_call.write_code('''
        for (%(idx)s = 0; %(idx)s < PyList_Size(%(py_name)s); %(idx)s++)
        {
        ''' % vars())
        wrapper.before_call.sink.indent()
        wrapper.before_call.write_code('''
        PyObject *item = PyList_GET_ITEM(%(py_name)s, %(idx)s);
        ''' % vars())
        #wrapper.before_call.write_error_check('item == NULL')
        # Reject any non-string item with a TypeError mentioning the
        # parameter name.
        wrapper.before_call.write_error_check(
            '!PyString_Check(item)',
            failure_cleanup=('PyErr_SetString(PyExc_TypeError, '
                             '"argument %s must be a list of strings");') % self.name)
        wrapper.before_call.write_code(
            '%s[%s] = PyString_AsString(item);' % (name, idx))
        wrapper.before_call.sink.unindent()
        wrapper.before_call.write_code('}')
        # argc is simply the list length; pass argc and argv as two
        # consecutive C call parameters.
        wrapper.before_call.write_code('%s = PyList_Size(%s);' % (argc_var, py_name))
        wrapper.call_params.append(argc_var)
        wrapper.call_params.append(name)
class CallbackImplProxyMethod(typehandlers.ReverseWrapperBase):
    """
    Generates a proxy virtual method whose body forwards the call to a
    similarly named Python callable (held in the C++ member ``m_callback``).
    """

    def __init__(self, return_value, parameters):
        super(CallbackImplProxyMethod, self).__init__(return_value, parameters)

    def generate_python_call(self):
        """Emit the C code that invokes the stored Python callback."""
        params = self.build_params.get_parameters(force_tuple_creation=True)
        # Py_BuildValue takes a (char *) format string; cast away constness
        # when the first build parameter is a string literal.
        if params[0][0] == '"':
            params[0] = '(char *) ' + params[0]
        code = self.before_call
        args_var = code.declare_variable('PyObject*', 'args')
        code.write_code('%s = Py_BuildValue(%s);' % (args_var, ', '.join(params)))
        code.add_cleanup_code('Py_DECREF(%s);' % args_var)
        code.write_code('py_retval = PyObject_CallObject(m_callback, %s);' % args_var)
        code.write_error_check('py_retval == NULL')
        code.add_cleanup_code('Py_DECREF(py_retval);')
def generate_callback_classes(out, callbacks):
for callback_impl_num, template_parameters in enumerate(callbacks):
sink = MemoryCodeSink()
cls_name = "ns3::Callback< %s >" % ', '.join(template_parameters)
#print >> sys.stderr, "***** trying to register callback: %r" % cls_name
class_name = "PythonCallbackImpl%i" % callback_impl_num
sink.writeln('''
class %s : public ns3::CallbackImpl<%s>
{
public:
PyObject *m_callback;
%s(PyObject *callback)
{
Py_INCREF(callback);
m_callback = callback;
}
virtual ~%s()
{
Py_DECREF(m_callback);
m_callback = NULL;
}
virtual bool IsEqual(ns3::Ptr<const ns3::CallbackImplBase> other_base) const
{
const %s *other = dynamic_cast<const %s*> (ns3::PeekPointer (other_base));
if (other != NULL)
return (other->m_callback == m_callback);
else
return false;
}
''' % (class_name, ', '.join(template_parameters), class_name, class_name, class_name, class_name))
sink.indent()
callback_return = template_parameters[0]
return_ctype = ctypeparser.parse_type(callback_return)
if ('const' in return_ctype.remove_modifiers()):
kwargs = {'is_const': True}
else:
kwargs = {}
try:
return_type = ReturnValue.new(str(return_ctype), **kwargs)
except (typehandlers.TypeLookupError, typehandlers.TypeConfigurationError), ex:
warnings.warn("***** Unable to register callback; Return value '%s' error (used in %s): %r"
% (callback_return, cls_name, ex),
Warning)
continue
arguments = []
ok = True
callback_parameters = [arg for arg in template_parameters[1:] if arg != 'ns3::empty']
for arg_num, arg_type in enumerate(callback_parameters):
arg_name = 'arg%i' % (arg_num+1)
param_ctype = ctypeparser.parse_type(arg_type)
if ('const' in param_ctype.remove_modifiers()):
kwargs = {'is_const': True}
else:
kwargs = {}
try:
arguments.append(Parameter.new(str(param_ctype), arg_name, **kwargs))
except (typehandlers.TypeLookupError, typehandlers.TypeConfigurationError), ex:
warnings.warn("***** Unable to register callback; parameter '%s %s' error (used in %s): %r"
% (arg_type, arg_name, cls_name, ex),
Warning)
ok = False
if not ok:
continue
wrapper = CallbackImplProxyMethod(return_type, arguments)
wrapper.generate(sink, 'operator()', decl_modifiers=[])
sink.unindent()
sink.writeln('};\n')
sink.flush_to(out)
class PythonCallbackParameter(Parameter):
    """
    Parameter type handler converting a Python callable into an
    ns3::Callback<...> instance.

    One such class is generated per callback template signature; the
    class attributes below are captured from the enclosing registration
    loop (cls_name, class_name, template_parameters).
    """
    CTYPES = [cls_name]
    print >> sys.stderr, "***** registering callback handler: %r" % ctypeparser.normalize_type_string(cls_name)
    DIRECTIONS = [Parameter.DIRECTION_IN]
    PYTHON_CALLBACK_IMPL_NAME = class_name
    TEMPLATE_ARGS = template_parameters

    def convert_python_to_c(self, wrapper):
        """Emit C code wrapping the Python callable in a CallbackImpl proxy."""
        assert isinstance(wrapper, typehandlers.ForwardWrapperBase)

        if self.default_value is None:
            # Mandatory parameter: parse it and reject non-callables.
            py_callback = wrapper.declarations.declare_variable('PyObject*', self.name)
            wrapper.parse_params.add_parameter('O', ['&'+py_callback], self.name)
            wrapper.before_call.write_error_check(
                '!PyCallable_Check(%s)' % py_callback,
                # BUG FIX: corrected typo "callbale" in the user-visible message.
                'PyErr_SetString(PyExc_TypeError, "parameter \'%s\' must be callable");' % self.name)

            callback_impl = wrapper.declarations.declare_variable(
                'ns3::Ptr<%s>' % self.PYTHON_CALLBACK_IMPL_NAME,
                '%s_cb_impl' % self.name)
            wrapper.before_call.write_code("%s = ns3::Create<%s> (%s);"
                                           % (callback_impl, self.PYTHON_CALLBACK_IMPL_NAME, py_callback))
            wrapper.call_params.append(
                'ns3::Callback<%s> (%s)' % (', '.join(self.TEMPLATE_ARGS), callback_impl))
        else:
            # Optional parameter: if omitted at the Python level, the C++
            # default value is used unchanged.
            py_callback = wrapper.declarations.declare_variable('PyObject*', self.name, 'NULL')
            wrapper.parse_params.add_parameter('O', ['&'+py_callback], self.name, optional=True)
            value = wrapper.declarations.declare_variable(
                'ns3::Callback<%s>' % ', '.join(self.TEMPLATE_ARGS),
                self.name+'_value',
                self.default_value)

            wrapper.before_call.write_code("if (%s) {" % (py_callback,))
            wrapper.before_call.indent()
            wrapper.before_call.write_error_check(
                '!PyCallable_Check(%s)' % py_callback,
                # BUG FIX: corrected typo "callbale" in the user-visible message.
                'PyErr_SetString(PyExc_TypeError, "parameter \'%s\' must be callable");' % self.name)
            wrapper.before_call.write_code("%s = ns3::Callback<%s> (ns3::Create<%s> (%s));"
                                           % (value, ', '.join(self.TEMPLATE_ARGS),
                                              self.PYTHON_CALLBACK_IMPL_NAME, py_callback))
            wrapper.before_call.unindent()
            wrapper.before_call.write_code("}") # closes: if (py_callback) {

            wrapper.call_params.append(value)

    def convert_c_to_python(self, wrapper):
        """Reverse direction unsupported: C++-defined callbacks are not proxied."""
        raise typehandlers.NotSupportedError("Reverse wrappers for ns3::Callback<...> types "
                                             "(python using callbacks defined in C++) not implemented.")
# def write_preamble(out):
# pybindgen.write_preamble(out)
# out.writeln("#include \"ns3/everything.h\"")
def Simulator_customizations(module):
    """Install hand-written wrappers for ns3::Simulator's variadic statics.

    Schedule, ScheduleNow, ScheduleDestroy and Run accept arbitrary user
    arguments that generated wrappers cannot express, so each is bound to
    a dedicated custom C wrapper function (_wrap_Simulator_<name>).
    """
    Simulator = module['ns3::Simulator']

    for method_name in ("Schedule", "ScheduleNow", "ScheduleDestroy", "Run"):
        # Fresh flags list per call, in case the registry keeps a reference.
        Simulator.add_custom_method_wrapper(
            method_name, "_wrap_Simulator_%s" % method_name,
            flags=["METH_VARARGS", "METH_KEYWORDS", "METH_STATIC"])
def CommandLine_customizations(module):
    """Tweak the generated bindings for ns3::CommandLine."""
    CommandLine = module['ns3::CommandLine']
    # Parse() takes argc/argv; ArgvParam adapts a Python sequence to that.
    CommandLine.add_method('Parse', None, [ArgvParam(None, 'argv')],
                           is_static=False)
    # AddValue is bound to a hand-written wrapper -- presumably because the
    # generated wrapper cannot express its signature (confirm against
    # _wrap_CommandLine_AddValue).
    CommandLine.add_custom_method_wrapper("AddValue", "_wrap_CommandLine_AddValue",
                                          flags=["METH_VARARGS", "METH_KEYWORDS"])
def Object_customizations(module):
    """Customize construction of every class deriving from ns3::Object.

    Object-derived instances are reference counted and use two-phase
    construction, so plain `new` is not sufficient; this installs
    instance-creation hooks and gives generated helper (proxy) classes a
    proper TypeId registration.
    """
    try:
        Object = module['ns3::Object']
    except KeyError:
        # Module without ns3::Object (e.g. a submodule): nothing to do.
        return

    ## add a GetTypeId method to all generated helper classes
    def helper_class_hook(helper_class):
        decl = """
static ns3::TypeId GetTypeId (void)
{
  static ns3::TypeId tid = ns3::TypeId ("%s")
    .SetParent< %s > ()
    ;
  return tid;
}""" % (helper_class.name, helper_class.class_.full_name)
        helper_class.add_custom_method(decl)
        helper_class.add_post_generation_code(
            "NS_OBJECT_ENSURE_REGISTERED (%s);" % helper_class.name)
    Object.add_helper_class_hook(helper_class_hook)

    def ns3_object_instance_creation_function(cpp_class, code_block, lvalue,
                                              parameters, construct_type_name):
        """Emit C++ that constructs an Object subclass and takes a reference."""
        assert lvalue
        # Guard against a literal 'None' lvalue leaking from the code
        # generator -- TODO confirm the scenario this protects against.
        assert not lvalue.startswith('None')
        if cpp_class.cannot_be_constructed:
            # BUG FIX: the original passed a single value to a two-%s format
            # string, raising TypeError instead of the intended error.  The
            # second placeholder is the reason recorded by
            # set_cannot_be_constructed() (see add_std_ofstream).
            raise CodeGenerationError("%s cannot be constructed (%s)"
                                      % (cpp_class.full_name,
                                         cpp_class.cannot_be_constructed))
        if cpp_class.incomplete_type:
            raise CodeGenerationError("%s cannot be constructed (incomplete type)"
                                      % cpp_class.full_name)
        code_block.write_code("%s = new %s(%s);" % (lvalue, construct_type_name, parameters))
        # ns3::Object is reference counted; take the initial reference.
        code_block.write_code("%s->Ref ();" % (lvalue))

    def ns3_object_post_instance_creation_function(cpp_class, code_block, lvalue,
                                                   parameters, construct_type_name):
        """Emit the second construction phase (ns3::CompleteConstruct)."""
        code_block.write_code("ns3::CompleteConstruct(%s);" % (lvalue, ))

    Object.set_instance_creation_function(ns3_object_instance_creation_function)
    Object.set_post_instance_creation_function(ns3_object_post_instance_creation_function)
def Attribute_customizations(module):
    # Fix up for the "const AttributeValue &v = EmptyAttribute()"
    # case, as used extensively by helper classes.
    #
    # pybindgen's scanner only receives a default value as a plain C
    # expression string and never learns its type, so we walk every
    # parameter of every method and supply the concrete type for
    # AttributeValue defaults ourselves.
    for cls in module.classes:
        for meth in cls.get_all_methods():
            for param in meth.parameters:
                if not isinstance(param, cppclass.CppClassRefParameter):
                    continue
                needs_default_type = (
                    param.cpp_class.name == 'AttributeValue'
                    and param.default_value is not None
                    and param.default_value_type is None)
                if needs_default_type:
                    param.default_value_type = 'ns3::EmptyAttributeValue'
def TypeId_customizations(module):
    """Register the custom wrapper for ns3::TypeId::LookupByNameFailSafe."""
    TypeId = module['ns3::TypeId']
    # Bound to a hand-written wrapper, presumably because the generated
    # code cannot express its signature -- see _wrap_TypeId_LookupByNameFailSafe.
    TypeId.add_custom_method_wrapper("LookupByNameFailSafe", "_wrap_TypeId_LookupByNameFailSafe",
                                     flags=["METH_VARARGS", "METH_KEYWORDS", "METH_STATIC"])
def add_std_ofstream(module):
    """Expose std::ostream/std::ofstream so wrapped APIs taking streams work."""
    module.add_include('<fstream>')
    # ostream is abstract; only ofstream gets a constructor.
    ostream = module.add_class('ostream', foreign_cpp_namespace='::std')
    ostream.set_cannot_be_constructed("abstract base class")
    ofstream = module.add_class('ofstream', foreign_cpp_namespace='::std', parent=ostream)
    # Mirror std::ios_base's open-mode flags as a Python-visible enum.
    ofstream.add_enum('openmode', [
        ('app', 'std::ios_base::app'),
        ('ate', 'std::ios_base::ate'),
        ('binary', 'std::ios_base::binary'),
        ('in', 'std::ios_base::in'),
        ('out', 'std::ios_base::out'),
        ('trunc', 'std::ios_base::trunc'),
        ])
    ofstream.add_constructor([Parameter.new("const char *", 'filename'),
                              Parameter.new("::std::ofstream::openmode", 'mode', default_value="std::ios_base::out")])
    ofstream.add_method('close', None, [])

    add_std_ios_openmode(module)
def add_std_ios_openmode(module):
    """Make std::ios open-mode values usable as plain Python ints."""
    import pybindgen.typehandlers.base
    # Alias the openmode typedefs to int so flag values pass straight through.
    for alias in "std::_Ios_Openmode", "std::ios::openmode":
        pybindgen.typehandlers.base.param_type_matcher.add_type_alias(alias, "int")
    # Export the flag values as module constants (STD_IOS_IN, STD_IOS_OUT, ...).
    for flag in 'in', 'out', 'ate', 'app', 'trunc', 'binary':
        module.after_init.write_code('PyModule_AddIntConstant(m, (char *) "STD_IOS_%s", std::ios::%s);'
                                     % (flag.upper(), flag))
def add_ipv4_address_tp_hash(module):
    """Give Ipv4Address a tp_hash slot so instances can be dict/set keys."""
    # The emitted C hash function delegates to ns3::Ipv4AddressHash applied
    # to the wrapped C++ object.
    module.body.writeln('''
long
_ns3_Ipv4Address_tp_hash (PyObject *obj)
{
   PyNs3Ipv4Address *addr = reinterpret_cast<PyNs3Ipv4Address *> (obj);
   return static_cast<long> (ns3::Ipv4AddressHash () (*addr->obj));
}
''')
    module.header.writeln('long _ns3_Ipv4Address_tp_hash (PyObject *obj);')
    module['Ipv4Address'].pytype.slots['tp_hash'] = "_ns3_Ipv4Address_tp_hash"
| gpl-2.0 |
pudo/aleph | aleph/migrate/versions/d17d4d4fd1ee_add_event_log.py | 4 | 1225 | """Add event log.
Revision ID: d17d4d4fd1ee
Revises: 95779b509fe4
Create Date: 2016-07-22 12:21:14.296489
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = 'd17d4d4fd1ee'
down_revision = '95779b509fe4'
def upgrade():
    """Create the event_log table and its index on `action`."""
    op.create_table('event_log',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('created_at', sa.DateTime(), nullable=True),
        sa.Column('updated_at', sa.DateTime(), nullable=True),
        sa.Column('action', sa.Unicode(length=255), nullable=True),
        sa.Column('source_ip', sa.Unicode(length=255), nullable=True),
        sa.Column('path', sa.Unicode(), nullable=True),
        # Presumably the request query string and payload -- stored as
        # PostgreSQL JSONB documents (confirm against the model).
        sa.Column('query', postgresql.JSONB(), nullable=True),
        sa.Column('data', postgresql.JSONB(), nullable=True),
        sa.Column('role_id', sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(['role_id'], ['role.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_event_log_action'), 'event_log', ['action'], unique=False)
def downgrade():
    """Revert the migration: drop the index first, then the table."""
    op.drop_index(op.f('ix_event_log_action'), table_name='event_log')
    op.drop_table('event_log')
| mit |
j00bar/ansible | lib/ansible/modules/windows/win_chocolatey.py | 60 | 3973 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Trond Hindenes <trond@hindenes.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
# Consumed by Ansible's documentation and build tooling; not user-facing.
ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'curated'}
DOCUMENTATION = r'''
---
module: win_chocolatey
version_added: "1.9"
short_description: Installs packages using chocolatey
description:
- Installs packages using Chocolatey (U(http://chocolatey.org/)).
- If Chocolatey is missing from the system, the module will install it.
- List of packages can be found at U(http://chocolatey.org/packages)
options:
name:
description:
- Name of the package to be installed.
required: true
state:
description:
- State of the package on the system.
choices:
- present
- absent
- latest
- reinstalled
default: present
force:
description:
- Forces install of the package (even if it already exists).
- Using C(force) will cause ansible to always report that a change was made.
choices:
- yes
- no
default: no
upgrade:
description:
- If package is already installed it, try to upgrade to the latest version or to the specified version.
- As of Ansible v2.3 this is deprecated, set parameter C(state) to "latest" for the same result.
choices:
- yes
- no
default: no
version:
description:
- Specific version of the package to be installed.
- Ignored when C(state) is set to "absent".
source:
description:
- Specify source rather than using default chocolatey repository.
install_args:
description:
- Arguments to pass to the native installer.
version_added: '2.1'
params:
description:
- Parameters to pass to the package
version_added: '2.1'
allow_empty_checksums:
description:
- Allow empty checksums to be used.
default: false
version_added: '2.2'
ignore_checksums:
description:
- Ignore checksums altogether.
default: false
version_added: '2.2'
ignore_dependencies:
description:
- Ignore dependencies, only install/upgrade the package itself.
default: false
version_added: '2.1'
timeout:
description:
- The time to allow chocolatey to finish before timing out.
default: 2700
version_added: '2.3'
aliases: [ execution_timeout ]
author: "Trond Hindenes (@trondhindenes), Peter Mounce (@petemounce), Pepe Barbe (@elventear), Adam Keech (@smadam813)"
'''
# TODO:
# * Better parsing when a package has dependencies - currently fails
# * Time each item that is run
# * Support 'changed' with gems - would require shelling out to `gem list` first and parsing, kinda defeating the point of using chocolatey.
EXAMPLES = r'''
# Install git
win_chocolatey:
name: git
state: present
# Upgrade installed packages
win_chocolatey:
name: all
state: latest
# Install notepadplusplus version 6.6
win_chocolatey:
name: notepadplusplus.install
version: '6.6'
# Install git from specified repository
win_chocolatey:
name: git
source: https://someserver/api/v2/
# Uninstall git
win_chocolatey:
name: git
state: absent
'''
| gpl-3.0 |
blois/AndroidSDKCloneMin | ndk/prebuilt/linux-x86_64/lib/python2.7/test/test_descrtut.py | 75 | 12052 | # This contains most of the executable examples from Guido's descr
# tutorial, once at
#
# http://www.python.org/2.2/descrintro.html
#
# A few examples left implicit in the writeup were fleshed out, a few were
# skipped due to lack of interest (e.g., faking super() by hand isn't
# of much interest anymore), and a few were fiddled to make the output
# deterministic.
from test.test_support import sortdict
import pprint
class defaultdict(dict):
    """Tutorial dict subclass: missing keys yield a shared default value."""

    def __init__(self, default=None):
        dict.__init__(self)
        self.default = default

    def __getitem__(self, key):
        # Look-before-you-leap variant of the original try/except lookup;
        # absent keys fall back to the shared default.
        if dict.__contains__(self, key):
            return dict.__getitem__(self, key)
        return self.default

    def get(self, key, *args):
        # When the caller supplies no explicit fallback, substitute ours.
        fallback = args if args else (self.default,)
        return dict.get(self, key, *fallback)

    def merge(self, other):
        # Adopt entries from `other` without overwriting existing keys.
        for key in other:
            self.setdefault(key, other[key])
test_1 = """
Here's the new type at work:
>>> print defaultdict # show our type
<class 'test.test_descrtut.defaultdict'>
>>> print type(defaultdict) # its metatype
<type 'type'>
>>> a = defaultdict(default=0.0) # create an instance
>>> print a # show the instance
{}
>>> print type(a) # show its type
<class 'test.test_descrtut.defaultdict'>
>>> print a.__class__ # show its class
<class 'test.test_descrtut.defaultdict'>
>>> print type(a) is a.__class__ # its type is its class
True
>>> a[1] = 3.25 # modify the instance
>>> print a # show the new value
{1: 3.25}
>>> print a[1] # show the new item
3.25
>>> print a[0] # a non-existent item
0.0
>>> a.merge({1:100, 2:200}) # use a dict method
>>> print sortdict(a) # show the result
{1: 3.25, 2: 200}
>>>
We can also use the new type in contexts where classic only allows "real"
dictionaries, such as the locals/globals dictionaries for the exec
statement or the built-in function eval():
>>> def sorted(seq):
... seq.sort(key=str)
... return seq
>>> print sorted(a.keys())
[1, 2]
>>> exec "x = 3; print x" in a
3
>>> print sorted(a.keys())
[1, 2, '__builtins__', 'x']
>>> print a['x']
3
>>>
Now I'll show that defaultdict instances have dynamic instance variables,
just like classic classes:
>>> a.default = -1
>>> print a["noway"]
-1
>>> a.default = -1000
>>> print a["noway"]
-1000
>>> 'default' in dir(a)
True
>>> a.x1 = 100
>>> a.x2 = 200
>>> print a.x1
100
>>> d = dir(a)
>>> 'default' in d and 'x1' in d and 'x2' in d
True
>>> print sortdict(a.__dict__)
{'default': -1000, 'x1': 100, 'x2': 200}
>>>
"""
class defaultdict2(dict):
    """Like defaultdict above, but __slots__ forbids ad-hoc instance attrs."""

    __slots__ = ['default']

    def __init__(self, default=None):
        dict.__init__(self)
        self.default = default

    def __getitem__(self, key):
        # Absent keys fall back to the shared default value.
        if dict.__contains__(self, key):
            return dict.__getitem__(self, key)
        return self.default

    def get(self, key, *args):
        fallback = args if args else (self.default,)
        return dict.get(self, key, *fallback)

    def merge(self, other):
        # Copy only keys we do not already have.
        for key in other:
            self.setdefault(key, other[key])
test_2 = """
The __slots__ declaration takes a list of instance variables, and reserves
space for exactly these in the instance. When __slots__ is used, other
instance variables cannot be assigned to:
>>> a = defaultdict2(default=0.0)
>>> a[1]
0.0
>>> a.default = -1
>>> a[1]
-1
>>> a.x1 = 1
Traceback (most recent call last):
File "<stdin>", line 1, in ?
AttributeError: 'defaultdict2' object has no attribute 'x1'
>>>
"""
test_3 = """
Introspecting instances of built-in types
For instance of built-in types, x.__class__ is now the same as type(x):
>>> type([])
<type 'list'>
>>> [].__class__
<type 'list'>
>>> list
<type 'list'>
>>> isinstance([], list)
True
>>> isinstance([], dict)
False
>>> isinstance([], object)
True
>>>
Under the new proposal, the __methods__ attribute no longer exists:
>>> [].__methods__
Traceback (most recent call last):
File "<stdin>", line 1, in ?
AttributeError: 'list' object has no attribute '__methods__'
>>>
Instead, you can get the same information from the list type:
>>> pprint.pprint(dir(list)) # like list.__dict__.keys(), but sorted
['__add__',
'__class__',
'__contains__',
'__delattr__',
'__delitem__',
'__delslice__',
'__doc__',
'__eq__',
'__format__',
'__ge__',
'__getattribute__',
'__getitem__',
'__getslice__',
'__gt__',
'__hash__',
'__iadd__',
'__imul__',
'__init__',
'__iter__',
'__le__',
'__len__',
'__lt__',
'__mul__',
'__ne__',
'__new__',
'__reduce__',
'__reduce_ex__',
'__repr__',
'__reversed__',
'__rmul__',
'__setattr__',
'__setitem__',
'__setslice__',
'__sizeof__',
'__str__',
'__subclasshook__',
'append',
'count',
'extend',
'index',
'insert',
'pop',
'remove',
'reverse',
'sort']
The new introspection API gives more information than the old one: in
addition to the regular methods, it also shows the methods that are
normally invoked through special notations, e.g. __iadd__ (+=), __len__
(len), __ne__ (!=). You can invoke any method from this list directly:
>>> a = ['tic', 'tac']
>>> list.__len__(a) # same as len(a)
2
>>> a.__len__() # ditto
2
>>> list.append(a, 'toe') # same as a.append('toe')
>>> a
['tic', 'tac', 'toe']
>>>
This is just like it is for user-defined classes.
"""
test_4 = """
Static methods and class methods
The new introspection API makes it possible to add static methods and class
methods. Static methods are easy to describe: they behave pretty much like
static methods in C++ or Java. Here's an example:
>>> class C:
...
... @staticmethod
... def foo(x, y):
... print "staticmethod", x, y
>>> C.foo(1, 2)
staticmethod 1 2
>>> c = C()
>>> c.foo(1, 2)
staticmethod 1 2
Class methods use a similar pattern to declare methods that receive an
implicit first argument that is the *class* for which they are invoked.
>>> class C:
... @classmethod
... def foo(cls, y):
... print "classmethod", cls, y
>>> C.foo(1)
classmethod test.test_descrtut.C 1
>>> c = C()
>>> c.foo(1)
classmethod test.test_descrtut.C 1
>>> class D(C):
... pass
>>> D.foo(1)
classmethod test.test_descrtut.D 1
>>> d = D()
>>> d.foo(1)
classmethod test.test_descrtut.D 1
This prints "classmethod __main__.D 1" both times; in other words, the
class passed as the first argument of foo() is the class involved in the
call, not the class involved in the definition of foo().
But notice this:
>>> class E(C):
... @classmethod
... def foo(cls, y): # override C.foo
... print "E.foo() called"
... C.foo(y)
>>> E.foo(1)
E.foo() called
classmethod test.test_descrtut.C 1
>>> e = E()
>>> e.foo(1)
E.foo() called
classmethod test.test_descrtut.C 1
In this example, the call to C.foo() from E.foo() will see class C as its
first argument, not class E. This is to be expected, since the call
specifies the class C. But it stresses the difference between these class
methods and methods defined in metaclasses (where an upcall to a metamethod
would pass the target class as an explicit first argument).
"""
test_5 = """
Attributes defined by get/set methods
>>> class property(object):
...
... def __init__(self, get, set=None):
... self.__get = get
... self.__set = set
...
... def __get__(self, inst, type=None):
... return self.__get(inst)
...
... def __set__(self, inst, value):
... if self.__set is None:
... raise AttributeError, "this attribute is read-only"
... return self.__set(inst, value)
Now let's define a class with an attribute x defined by a pair of methods,
getx() and setx():
>>> class C(object):
...
... def __init__(self):
... self.__x = 0
...
... def getx(self):
... return self.__x
...
... def setx(self, x):
... if x < 0: x = 0
... self.__x = x
...
... x = property(getx, setx)
Here's a small demonstration:
>>> a = C()
>>> a.x = 10
>>> print a.x
10
>>> a.x = -10
>>> print a.x
0
>>>
Hmm -- property is builtin now, so let's try it that way too.
>>> del property # unmask the builtin
>>> property
<type 'property'>
>>> class C(object):
... def __init__(self):
... self.__x = 0
... def getx(self):
... return self.__x
... def setx(self, x):
... if x < 0: x = 0
... self.__x = x
... x = property(getx, setx)
>>> a = C()
>>> a.x = 10
>>> print a.x
10
>>> a.x = -10
>>> print a.x
0
>>>
"""
test_6 = """
Method resolution order
This example is implicit in the writeup.
>>> class A: # classic class
... def save(self):
... print "called A.save()"
>>> class B(A):
... pass
>>> class C(A):
... def save(self):
... print "called C.save()"
>>> class D(B, C):
... pass
>>> D().save()
called A.save()
>>> class A(object): # new class
... def save(self):
... print "called A.save()"
>>> class B(A):
... pass
>>> class C(A):
... def save(self):
... print "called C.save()"
>>> class D(B, C):
... pass
>>> D().save()
called C.save()
"""
class A(object):
    # Root of the cooperative-super() diamond exercised by test_7.
    def m(self):
        return "A"

class B(A):
    def m(self):
        # Prepend "B", then continue along the MRO.
        return "B" + super(B, self).m()

class C(A):
    def m(self):
        return "C" + super(C, self).m()

class D(C, B):
    # MRO is D, C, B, A, so D().m() == "DCBA" (pinned by test_7's doctest).
    def m(self):
        return "D" + super(D, self).m()
test_7 = """
Cooperative methods and "super"
>>> print D().m() # "DCBA"
DCBA
"""
test_8 = """
Backwards incompatibilities
>>> class A:
... def foo(self):
... print "called A.foo()"
>>> class B(A):
... pass
>>> class C(A):
... def foo(self):
... B.foo(self)
>>> C().foo()
Traceback (most recent call last):
...
TypeError: unbound method foo() must be called with B instance as first argument (got C instance instead)
>>> class C(A):
... def foo(self):
... A.foo(self)
>>> C().foo()
called A.foo()
"""
# Map doctest names to the tutorial snippets above; the doctest runner
# picks this dictionary up automatically.
__test__ = {"tut1": test_1,
            "tut2": test_2,
            "tut3": test_3,
            "tut4": test_4,
            "tut5": test_5,
            "tut6": test_6,
            "tut7": test_7,
            "tut8": test_8}
# Magic test name that regrtest.py invokes *after* importing this module.
# This worms around a bootstrap problem.
# Note that doctest and regrtest both look in sys.argv for a "-v" argument,
# so this works as expected in both ways of running regrtest.
def test_main(verbose=None):
    """Entry point invoked by regrtest: run this module's doctests."""
    # Obscure: import this module as test.test_descrtut instead of as
    # plain test_descrtut because the name of this module works its way
    # into the doctest examples, and unless the full test.test_descrtut
    # business is used the name can change depending on how the test is
    # invoked.
    from test import test_support, test_descrtut
    test_support.run_doctest(test_descrtut, verbose)

# This part isn't needed for regrtest, but for running the test directly.
if __name__ == "__main__":
    test_main(1)
| apache-2.0 |
doduytrung/odoo-8.0 | addons/hr_holidays/wizard/__init__.py | 442 | 1122 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import hr_holidays_summary_department
import hr_holidays_summary_employees
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
CatsAndDogsbvba/odoo | addons/account_payment/wizard/account_payment_pay.py | 382 | 2448 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
#TODO:REMOVE this wizard is not used
class account_payment_make_payment(osv.osv_memory):
    """Transient wizard that confirms (sets done) a payment order.

    NOTE(review): flagged above as unused ("TODO:REMOVE"); kept only for
    backward compatibility.
    """
    _name = "account.payment.make.payment"
    _description = "Account make payment"

    def launch_wizard(self, cr, uid, ids, context=None):
        """
        Search for a wizard to launch according to the type.
        If type is manual. just confirm the order.
        """
        obj_payment_order = self.pool.get('payment.order')
        if context is None:
            context = {}
        # The mode-specific dispatch (commented out below) is disabled: the
        # active order is simply marked done and the wizard window closed.
#        obj_model = self.pool.get('ir.model.data')
#        obj_act = self.pool.get('ir.actions.act_window')
#        order = obj_payment_order.browse(cr, uid, context['active_id'], context)
        obj_payment_order.set_done(cr, uid, [context['active_id']], context)
        return {'type': 'ir.actions.act_window_close'}
#        t = order.mode and order.mode.type.code or 'manual'
#        if t == 'manual':
#            obj_payment_order.set_done(cr,uid,context['active_id'],context)
#            return {}
#
#        gw = obj_payment_order.get_wizard(t)
#        if not gw:
#            obj_payment_order.set_done(cr,uid,context['active_id'],context)
#            return {}
#
#        module, wizard= gw
#        result = obj_model._get_id(cr, uid, module, wizard)
#        id = obj_model.read(cr, uid, [result], ['res_id'])[0]['res_id']
#        return obj_act.read(cr, uid, [id])[0]
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
msmolens/VTK | ThirdParty/Twisted/twisted/words/xish/xpath.py | 67 | 9290 | # -*- test-case-name: twisted.words.test.test_xpath -*-
#
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
XPath query support.
This module provides L{XPathQuery} to match
L{domish.Element<twisted.words.xish.domish.Element>} instances against
XPath-like expressions.
"""
try:
import cStringIO as StringIO
except ImportError:
import StringIO
class LiteralValue(str):
    """A constant string operand in an XPath expression."""
    def value(self, elem):
        # Literals evaluate to themselves regardless of the element.
        return self
class IndexValue:
    """Positional operand: evaluates to the n-th child of an element."""
    def __init__(self, index):
        # XPath positions are 1-based; store a 0-based list index.
        self.index = int(index) - 1

    def value(self, elem):
        return elem.children[self.index]
class AttribValue:
    """Attribute operand: evaluates to an element's attribute value.

    The pseudo-attribute C{xmlns} is special-cased to yield the element's
    namespace URI instead of a regular attribute lookup.
    """

    def __init__(self, attribname):
        self.attribname = attribname
        if attribname == "xmlns":
            # Bind the namespace lookup once, at construction time.
            self.value = self.value_ns

    def value_ns(self, elem):
        return elem.uri

    def value(self, elem):
        # Guard clause: absent attributes evaluate to None.
        if self.attribname not in elem.attributes:
            return None
        return elem.attributes[self.attribname]
class CompareValue:
    """Binary comparison ('=' or '!=') between two XPath value expressions."""

    def __init__(self, lhs, op, rhs):
        self.lhs = lhs
        self.rhs = rhs
        # Select the comparison strategy once instead of testing op per call.
        self.value = self._compareEqual if op == "=" else self._compareNotEqual

    def _compareEqual(self, elem):
        return self.lhs.value(elem) == self.rhs.value(elem)

    def _compareNotEqual(self, elem):
        return self.lhs.value(elem) != self.rhs.value(elem)
class BooleanValue:
    """
    Boolean XPath expression operator ('and' / 'or').

    @ivar lhs: Left hand side expression of the operator.
    @ivar rhs: Right hand side expression of the operator.
    @ivar value: Bound evaluation method chosen from the operator at
        construction time.
    """

    def __init__(self, lhs, op, rhs):
        self.lhs = lhs
        self.rhs = rhs
        # Bind the evaluation strategy up front; op itself is not stored.
        self.value = self._booleanAnd if op == "and" else self._booleanOr

    def _booleanAnd(self, elem):
        """Short-circuit logical AND of both sub-expressions for C{elem}."""
        return self.lhs.value(elem) and self.rhs.value(elem)

    def _booleanOr(self, elem):
        """Short-circuit logical OR of both sub-expressions for C{elem}."""
        return self.lhs.value(elem) or self.rhs.value(elem)
def Function(fname):
    """
    Internal method which selects the function object
    """
    # XPath function "foo" maps to class _foo_Function in this module;
    # an unknown name raises KeyError here.
    klassname = "_%s_Function" % fname
    c = globals()[klassname]()
    return c
class _not_Function:
    """Implementation of the XPath C{not()} function."""
    def __init__(self):
        self.baseValue = None

    def setParams(self, baseValue):
        # Single argument: the expression whose truth value is negated.
        self.baseValue = baseValue

    def value(self, elem):
        return not self.baseValue.value(elem)
class _text_Function:
    """Implementation of the XPath C{text()} function."""
    def setParams(self):
        # text() takes no arguments.
        pass

    def value(self, elem):
        # str(elem) presumably yields the element's character data -- see
        # domish.Element.__str__ to confirm.
        return str(elem)
class _Location:
    """
    One step of an XPath location path.

    @ivar elementName: required element name, or None to match any name.
    @ivar predicates: predicate expressions that must all evaluate true.
    @ivar childLocation: the next step, matched against child elements,
        or None if this is the final step.
    """
    def __init__(self):
        self.predicates = []
        self.elementName = None
        self.childLocation = None

    def matchesPredicates(self, elem):
        # Name check first, then every predicate; returns 0/1 in the
        # file's historical pre-bool style.
        if self.elementName != None and self.elementName != elem.name:
            return 0

        for p in self.predicates:
            if not p.value(elem):
                return 0

        return 1

    def matches(self, elem):
        # elem matches when this step matches and, recursively, some child
        # matches the rest of the path.
        if not self.matchesPredicates(elem):
            return 0

        if self.childLocation != None:
            for c in elem.elements():
                if self.childLocation.matches(c):
                    return 1
        else:
            return 1

        return 0

    def queryForString(self, elem, resultbuf):
        # Accumulate str() of every element matched by the final step.
        if not self.matchesPredicates(elem):
            return

        if self.childLocation != None:
            for c in elem.elements():
                self.childLocation.queryForString(c, resultbuf)
        else:
            resultbuf.write(str(elem))

    def queryForNodes(self, elem, resultlist):
        # Collect the elements matched by the final step of the path.
        if not self.matchesPredicates(elem):
            return

        if self.childLocation != None:
            for c in elem.elements():
                self.childLocation.queryForNodes(c, resultlist)
        else:
            resultlist.append(elem)

    def queryForStringList(self, elem, resultlist):
        # Collect the plain-string children of matched elements.
        if not self.matchesPredicates(elem):
            return

        if self.childLocation != None:
            for c in elem.elements():
                self.childLocation.queryForStringList(c, resultlist)
        else:
            for c in elem.children:
                if isinstance(c, (str, unicode)):
                    resultlist.append(c)
class _AnyLocation:
    """
    An XPath C{//}-style step: matches the element itself or any
    descendant, unlike L{_Location} which matches one level only.
    """
    def __init__(self):
        self.predicates = []
        self.elementName = None
        self.childLocation = None

    def matchesPredicates(self, elem):
        # All predicates must evaluate true; 0/1 kept from pre-bool style.
        for p in self.predicates:
            if not p.value(elem):
                return 0
        return 1

    def listParents(self, elem, parentlist):
        # Build the list of ancestor names from the root down to elem.
        if elem.parent != None:
            self.listParents(elem.parent, parentlist)
        parentlist.append(elem.name)

    def isRootMatch(self, elem):
        # True when elem itself (not a descendant) satisfies this step and
        # the remainder of the location path.
        if (self.elementName == None or self.elementName == elem.name) and \
           self.matchesPredicates(elem):
            if self.childLocation != None:
                for c in elem.elements():
                    if self.childLocation.matches(c):
                        return True
            else:
                return True
        return False

    def findFirstRootMatch(self, elem):
        # Return the first element (elem or a descendant) that matches the
        # whole path, or None.
        if (self.elementName == None or self.elementName == elem.name) and \
           self.matchesPredicates(elem):
            # Thus far, the name matches and the predicates match,
            # now check into the children and find the first one
            # that matches the rest of the structure
            # the rest of the structure
            if self.childLocation != None:
                for c in elem.elements():
                    if self.childLocation.matches(c):
                        return c
                return None
            else:
                # No children locations; this is a match!
                return elem
        else:
            # Ok, predicates or name didn't match, so we need to start
            # down each child and treat it as the root and try
            # again
            for c in elem.elements():
                if self.matches(c):
                    return c
            # No children matched...
            return None

    def matches(self, elem):
        if self.isRootMatch(elem):
            return True
        else:
            # Ok, initial element isn't an exact match, walk
            # down each child and treat it as the root and try
            # again
            for c in elem.elements():
                if self.matches(c):
                    return True
            # No children matched...
            return False

    def queryForString(self, elem, resultbuf):
        # Deliberately unsupported for '//' paths.
        raise NotImplementedError(
            "queryForString is not implemented for any location")

    def queryForNodes(self, elem, resultlist):
        # First check to see if _this_ element is a root
        if self.isRootMatch(elem):
            resultlist.append(elem)

        # Now check each child
        for c in elem.elements():
            self.queryForNodes(c, resultlist)

    def queryForStringList(self, elem, resultlist):
        # Collect plain-string children of every matching element in the
        # whole subtree.
        if self.isRootMatch(elem):
            for c in elem.children:
                if isinstance(c, (str, unicode)):
                    resultlist.append(c)
        for c in elem.elements():
            self.queryForStringList(c, resultlist)
class XPathQuery:
    """A compiled XPath expression that can be evaluated against elements."""
    def __init__(self, queryStr):
        self.queryStr = queryStr
        # Imported lazily so merely importing this module does not pull in
        # the generated parser.
        from twisted.words.xish.xpathparser import parse
        self.baseLocation = parse('XPATH', queryStr)
    def __hash__(self):
        # Queries hash like their source text, so they can be interned.
        return self.queryStr.__hash__()
    def matches(self, elem):
        """Return True if *elem* matches this query."""
        return self.baseLocation.matches(elem)
    def queryForString(self, elem):
        """Return the character data selected by this query as one string."""
        result = StringIO.StringIO()
        self.baseLocation.queryForString(elem, result)
        return result.getvalue()
    def queryForNodes(self, elem):
        """Return the list of matching elements, or None when there are none."""
        result = []
        self.baseLocation.queryForNodes(elem, result)
        if len(result) == 0:
            return None
        else:
            return result
    def queryForStringList(self, elem):
        """Return the list of matching character data, or None when there is none."""
        result = []
        self.baseLocation.queryForStringList(elem, result)
        if len(result) == 0:
            return None
        else:
            return result
# Module-level cache of compiled queries, keyed by their source text.
__internedQueries = {}

def internQuery(queryString):
    """Return a shared XPathQuery for *queryString*, compiling it only once."""
    try:
        return __internedQueries[queryString]
    except KeyError:
        query = XPathQuery(queryString)
        __internedQueries[queryString] = query
        return query
def matches(xpathstr, elem):
    """Evaluate the XPath expression *xpathstr* against *elem*."""
    query = internQuery(xpathstr)
    return query.matches(elem)
def queryForStringList(xpathstr, elem):
    """Return the character data selected by *xpathstr* from *elem*."""
    query = internQuery(xpathstr)
    return query.queryForStringList(elem)
def queryForString(xpathstr, elem):
    """Return the concatenated string result of *xpathstr* applied to *elem*."""
    query = internQuery(xpathstr)
    return query.queryForString(elem)
def queryForNodes(xpathstr, elem):
    """Return the elements selected by *xpathstr* from *elem* (None when empty)."""
    query = internQuery(xpathstr)
    return query.queryForNodes(elem)
| bsd-3-clause |
rcharp/Simple | app/pages/calc.py | 1 | 7278 | from date import one_month_ago, now, two_months_ago, get_datetime_string, get_datetime, get_date_string, get_short_date_string, \
datetime_to_int, \
yesterday, jsonify
from flask_cache import Cache
from flask import session
from app.app_and_db import app
from collections import OrderedDict, Counter
from eventClass import Event
import datetime
import json
app.config['CACHE_TYPE'] = 'simple'
app.cache = Cache(app)
# used for the percentages of each metric
def convert_to_percent(value):
    """Turn a ratio into a percentage rounded to a single decimal place."""
    scaled = float(value) * 100
    return float("{:.1f}".format(scaled))
# all the metrics calculations are done here
#@app.cache.cached(timeout=600, key_prefix='calculations')
def calculate(list):
    """Compute the dashboard metrics from a list of Stripe-style events.

    Returns ``(trimmed event list, current metrics, percentage deltas,
    churn, ltv)`` and caches the results in the Flask session.

    NOTE(review): the parameter shadows the builtin ``list``; renaming it
    would change the keyword-argument interface, so it is only flagged here.
    """
    # sort list by date integer in order to get transactions in right order
    list.sort(key=lambda x: x.dateint, reverse=True)
    # transaction calcuations
    # MRR: last month's charge volume, counting repeat customers only
    # (customers with more than one charge/refund event overall).
    mrr = sum(x.amount for x in list if get_datetime(x.dateint) >= one_month_ago and x.type in
              ("charge.succeeded","charge.refunded") and len([y for y in list if y.type in
              ("charge.succeeded","charge.refunded") and y.customer_id == x.customer_id]) > 1)
    prev_mrr = sum(x.amount for x in list if get_datetime(x.dateint) < one_month_ago and x.type in
                   ("charge.succeeded","charge.refunded") and len([y for y in list if y.type in
                   ("charge.succeeded","charge.refunded") and y.customer_id == x.customer_id]) > 1)
    # Unique customers who cancelled this month (and, below, before it).
    canceled = len(set([x.customer_id for x in list if x.type == "customer.subscription.deleted"
                        and get_datetime(x.dateint) >= one_month_ago]))
    upgrades = len([x for x in list if x.type == "Upgrade" and get_datetime(x.dateint) >= one_month_ago])
    prev_upgrades = len([x for x in list if x.type == "Upgrade" and get_datetime(x.dateint) < one_month_ago])
    downgrades = len([x for x in list if x.type == "Downgrade" and get_datetime(x.dateint) >= one_month_ago])
    prev_downgrades = len([x for x in list if x.type == "Downgrade" and get_datetime(x.dateint) < one_month_ago])
    prev_canceled = len(set([x.customer_id for x in list if x.type == "customer.subscription.deleted"
                             and get_datetime(x.dateint) < one_month_ago]))
    refunds = sum(x.amount for x in list if x.type == "charge.refunded" and get_datetime(x.dateint) >= one_month_ago)
    prev_refunds = sum(x.amount for x in list if x.type == "charge.refunded" and get_datetime(x.dateint) < one_month_ago)
    net_revenue = sum(x.amount for x in list if get_datetime(x.dateint) >= one_month_ago and x.type in
                      ("charge.succeeded","charge.refunded"))
    prev_net_revenue = sum(x.amount for x in list if get_datetime(x.dateint) < one_month_ago and x.type in
                           ("charge.succeeded","charge.refunded"))
    annual = mrr * 12
    prev_annual = prev_mrr * 12
    customers = len(set([x.customer_id for x in list if get_datetime(x.dateint) >= one_month_ago]))
    prev_customers = len(set([x.customer_id for x in list if get_datetime(x.dateint) < one_month_ago]))
    new_customers = len(set([x.customer_id for x in list if x.type == "customer.subscription.created"
                             and get_datetime(x.dateint) >= one_month_ago]))
    prev_new_customers = len(set([x.customer_id for x in list if x.type == "customer.subscription.created"
                                  and get_datetime(x.dateint) < one_month_ago]))
    # Average revenue per user, guarded against division by zero.
    arpu = ((float(mrr) / customers) if customers != 0 else 0)
    # NOTE(review): unlike ``arpu`` this does not cast to float, so under
    # Python 2 it truncates when amounts are ints -- confirm intended.
    prev_arpu = ((prev_mrr / prev_customers) if prev_customers != 0 else 0)
    #save the events list in session, so that we don't have to do an api call every time.
    eventsList = []
    for x in list:
        y = json.dumps(x.__dict__)
        eventsList.append(y)
    # cut the list down for the live transactions on main dashboard
    del list[19:]
    # calculate churn and ltv
    churn = (((float(canceled) / prev_customers) * 100) if prev_customers != 0 else 0)
    ltv = ((arpu / churn) * 100 if churn != 0 else 0)
    # the metrics that we are going to be sending to the dashboards to occupy metric tiles
    current = (mrr, refunds, net_revenue, annual, customers, new_customers, arpu, canceled, upgrades, downgrades)
    prev = (prev_mrr, prev_refunds, prev_net_revenue, prev_annual, prev_customers, prev_new_customers, prev_arpu,
            prev_canceled, prev_upgrades, prev_downgrades)
    # Month-over-month deltas, elementwise over the two tuples above.
    percentages = [convert_to_percent((x - y) / float(y)) if y != 0 else convert_to_percent(0) for x, y in zip(current,prev)]
    # add results to the session to avoid multiple calls to the API
    session['events'] = eventsList
    session['current'] = current
    session['percentages'] = percentages
    session['churn'] = churn
    session['ltv'] = ltv
    #print "just put events into the session"
    #print session['events']
    return list, current, percentages, churn, ltv
# gets the dates and the amounts for the chart for each metric to be displayed.
#@app.cache.cached(timeout=600, key_prefix='charts')
def chartify(events, metric):
    """Build the (dates, amounts) series for the dashboard chart of *metric*.

    *events* are filtered according to *metric*, bucketed by calendar day,
    and the per-day amounts summed.  Series with fewer than two points are
    padded with zero-valued points so the chart always has something to draw.
    """
    d = OrderedDict()
    dates = []
    amounts = []
    data = sorted(events, key=lambda x: x.dateint)
    #data = [x for x in data if get_datetime(x.dateint) >= one_month_ago and x.type in ("charge.succeeded",
    #                                                                                  "charge.refunded")]
    if 'net' in metric:
        # Net revenue: every charge/refund in the last month.
        data = [x for x in data if get_datetime(x.dateint) >= one_month_ago and x.type in ("charge.succeeded", "charge.refunded")]
    elif 'monthly' in metric:
        # MRR: last month's charges/refunds from repeat customers only.
        data = [x for x in data if get_datetime(x.dateint) >= one_month_ago and x.type in ("charge.succeeded","charge.refunded") and len([y for y in data if y.type in ("charge.succeeded","charge.refunded") and y.customer_id == x.customer_id]) > 1]
    elif 'annual' in metric:
        print
    elif 'average' in metric:
        print
    elif 'total' in metric:
        # BUG FIX: this branch used to iterate over the builtin ``list``
        # type (raising TypeError) and rebound ``data`` to an int, which
        # the day-bucketing code below cannot consume.  Keep ``data`` a
        # list of last month's events, consistent with the other branches.
        data = [x for x in data if get_datetime(x.dateint) >= one_month_ago]
    elif 'refunds' in metric:
        print
    elif 'churn' in metric:
        print
    elif 'lifetime' in metric:
        print
    elif 'new' in metric:
        data = [x for x in data]
    elif 'canceled' in metric:
        print
    elif 'upgrades' in metric:
        print
    elif 'downgrades' in metric:
        print
    if len(data) == 0:
        # No events at all: draw a flat zero line from yesterday to today.
        dates.append(yesterday.strftime("%B %d"))
        amounts.append(0)
        dates.append(now.strftime("%B %d"))
        amounts.append(0)
    elif len(data) == 1:
        # One event: pad with a zero point a week earlier (and one at
        # "now" unless the event happened today).
        dates.append((get_datetime(data[0].dateint) - datetime.timedelta(days=7)).strftime("%B %d"))
        amounts.append(0)
        dates.append(get_short_date_string(data[0].dateint))
        amounts.append(data[0].amount)
        if get_short_date_string(data[0].dateint) != now.strftime("%B %d"):
            dates.append(now.strftime("%B %d"))
            amounts.append(0)
    else:
        # Bucket amounts per day, then emit one summed point per day.
        for item in data:
            key = get_short_date_string(item.dateint)
            if key not in d:
                d[key] = []
            d[key].append(item.amount)
        for k, v in d.items():
            dates.append(k)
            amounts.append(sum(v))
    #session['dates'] = dates
    #session['amounts'] = amounts
    return dates, amounts
| bsd-2-clause |
GutenkunstLab/SloppyCell | Example/Robertson/generate_ensembles_predictions.py | 1 | 3417 | from numpy import *
import scipy.stats
from SloppyCell.ReactionNetworks import *
import example_net, example_model
#
# Calculate an ensemble
#
# Step scale = 3 gives faster convergence for this problem.
print("Generating ensemble")
# Monte-Carlo walk through (log) parameter space, seeded with the optimal
# parameters and the J^T J approximation to the Hessian.
ens_data, costs, ratio = Ensembles.ensemble_log_params(example_model.m,
                                                       example_model.popt,
                                                       hess=example_model.jtj,
                                                       steps=10000,
                                                       step_scale=3)
ens_data = array(ens_data)
Utility.save(ens_data, 'example.ens_data.bpkl')
#
# Calculate a cost surface
# Note that we're defining our cost here to *not* include terms from priors,
# so we create a copy of our model without the priors and use that.
#
print("Generating cost surface")
# Evaluate the cost on a 41x41 logarithmic grid over the two parameters.
Npt = 41
land_xx = logspace(-4, 2, Npt)
land_yy = logspace(1.5, 7.5, Npt)
Z = zeros((len(land_yy), len(land_xx)))
for ii,y in enumerate(land_yy):
    for jj,x in enumerate(land_xx):
        p = [x,y]
        Z[ii,jj] = example_model.noprior_m.cost(p)
Utility.save((land_xx, land_yy, Z), 'example.model_surface.bpkl')
ens_data = Utility.load('example.ens_data.bpkl')
#
# Sample from "all measured" ensemble
#
# Log-normal scatter around the optimum, as if both parameters had been
# measured directly with 25% uncertainty.
measure_uncert = 0.25
Nsamp = len(ens_data)
ens_all_measured = exp(log(example_model.popt) + measure_uncert*random.randn(Nsamp,2))
#
# Sample from "one unmeasured" ensemble
#
# First parameter measured; second constrained only by its J^T J uncertainty.
ens_one_measured = zeros((Nsamp, 2))
ens_one_measured[:,0] = exp(log(example_model.popt[0]) + measure_uncert*random.randn(Nsamp))
ens_one_measured[:,1] = exp(log(example_model.popt[1]) + example_model.jtj_uncerts[1]*random.randn(Nsamp))
#
# Sample from jtj hessian
#
samp_mat = Ensembles._sampling_matrix(example_model.jtj)
ens_jtj = array([Ensembles._trial_move(samp_mat) for ii in range(Nsamp)])
ens_jtj = exp(log(example_model.popt) + ens_jtj)
Utility.save((ens_data, ens_all_measured, ens_one_measured, ens_jtj),
             'example.ensembles.bpkl')
# Time points at which predictions will be evaluated.
tt = linspace(0,50,100)
def calc_trajectories(ens, tt):
    """Integrate the prediction network for each parameter set in *ens*.

    Returns one 'C' trajectory per ensemble member, each evaluated at the
    times *tt*.
    """
    def _traj_for(params):
        # One deterministic integration per parameter set.
        solution = Dynamics.integrate(example_net.pred_net, tt, params=params,
                                      fill_traj=False)
        return solution.get_var_traj('C')
    return [_traj_for(params) for params in ens]
def calc_trajectory_bounds(yys, lw_percent=2.5, up_percent=97.5):
    """Pointwise percentile envelope over a set of trajectories.

    Parameters
    ----------
    yys : sequence of sequences
        One trajectory per row; columns are time points.
    lw_percent, up_percent : float
        Percentiles (0-100) defining the lower and upper bounds.

    Returns
    -------
    (lower, upper) : pair of arrays with one bound per time point.

    BUG FIX: the percentile arguments were previously ignored -- the body
    hard-coded 2.5 and 97.5 regardless of what the caller passed.
    """
    up, lw = [], []
    for yt in array(yys).transpose():
        up.append(scipy.stats.scoreatpercentile(yt, up_percent))
        lw.append(scipy.stats.scoreatpercentile(yt, lw_percent))
    return array(lw), array(up)
#
# Calculate predictions
#
# Thin each ensemble by a factor of 10 to keep the integrations cheap.
pred_data = calc_trajectories(ens_data[::10], tt)
pred_all_measured = calc_trajectories(ens_all_measured[::10], tt)
pred_one_measured = calc_trajectories(ens_one_measured[::10], tt)
pred_jtj = calc_trajectories(ens_jtj[::10], tt)
#
# Calculate 95% bounds on predictions
#
pred_data_lw, pred_data_up = calc_trajectory_bounds(pred_data)
pred_all_measured_lw, pred_all_measured_up = calc_trajectory_bounds(pred_all_measured)
pred_one_measured_lw, pred_one_measured_up = calc_trajectory_bounds(pred_one_measured)
pred_jtj_lw, pred_jtj_up = calc_trajectory_bounds(pred_jtj)
Utility.save((pred_data_lw, pred_data_up,
              pred_all_measured_lw, pred_all_measured_up,
              pred_one_measured_lw, pred_one_measured_up,
              pred_jtj_lw, pred_jtj_up), 'example.pred_bounds.bpkl')
| bsd-3-clause |
yitian134/chromium | tools/isolate/isolate_test.py | 5 | 2915 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import logging
import os
import sys
import tempfile
import unittest
import isolate
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
class Isolate(unittest.TestCase):
    """Unit tests for isolate.py's option processing and .isolate loading."""
    def setUp(self):
        # Everything should work even from another directory.
        os.chdir(os.path.dirname(ROOT_DIR))
    def _run_process_options(self, values, variables, more_expected_data):
        """Runs isolate.process_options() and verify the results."""
        fd, temp_path = tempfile.mkstemp()
        try:
            # Reuse the file descriptor. On windows, it needs to be closed before
            # process_options() opens it, because mkstemp() opens it without file
            # sharing enabled.
            with os.fdopen(fd, 'w') as f:
                json.dump(values, f)
            root_dir, infiles, data = isolate.process_options(
                variables,
                temp_path,
                os.path.join('isolate', 'data', 'isolate', 'touch_root.isolate'),
                self.fail)
        finally:
            os.remove(temp_path)
        # Baseline of what process_options() must return for touch_root.isolate;
        # callers layer their own expectations on top via more_expected_data.
        expected_data = {
            u'command': ['python', 'touch_root.py'],
            u'read_only': None,
            u'relative_cwd': 'data/isolate',
            u'resultfile': temp_path,
            u'resultdir': tempfile.gettempdir(),
            u'variables': {},
        }
        expected_data.update(more_expected_data)
        expected_files = sorted(
            ('isolate.py', os.path.join('data', 'isolate', 'touch_root.py')))
        self.assertEquals(ROOT_DIR, root_dir)
        self.assertEquals(expected_files, sorted(infiles))
        self.assertEquals(expected_data, data)
    def test_load_empty(self):
        # An empty .isolate file yields no command, no files, default read_only.
        content = "{}"
        command, infiles, read_only = isolate.load_isolate(
            content, self.fail)
        self.assertEquals([], command)
        self.assertEquals([], infiles)
        self.assertEquals(None, read_only)
    def test_process_options_empty(self):
        # Passing nothing generates nothing unexpected.
        self._run_process_options({}, {}, {})
    def test_process_options(self):
        # The previous unexpected variables are kept, the 'variables' dictionary is
        # updated.
        values = {
            'command': 'maybe',
            'foo': 'bar',
            'read_only': 2,
            'relative_cwd': None,
            'resultdir': '2',
            'resultfile': [],
            'variables': {
                'unexpected': 'seriously',
                # This value is updated.
                'expected': 'stale',
            },
        }
        expected_data = {
            u'foo': u'bar',
            u'variables': {
                'expected': 'very',
                u'unexpected': u'seriously',
            },
        }
        self._run_process_options(values, {'expected': 'very'}, expected_data)
if __name__ == '__main__':
    # -v enables debug logging; otherwise only errors are shown.
    logging.basicConfig(
        level=logging.DEBUG if '-v' in sys.argv else logging.ERROR,
        format='%(levelname)5s %(filename)15s(%(lineno)3d): %(message)s')
    unittest.main()
| bsd-3-clause |
crazy-canux/django | tests/project_template/test_settings.py | 274 | 1083 | import unittest
from django.test import TestCase
from django.utils import six
@unittest.skipIf(six.PY2,
                 'Python 2 cannot import the project template because '
                 'django/conf/project_template doesn\'t have an __init__.py file.')
class TestStartProjectSettings(TestCase):
    """Checks on the settings shipped with the startproject template."""
    def test_middleware_classes_headers(self):
        """
        Ensure headers sent by the default MIDDLEWARE_CLASSES do not
        inadvertently change. For example, we never want "Vary: Cookie" to
        appear in the list since it prevents the caching of responses.
        """
        # Imported here (not at module level) because the import itself is
        # what fails on Python 2; the skipIf above guards this method.
        from django.conf.project_template.project_name.settings import MIDDLEWARE_CLASSES
        with self.settings(
            MIDDLEWARE_CLASSES=MIDDLEWARE_CLASSES,
            ROOT_URLCONF='project_template.urls',
        ):
            response = self.client.get('/empty/')
            headers = sorted(response.serialize_headers().split(b'\r\n'))
            self.assertEqual(headers, [
                b'Content-Type: text/html; charset=utf-8',
                b'X-Frame-Options: SAMEORIGIN',
            ])
| bsd-3-clause |
s-macke/jor1k-sysroot | fs/usr/lib/python2.7/encodings/big5hkscs.py | 816 | 1039 | #
# big5hkscs.py: Python Unicode Codec for BIG5HKSCS
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
#
import _codecs_hk, codecs
import _multibytecodec as mbc
codec = _codecs_hk.getcodec('big5hkscs')
class Codec(codecs.Codec):
    # Stateless encode/decode, delegated to the C implementation.
    encode = codec.encode
    decode = codec.decode

class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
                         codecs.IncrementalEncoder):
    codec = codec

class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
                         codecs.IncrementalDecoder):
    codec = codec

class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
    codec = codec

class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
    codec = codec

def getregentry():
    """Return the CodecInfo used to register this codec with the codecs machinery."""
    return codecs.CodecInfo(
        name='big5hkscs',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
| mit |
VahidGh/ChannelWorm | channelworm/fitter/examples/EGL-36.py | 4 | 7717 | """
Example of using cwFitter to generate a HH model for EGL-36 ion channel
Based on experimental data from doi:10.1016/S0896-6273(00)80355-4
"""
import os.path
import sys
import time
import numpy as np
import matplotlib.pyplot as plt
sys.path.append('../../..')
from channelworm.fitter import *
if __name__ == '__main__':
cwd=os.getcwd()
path = cwd+'/egl-36-data/boltzmannFit/'
if not os.path.exists(path):
os.makedirs(path)
pov_id = 11
vc_id = 12
args = {'weight':{'start':1,'peak':1,'tail':1,'end':1}}
sampleData = {}
myInitiator = initiators.Initiator()
print 'Sample Data:'
sampleData['POV'] = myInitiator.get_graphdata_from_db(pov_id,plot=False)
print 'POV'
sampleData['VClamp'] = myInitiator.get_graphdata_from_db(vc_id, plot=False)
print 'VClamp'
scale = False
bio_params = myInitiator.get_bio_params()
sim_params = myInitiator.get_sim_params()
myEvaluator = evaluators.Evaluator(sampleData,sim_params,bio_params,scale=scale,args=args)
print 'Scale: %s'%scale
print 'args:'
print args
# bio parameters for EGL-36
bio_params['cell_type'] = 'Xenopus oocytes'
bio_params['channel_type'] = 'EGL-36'
bio_params['ion_type'] = 'K'
bio_params['val_cell_params'][0] = 200e-9 # C_mem DOI: 10.1074/jbc.M605814200
bio_params['val_cell_params'][1] = 20e-6 # area DOI: 10.1101/pdb.top066308
bio_params['gate_params'] = {'vda': {'power': 1},'cd': {'power': 1}}
print 'Gate_params:'
print bio_params['gate_params']
bio_params['channel_params'] = ['g_dens','e_rev']
bio_params['unit_chan_params'] = ['S/m2','V']
bio_params['min_val_channel'] = [1 , -150e-3]
bio_params['max_val_channel'] = [10, 150e-3]
bio_params['channel_params'].extend(['v_half_a','k_a','T_a'])
bio_params['unit_chan_params'].extend(['V','V','s'])
bio_params['min_val_channel'].extend([-0.15, 0.001, 0.001])
bio_params['max_val_channel'].extend([ 0.15, 0.1, 1])
# # #Parameters for Ca-dependent inactivation (Boyle & Cohen 2008)
bio_params['channel_params'].extend(['ca_half','alpha_ca','k_ca','T_ca'])
bio_params['unit_chan_params'].extend(['M','','M','s'])
bio_params['min_val_channel'].extend([1e-10,0.1, -1e-6, 1e-4])
bio_params['max_val_channel'].extend([1e-6 , 1 , -1e-9, 1])
# TODO: Separate simulator protocols from plot
# Simulation parameters for EGL-36 VClamp and POV
sim_params['v_hold'] = -90e-3
sim_params['I_init'] = 0
sim_params['pc_type'] = 'VClamp'
sim_params['deltat'] = 1e-4
sim_params['duration'] = 1.2
sim_params['start_time'] = 0.045
sim_params['end_time'] = 1.055
sim_params['protocol_start'] = -90e-3
sim_params['protocol_end'] = 90e-3
sim_params['protocol_steps'] = 10e-3
sim_params['ca_con'] = 1e-6
print 'Sim_params:'
print sim_params
# opt = '-pso'
# opt = '-ga'
# opt = 'leastsq'
opt = None
print 'Optimization method:'
print opt
if len(sys.argv) == 2:
opt = sys.argv[1]
start = time.time()
if opt == '-ga':
opt_args = myInitiator.get_opt_params()
opt_args['max_evaluations'] = 300
opt_args['population_size'] = 600
# opt_args['verbose'] = False
best_candidate, score = myEvaluator.ga_evaluate(min=bio_params['min_val_channel'],
max=bio_params['max_val_channel'],
args=opt_args)
elif opt == '-pso':
opt_args = myInitiator.get_opt_params(type='PSO')
opt_args['minstep'] = 1e-18
opt_args['minfunc'] = 1e-18
opt_args['swarmsize'] = 500
opt_args['maxiter'] = 100
opt_args['POV_dist'] = 4e-4
best_candidate, score = myEvaluator.pso_evaluate(lb=bio_params['min_val_channel'],
ub=bio_params['max_val_channel'],
args=opt_args)
else:
opt_args = {}
# vda,cd *******
best_candidate = [ 3.00231776e+00, -9.00073633e-02, 6.02673501e-02, 1.95933741e-02,
2.53990016e-02, 1.00000000e-9, 8.18616232e-01, -3.29244576e-08,
2.42556384e-01] # 7.85842303587e-15
if opt_args:
print 'Optimization parameters:'
print opt_args
if opt == 'leastsq':
# best_candidate = np.asarray(bio_params['min_val_channel']) + np.asarray(bio_params['max_val_channel']) / 2
best_candidate_params = dict(zip(bio_params['channel_params'],best_candidate))
cell_var = dict(zip(bio_params['cell_params'],bio_params['val_cell_params']))
# sim_params['protocol_start'] = 10e-3
# sim_params['protocol_end'] = 70e-3
# vcSim = simulators.Simulator(sim_params,best_candidate_params,cell_var,bio_params['gate_params'],act_fit=True)
vcSim = simulators.Simulator(sim_params,best_candidate_params,cell_var,bio_params['gate_params'])
vcEval = evaluators.Evaluator(sampleData,sim_params,bio_params,scale=scale)
# args['weight'] = {'POV':10}
args['weight'] = {}
args['ftol'] = 1e-14
# args['xtol'] = 1e-14
# args['full_output'] = 1
result = vcSim.vclamp_leastsq(params= bio_params['channel_params'],
best_candidate= best_candidate,
sampleData=sampleData,args=args)
print 'Optimized using Scipy leastsq:'
print result
print 'Full output:'
print result
print 'leastsq Parameters:'
print args
best_candidate = result
if 'POV' in sampleData:
POV_fit_cost = vcEval.pov_cost(result)
print 'POV cost:'
print POV_fit_cost
VClamp_fit_cost = vcEval.vclamp_cost(result)
print 'VClamp cost:'
print VClamp_fit_cost
secs = time.time()-start
print("----------------------------------------------------\n\n"
+"Ran in %f seconds (%f mins)\n"%(secs, secs/60.0))
best_candidate_params = dict(zip(bio_params['channel_params'],best_candidate))
cell_var = dict(zip(bio_params['cell_params'],bio_params['val_cell_params']))
print 'best candidate after optimization:'
print best_candidate_params
mySimulator = simulators.Simulator(sim_params,best_candidate_params,cell_var,bio_params['gate_params'],act_fit=True)
mySimulator = simulators.Simulator(sim_params,best_candidate_params,cell_var,bio_params['gate_params'])
bestSim = mySimulator.patch_clamp()
#
myModelator = modelators.Modelator(bio_params,sim_params)
myModelator.compare_plots(sampleData,bestSim,show=True, path=path)
myModelator.patch_clamp_plots(bestSim,show=True, path=path)
# # Decreasing voltage steps for pretty gating plots
sim_params['protocol_steps'] = 1e-3
# # sim_params['deltat'] = 1e-5
mySimulator = simulators.Simulator(sim_params,best_candidate_params,cell_var,bio_params['gate_params'])
bestSim = mySimulator.patch_clamp()
#
myModelator = modelators.Modelator(bio_params,sim_params)
myModelator.gating_plots(bestSim, show=True, path=path)
# Generate NeuroML2 file
contributors = [{'name': 'Vahid Ghayoomi','email': 'vahidghayoomi@gmail.com'}]
model_params = myInitiator.get_modeldata_from_db(fig_id=vc_id,model_id=3,contributors=contributors,file_path=path)
print model_params
nml2_file = myModelator.generate_channel_nml2(bio_params,best_candidate_params,model_params)
run_nml_out = myModelator.run_nml2(model_params['file_name'])
| mit |
dd00/commandergenius | project/jni/python/src/Lib/dis.py | 166 | 6484 | """Disassembler of Python byte code into mnemonics."""
import sys
import types
from opcode import *
from opcode import __all__ as _opcodes_all
__all__ = ["dis","disassemble","distb","disco"] + _opcodes_all
del _opcodes_all
def dis(x=None):
    """Disassemble classes, methods, functions, or code.

    With no argument, disassemble the last traceback.
    """
    if x is None:
        distb()
        return
    if type(x) is types.InstanceType:
        # Old-style instance: disassemble its class.
        x = x.__class__
    if hasattr(x, 'im_func'):
        # Bound/unbound method: use the underlying function.
        x = x.im_func
    if hasattr(x, 'func_code'):
        # Function: use its code object.
        x = x.func_code
    if hasattr(x, '__dict__'):
        # Class or module: disassemble each code-bearing attribute in turn.
        items = x.__dict__.items()
        items.sort()
        for name, x1 in items:
            if type(x1) in (types.MethodType,
                            types.FunctionType,
                            types.CodeType,
                            types.ClassType):
                print "Disassembly of %s:" % name
                try:
                    dis(x1)
                except TypeError, msg:
                    print "Sorry:", msg
                print
    elif hasattr(x, 'co_code'):
        disassemble(x)
    elif isinstance(x, str):
        # A raw bytecode string: no code object metadata available.
        disassemble_string(x)
    else:
        raise TypeError, \
              "don't know how to disassemble %s objects" % \
              type(x).__name__
def distb(tb=None):
    """Disassemble a traceback (default: last traceback)."""
    if tb is None:
        try:
            tb = sys.last_traceback
        except AttributeError:
            raise RuntimeError, "no last traceback to disassemble"
    # Walk to the innermost frame: that is where the exception was raised.
    while tb.tb_next: tb = tb.tb_next
    disassemble(tb.tb_frame.f_code, tb.tb_lasti)
def disassemble(co, lasti=-1):
    """Disassemble a code object.

    When *lasti* is a valid offset the instruction there is flagged with
    '-->' (used by distb to mark the current instruction).
    """
    code = co.co_code
    labels = findlabels(code)
    linestarts = dict(findlinestarts(co))
    n = len(code)
    i = 0
    extended_arg = 0
    free = None
    while i < n:
        c = code[i]
        op = ord(c)
        if i in linestarts:
            # New source line: print its number in the left column.
            if i > 0:
                print
            print "%3d" % linestarts[i],
        else:
            print ' ',
        if i == lasti: print '-->',
        else: print ' ',
        if i in labels: print '>>',
        else: print ' ',
        print repr(i).rjust(4),
        print opname[op].ljust(20),
        i = i+1
        if op >= HAVE_ARGUMENT:
            # Argument is two little-endian bytes, plus any EXTENDED_ARG prefix.
            oparg = ord(code[i]) + ord(code[i+1])*256 + extended_arg
            extended_arg = 0
            i = i+2
            if op == EXTENDED_ARG:
                extended_arg = oparg*65536L
            print repr(oparg).rjust(5),
            # Annotate the argument in human-readable form when possible.
            if op in hasconst:
                print '(' + repr(co.co_consts[oparg]) + ')',
            elif op in hasname:
                print '(' + co.co_names[oparg] + ')',
            elif op in hasjrel:
                print '(to ' + repr(i + oparg) + ')',
            elif op in haslocal:
                print '(' + co.co_varnames[oparg] + ')',
            elif op in hascompare:
                print '(' + cmp_op[oparg] + ')',
            elif op in hasfree:
                if free is None:
                    # Computed lazily; only needed for closure opcodes.
                    free = co.co_cellvars + co.co_freevars
                print '(' + free[oparg] + ')',
        print
def disassemble_string(code, lasti=-1, varnames=None, names=None,
                       constants=None):
    """Disassemble a raw bytecode string (no code object metadata).

    Optional varnames/names/constants supply what a code object would;
    missing ones fall back to printing the numeric argument.
    """
    labels = findlabels(code)
    n = len(code)
    i = 0
    while i < n:
        c = code[i]
        op = ord(c)
        if i == lasti: print '-->',
        else: print ' ',
        if i in labels: print '>>',
        else: print ' ',
        print repr(i).rjust(4),
        print opname[op].ljust(15),
        i = i+1
        if op >= HAVE_ARGUMENT:
            oparg = ord(code[i]) + ord(code[i+1])*256
            i = i+2
            print repr(oparg).rjust(5),
            if op in hasconst:
                if constants:
                    print '(' + repr(constants[oparg]) + ')',
                else:
                    print '(%d)'%oparg,
            elif op in hasname:
                if names is not None:
                    print '(' + names[oparg] + ')',
                else:
                    print '(%d)'%oparg,
            elif op in hasjrel:
                print '(to ' + repr(i + oparg) + ')',
            elif op in haslocal:
                if varnames:
                    print '(' + varnames[oparg] + ')',
                else:
                    print '(%d)' % oparg,
            elif op in hascompare:
                print '(' + cmp_op[oparg] + ')',
        print
disco = disassemble # XXX Legacy alias, kept for backwards compatibility
def findlabels(code):
    """Detect all offsets in a byte code which are jump targets.

    Returns the list of offsets, in the order they are first seen.
    """
    targets = []
    offset = 0
    size = len(code)
    while offset < size:
        opcode = ord(code[offset])
        offset += 1
        if opcode < HAVE_ARGUMENT:
            continue
        # Two-byte little-endian argument (classic bytecode layout).
        arg = ord(code[offset]) + ord(code[offset + 1]) * 256
        offset += 2
        if opcode in hasjrel:
            destination = offset + arg
        elif opcode in hasjabs:
            destination = arg
        else:
            continue
        if destination >= 0 and destination not in targets:
            targets.append(destination)
    return targets
def findlinestarts(code):
    """Find the offsets in a byte code which are start of lines in the source.

    Generate (offset, lineno) pairs as described in Python/compile.c.
    """
    lnotab = code.co_lnotab
    # co_lnotab alternates (byte increment, line increment) pairs.
    pairs = zip([ord(c) for c in lnotab[0::2]],
                [ord(c) for c in lnotab[1::2]])
    emitted = None
    lineno = code.co_firstlineno
    addr = 0
    for byte_step, line_step in pairs:
        if byte_step:
            if lineno != emitted:
                yield (addr, lineno)
                emitted = lineno
            addr += byte_step
        lineno += line_step
    if lineno != emitted:
        yield (addr, lineno)
def _test():
    """Simple test program to disassemble a file."""
    if sys.argv[1:]:
        if sys.argv[2:]:
            sys.stderr.write("usage: python dis.py [-|file]\n")
            sys.exit(2)
        fn = sys.argv[1]
        # "-" means stdin, like most Unix tools.
        if not fn or fn == "-":
            fn = None
    else:
        fn = None
    if fn is None:
        f = sys.stdin
    else:
        f = open(fn)
    source = f.read()
    if fn is not None:
        f.close()
    else:
        fn = "<stdin>"
    code = compile(source, fn, "exec")
    dis(code)
if __name__ == "__main__":
_test()
| lgpl-2.1 |
Y3K/django | tests/forms_tests/widget_tests/test_datetimeinput.py | 247 | 2367 | from datetime import datetime
from django.forms import DateTimeInput
from django.test import override_settings
from django.utils import translation
from .base import WidgetTest
class DateTimeInputTest(WidgetTest):
    """Rendering tests for the DateTimeInput widget."""
    widget = DateTimeInput()

    def test_render_none(self):
        # A None value renders as an empty text input.
        self.check_html(self.widget, 'date', None, '<input type="text" name="date" />')

    def test_render_value(self):
        """
        The microseconds are trimmed on display, by default.
        """
        d = datetime(2007, 9, 17, 12, 51, 34, 482548)
        self.assertEqual(str(d), '2007-09-17 12:51:34.482548')
        self.check_html(self.widget, 'date', d, html=(
            '<input type="text" name="date" value="2007-09-17 12:51:34" />'
        ))
        self.check_html(self.widget, 'date', datetime(2007, 9, 17, 12, 51, 34), html=(
            '<input type="text" name="date" value="2007-09-17 12:51:34" />'
        ))
        self.check_html(self.widget, 'date', datetime(2007, 9, 17, 12, 51), html=(
            '<input type="text" name="date" value="2007-09-17 12:51:00" />'
        ))

    def test_render_formatted(self):
        """
        Use 'format' to change the way a value is displayed.
        """
        widget = DateTimeInput(
            format='%d/%m/%Y %H:%M', attrs={'type': 'datetime'},
        )
        d = datetime(2007, 9, 17, 12, 51, 34, 482548)
        self.check_html(widget, 'date', d, html='<input type="datetime" name="date" value="17/09/2007 12:51" />')

    @override_settings(USE_L10N=True)
    @translation.override('de-at')
    def test_l10n(self):
        # Localized rendering uses the Austrian German datetime format.
        d = datetime(2007, 9, 17, 12, 51, 34, 482548)
        self.check_html(self.widget, 'date', d, html=(
            '<input type="text" name="date" value="17.09.2007 12:51:34" />'
        ))

    @override_settings(USE_L10N=True)
    @translation.override('de-at')
    def test_locale_aware(self):
        # With USE_L10N=False the ISO format wins; overriding to another
        # locale (Spanish) switches the format again.
        d = datetime(2007, 9, 17, 12, 51, 34, 482548)
        with self.settings(USE_L10N=False):
            self.check_html(
                self.widget, 'date', d,
                html='<input type="text" name="date" value="2007-09-17 12:51:34" />',
            )
        with translation.override('es'):
            self.check_html(
                self.widget, 'date', d,
                html='<input type="text" name="date" value="17/09/2007 12:51:34" />',
            )
| bsd-3-clause |
KrbAlmryde/Utilities | WordBoundary/HighPredictability.py | 1 | 6889 | dset = [(1,'L','Cerebellum-vermis',None,True,None),
(1,'R','Cerebellum-VI',None,True,True),
(1,'L','Cingulate-posterior',None,None,True),
(1,'R','Cingulate-posterior',None,None,True),
(1,'R','IFG-pars opercularis',None,None,True),
(1,'R','IFG-pars triangularis',None,None,True),
(1,'R','Lingual Gyrus',None,None,True),
(1,'R','LOC',None,True,None),
(1,'R','MTG',True,True,None),
(1,'L','PostCG-dorsolateral',None,None,True),
(1,'L','PreCG-dorsolateral 1',None,True,True),
(1,'L','SFG-dorsal/paracingulate',None,True,None),
(1,'R','SFG-dorsal/paracingulate',None,True,None),
(1,'L','STG',True,True,True),
(1,'R','STG',None,None,True),
(1,'L','Thalamus',None,True,None),
(1,'R','Thalamus',None,True,True),
(2,'R','Angular Gyrus',None,True,None),
(2,'R','Caudate-head',True,None,None),
(2,'R','Caudate-head 1',True,None,None),
(2,'R','Cingulate-anterior',None,None,True),
(2,'R','Insula-anterior',None,None,True),
(2,'R','Insula-middle',None,None,True),
(2,'R','LOC-dorsolateral',None,True,None),
(2,'L','LOC-inferior',None,True,True),
(2,'R','LOC-inferior',True,None,None),
(2,'L','MTG-anterior',True,None,None),
(2,'R','MTG-anterior',True,None,None),
(2,'L','MTG-posterior',True,None,None),
(2,'R','MTG-posterior',True,None,None),
(2,'R','PreCG-lateral',True,True,None),
(2,'L','Precuneus-inferior',None,None,True),
(2,'R','Precuneus-inferior',True,None,True),
(2,'L','Precuneus-superior',True,None,None),
(2,'L','SMG',None,None,True),
(2,'L','SMG 1',None,True,None),
(2,'R','STG-posterior',None,None,True),
(2,'L','Subcallosal gray',None,None,True),
(3,'L','Angular Gyrus',True,True,True),
(3,'L','Anterior Insula',None,None,True),
(3,'L','Brainstem-midbrain',None,None,True),
(3,'R','Brainstem-midbrain',None,None,True),
(3,'R','Cingulate-posterior inferior',None,None,True),
(3,'R','Fusiform-occipital division',None,True,None),
(3,'L','IFG-pars opercularis',True,True,None),
(3,'R','IFG-pars opercularis 1',True,None,None),
(3,'R','IFG-pars orbitalis',None,True,True),
(3,'R','Insula-anterior 1',None,None,True),
(3,'R','Insula-middle 1',True,True,None),
(3,'R','Lingual Gyrus 1',None,True,None),
(3,'R','LOC-dorsolateral 1',None,None,True),
(3,'R','LOC-superior',True,None,True),
(3,'L','MFG--anterior',None,True,None),
(3,'L','MFG-dorsolateral',None,None,True),
(3,'L','MFG-ventral',None,None,True),
(3,'L','MTG',True,True,True),
(3,'L','MTG-posterior 1',None,True,None),
(3,'L','Orbital Gyrus',None,True,None),
(3,'L','PreCG-Inferior',None,True,None),
(3,'R','Precuneus-inferior 1',None,None,True),
(3,'L','Precuneus-superior 1',True,None,True),
(3,'L','SFG-dorsomedial',True,None,True),
(3,'R','SFG-dorsomedial',True,True,True),
(3,'L','SFG-frontal pole',None,None,True),
(3,'L','SFG-ventromedial',None,None,True),
(3,'R','SFG-ventromedial',None,True,True),
(3,'L','SMG-inferior',None,True,None),
(3,'R','STG-temporal pole',None,True,None),
(3,'L','Thalamus 1',None,True,True),
(3,'R','Thalamus 1',None,None,True),
(4,'L','Amygdala',None,None,True),
(4,'L','Angular Gyrus 1',True,None,True),
(4,'R','Angular Gyrus 1',True,True,True),
(4,'L','Brainstem-midbrain 1',True,None,None),
(4,'R','cerebellum-Crus II',None,None,True),
(4,'L','Cerebellum-V',True,None,None),
(4,'R','Cerebellum-V',None,True,None),
(4,'R','Cerebellum-Vermis',True,None,None),
(4,'R','Cerebellum-VI 1',True,None,None),
(4,'L','Cingulate-anterior',None,True,None),
(4,'R','Cingulate-anterior 1',None,True,None),
(4,'L','Cingulate-posterior 1',None,None,True),
(4,'L','Fusiform-temporal division',True,None,None),
(4,'L','Globus Palidus',None,None,True),
(4,'L','IFG-pars opercularis 1',True,True,None),
(4,'L','IFG-pars orbitalis',None,None,True),
(4,'R','Lingual Gyrus 2',True,None,True),
(4,'R','Lingual/Cerebellum',True,True,True),
(4,'L','MFG-dorsolateral 1',None,True,None),
(4,'L','MFG-frontal pole',True,None,None),
(4,'L','MTG 1',True,True,True),
(4,'R','MTG 1',None,None,True),
(4,'L','Orbital gyrus',True,True,True),
(4,'L','Parahippocampal gyrus',True,None,None),
(4,'L','Parahippocampal gyrus 1',None,None,True),
(4,'R','Parietal operculum',True,True,True),
(4,'R','PostCG-dorsal',True,True,True),
(4,'R','PreCG-dorsal',None,True,None),
(4,'L','Precuneus-inferior 1',None,True,None),
(4,'R','Precuneus-inferior 2',True,None,None),
(4,'L','Precuneus-middle',True,None,None),
(4,'L','Putamen',True,None,None),
(4,'L','SFG-dorsomedial 1',True,None,None),
(4,'R','SFG-dorsomedial 1',True,True,True),
(4,'L','SFG-dorsomedial 2',None,True,None),
(4,'L','SFG-dosolateral',None,None,True),
(4,'L','SFG-frontal pole 1',True,None,True),
(4,'R','SFG-ventromedial 1',None,None,True),
(4,'R','STG 1',True,True,None),
(4,'L','Thalamus 1',True,None,None),
(5,'R','Angular Gyrus 2',None,None,True),
(5,'L','Cerebellum-crus I',None,True,None),
(5,'R','Cerebellum-crus I',True,None,None),
(5,'L','Cingulate-anterior 1',True,True,True),
(5,'R','Cingulate-anterior 2',None,None,True),
(5,'L','Cingulate-posterior 2',True,True,True),
(5,'L','Cingulate-posterior inferior',None,None,True),
(5,'R','Cingulate-posterior inferior 1',None,True,None),
(5,'R','Cingulate-posterior 1',True,None,None),
(5,'L','Cuneate gyrus',None,True,True),
(5,'R','Cuneate gyrus',None,True,True),
(5,'L','Fusiform-Temporal division',None,None,True),
(5,'R','IFG-pars opercularis 2',None,None,True),
(5,'R','IFG-pars triangularis 1',True,True,None),
(5,'R','ITG',True,True,True),
(5,'L','ITG-posterior',True,True,None),
(5,'L','LOC-superior',None,True,True),
(5,'R','LOC-superior 1',True,True,None),
(5,'L','MFG-dorsolateral',None,None,True),
(5,'L','PreCG-dorsolateral',True,True,True),
(5,'L','SFG--ventromedial',True,None,None),
(5,'L','SFG-frontal pole 2',None,True,True),
(5,'R','SFG-frontal pole',True,True,True),
(5,'L','SMG',True,None,None),
(5,'R','SMG',True,True,None),
(5,'L','SPL',None,True,None),
(5,'R','STG-temporal pole 1',None,True,True)]
| mit |
NickPresta/sentry | src/sentry/utils/safe.py | 3 | 2330 | """
sentry.utils.safe
~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2013 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import logging
from django.db import transaction
from sentry.constants import MAX_VARIABLE_SIZE, MAX_DICTIONARY_ITEMS
from sentry.utils.strings import truncatechars
def safe_execute(func, *args, **kwargs):
    """Call ``func(*args, **kwargs)`` and return its result.

    If the call raises, the current (unmanaged) database transaction is rolled
    back, the error is logged under 'sentry.errors.plugins' with full call
    context, and None is returned instead of propagating the exception.
    """
    try:
        result = func(*args, **kwargs)
    except Exception, e:
        # The failed call may have left the DB connection in a dirty state.
        transaction.rollback_unless_managed()
        if hasattr(func, 'im_class'):
            # Python 2 bound method: log the class the method is defined on
            cls = func.im_class
        else:
            # plain callable: fall back to the callable's own type
            cls = func.__class__
        logger = logging.getLogger('sentry.errors.plugins')
        logger.error('Error processing %r on %r: %s', func.__name__, cls.__name__, e, extra={
            'func_module': cls.__module__,
            'func_args': args,
            'func_kwargs': kwargs,
        }, exc_info=True)
    else:
        # Only reached when no exception occurred; errors yield an implicit None.
        return result
def trim(value, max_size=MAX_VARIABLE_SIZE, max_depth=3, _depth=0, _size=0, **kwargs):
    """
    Truncates a value to ```MAX_VARIABLE_SIZE```.
    The method of truncation depends on the type of value:
    - dicts and lists/tuples are rebuilt element by element, stopping once the
      running size budget reaches ``max_size`` (so trailing items are dropped);
    - strings are cut with truncatechars() to whatever budget remains;
    - values nested deeper than ``max_depth`` are trimmed as their repr();
    - anything else is returned unchanged.
    ``_depth`` and ``_size`` are internal accumulators for the recursion;
    extra ``kwargs`` are accepted and ignored.
    """
    # options forwarded to recursive calls (one level deeper, same budget)
    options = {
        'max_depth': max_depth,
        'max_size': max_size,
        '_depth': _depth + 1,
    }
    if _depth > max_depth:
        # too deep: collapse the value to its repr and trim that string instead
        return trim(repr(value), _size=_size, max_size=max_size)
    elif isinstance(value, dict):
        result = {}
        _size += 2  # account for the '{}' delimiters
        for k, v in value.iteritems():
            trim_v = trim(v, _size=_size, **options)
            result[k] = trim_v
            # +1 accounts for the key/value separator in the rendered output
            _size += len(unicode(trim_v)) + 1
            if _size >= max_size:
                # budget exhausted: remaining items are dropped
                break
    elif isinstance(value, (list, tuple)):
        result = []
        _size += 2  # account for the '[]' delimiters
        for v in value:
            trim_v = trim(v, _size=_size, **options)
            result.append(trim_v)
            _size += len(unicode(trim_v))
            if _size >= max_size:
                break
    elif isinstance(value, basestring):
        result = truncatechars(value, max_size - _size)
    else:
        # numbers, None, arbitrary objects: left untouched
        result = value
    return result
def trim_dict(value, max_items=MAX_DICTIONARY_ITEMS, **kwargs):
    """Mutate ``value`` in place so it keeps at most ``max_items`` entries,
    trimming each surviving value with trim().

    Extra ``kwargs`` are forwarded to trim(). Returns None (in-place mutation),
    matching the original behavior.
    """
    # Snapshot the keys before mutating: deleting while iterating a live
    # .keys() view breaks on Python 3, and list() makes the copy explicit.
    for idx, key in enumerate(list(value.keys())):
        if idx >= max_items:
            # Past the cap: drop the entry without wasting a trim() call on it
            # (previously every value was trimmed first, then the excess deleted).
            del value[key]
        else:
            value[key] = trim(value[key], **kwargs)
| bsd-3-clause |
sdvillal/manysources | manysources/analyses/substructures.py | 1 | 31188 | from collections import defaultdict
import os.path as op
from itertools import izip
import warnings
import cPickle as pickle
import glob
import math
import os
import h5py
import pandas as pd
import numpy as np
from rdkit import Chem
import matplotlib.pyplot as plt
from manysources import MANYSOURCES_ROOT
from manysources.datasets import ManysourcesDataset, MANYSOURCES_MOLECULES
from manysources.hub import Hub
warnings.simplefilter("error")
PROBLEMATIC_EXPIDS = {'bcrp':[489, 1840, 2705, 2780, 3842], 'hERG':[]}
def substructs_weights_one_source(source, model='logreg3', feats='ecfps1', dset='bcrp', num_expids=4096):
    """
    Given a source, what are the weights of all the substructures when this source is in train / in test for LSO for
    all requested expids. We now use the Hub. For the cases where the source is in train, it happens many times per
    expid so we take the average.
    Returns a tuple (expids_with_source_in_train, weights_when_in_train, weights_when_in_test);
    the two weight containers are arrays with one row per experiment.
    """
    importances_source_in_lso = []
    expids = tuple(range(num_expids))
    hub = Hub(dset_id=dset, lso=True, model=model, feats=feats, expids=expids)
    source_coocs = hub.scoocs()
    # indices (expids, fold id) of source in test
    indices_in_test = source_coocs[source_coocs[source]].index
    # drop experiments known to be broken for this dataset
    indices_in_test = [(expid, foldnum) for (expid, foldnum) in indices_in_test if expid not in PROBLEMATIC_EXPIDS[dset]]
    # indices (expids, fold ids) of source in train
    indices_in_train = source_coocs[source_coocs[source]==False].index
    # transform it into a dictionary of {expids:[foldnums]}
    indices_in_train_dict = defaultdict(list)
    for expid, foldnum in indices_in_train:
        if expid not in PROBLEMATIC_EXPIDS[dset]:
            indices_in_train_dict[expid].append(foldnum)
    # get corresponding weights
    weights,_, expids, foldnums = hub.logreg_models()
    # rows of the (model x feature) weight matrix where the source was held out
    rows_out = [row for row, (expid, foldnum) in enumerate(izip(expids, foldnums))
                if (expid, foldnum) in indices_in_test]
    weights_in_test = weights[rows_out, :].todense()
    # For train, we get several foldnums per expids and we want to average those weights
    for expid_in in indices_in_train_dict.keys():
        rows = [row for row, (expid, fold) in enumerate(izip(expids, foldnums)) if expid == expid_in
                and fold in indices_in_train_dict[expid_in]]
        w = weights[rows, :]
        # column-wise mean over the folds of this experiment -> one weight vector
        w = np.squeeze(np.asarray(w.tocsc().mean(axis=0)))
        importances_source_in_lso.append(w)
    return indices_in_train_dict.keys(), np.array(importances_source_in_lso), np.asarray(weights_in_test)
def proportion_relevant_features(source, dset='bcrp', model='logreg3', feats='ecfps1'):
    """
    Average number of features with a non-null logistic weight, computed both
    with ``source`` in the training set and with it held out (LSO).
    Here, "relevant" refers to having a non-zero weight in the models.
    Returns (mean_nonzero_when_in_train, mean_nonzero_when_in_test).
    """
    _, weights_when_in, weights_when_out = substructs_weights_one_source(
        source=source, model=model, feats=feats, dset=dset)
    # count non-zero weights per experiment, pairing the in/out vectors
    nnz_in = [np.count_nonzero(w_in) for w_in, _ in zip(weights_when_in, weights_when_out)]
    nnz_out = [np.count_nonzero(w_out) for _, w_out in zip(weights_when_in, weights_when_out)]
    return np.mean(np.array(nnz_in)), np.mean(np.array(nnz_out))
def most_changing_substructs_source(dset='bcrp', model='logreg3', feats='ecfps1', source='Imai_2004', top=10):
    """
    Returns a dictionary of {substruct:[changes in weight]} for all the expids in which the substructure was among the
    top most changing in terms of logistic weights (comparing weights when the source is in training or in test).
    The result is cached as a pickle under data/results/<dset>/ and simply re-read on subsequent calls.
    """
    if not op.exists(op.join(MANYSOURCES_ROOT, 'data', 'results', dset, 'top_%i_substructures_changing_weight_%s_%s_%s.dict'
                             %(top, source, model, feats))):
        substruct_changes_dict = defaultdict(list)
        expids, all_weights_in, all_weights_out = substructs_weights_one_source(source=source, model=model, feats=feats, dset=dset)
        i2s = ManysourcesDataset(dset).ecfps(no_dupes=True).i2s
        # dimensions: expids * num substructures
        # get the absolute weight difference between source in and source out
        difference_weights = np.absolute(np.subtract(all_weights_in, all_weights_out))
        orders = np.argsort(difference_weights, axis=1) # for each expid, indices of the sorted weight differences
        # Let's take top n differences
        for expid, o_i in enumerate(orders[:,-top:]): # because the argsort puts first the smallest weight differences!
            great_substructs = [i2s[i] for i in o_i]
            corresponding_weights = difference_weights[expid][o_i]
            for i, sub in enumerate(great_substructs):
                substruct_changes_dict[sub].append(corresponding_weights[i])
        # substruct_changes_dict now contains the changes of weight obtained for all the expid in which the substruct was
        # among the top n changing substructs.
        with open(op.join(MANYSOURCES_ROOT, 'data', 'results', dset, 'top_%i_substructures_changing_weight_%s_%s_%s.dict'
                          %(top, source, model, feats)), 'wb') as writer:
            pickle.dump(substruct_changes_dict, writer, protocol=pickle.HIGHEST_PROTOCOL)
        return substruct_changes_dict
    else:
        # cache hit: return the previously computed dictionary
        with open(op.join(MANYSOURCES_ROOT, 'data', 'results', dset, 'top_%i_substructures_changing_weight_%s_%s_%s.dict'
                          %(top, source, model, feats)), 'rb') as reader:
            return pickle.load(reader)
def substructures_change_weight(source, model='logreg3', feats='ecfps1', dset='bcrp', non_zero_diff=0.01):
    """
    Given a source, retrieve the substructures present in the source whose average weight changes (by more than
    ``non_zero_diff`` in absolute value) when the source moves from train to test in LSO.
    Returns {smarts: [(n_inhibitors, n_non_inhibitors), mean_weight_difference]} restricted to substructures
    that actually occur in molecules of the source.
    """
    _, weights_ins, weights_outs = substructs_weights_one_source(source, model=model, feats=feats, dset=dset)
    # average over all expids:
    weights_in = np.array(weights_ins).mean(axis=0)
    # average over all expids
    weights_out = np.array(weights_outs).mean(axis=0)
    i2s = ManysourcesDataset(dset).ecfps(no_dupes=True).i2s
    # positive difference = weight drops when the source is held out
    difference_weights = np.array(weights_in - weights_out)
    order = np.argsort(difference_weights)
    ordered_diff_w = difference_weights[order]
    ordered_substr = i2s[order]
    print '%i substructures have their weights decreased when the source %s is in external set (LSO)' \
          %(len(ordered_substr[ordered_diff_w > non_zero_diff]), source)
    print '%i substructures have their weights increased when the source %s is in external set (LSO)' \
          %(len(ordered_substr[ordered_diff_w < - non_zero_diff]), source)
    # Retrieve occurrence in source for each of those substructures with non-zero difference
    subs_dict = defaultdict(list)
    # 1. Decreased weight
    for subs, weight_diff in zip(ordered_substr[ordered_diff_w > non_zero_diff], ordered_diff_w[ordered_diff_w > non_zero_diff]):
        n1, n2 = substructure_appears_in_source(subs, source, dataset_nickname=dset)
        if (n1, n2) != (0,0):
            subs_dict[subs].append((n1, n2))
            subs_dict[subs].append(weight_diff)
    # 2. Increased weight
    for subs, weight_diff in zip(ordered_substr[ordered_diff_w < -non_zero_diff], ordered_diff_w[ordered_diff_w < - non_zero_diff]):
        n1, n2 = substructure_appears_in_source(subs, source, dataset_nickname=dset)
        if (n1, n2) != (0,0):
            subs_dict[subs].append((n1, n2))
            subs_dict[subs].append(weight_diff)
    return subs_dict
def substructure_appears_in_source(substr, source, dataset_nickname='bcrp'):
    """
    Returns a tuple (int, int) counting how many compounds of the given source contain the given substructure.
    In position 0 of the tuple, we report the number of inhibitors and in position 1 we report the number of inactives.
    """
    rdkimold = MANYSOURCES_MOLECULES[dataset_nickname]()
    # Hoisted out of the loop: the SMARTS pattern is loop-invariant and
    # recompiling it per molecule was pure overhead.
    patt = Chem.MolFromSmarts(substr)
    in_class_1 = 0
    in_class_0 = 0
    # Only materialize molecules belonging to the requested source; the old
    # code fetched mol/source/label for every molecule in the dataset first.
    for molid in rdkimold.molids():
        if rdkimold.molid2source(molid) != source:
            continue
        molec = rdkimold.molid2mol(molid)
        if molec.HasSubstructMatch(patt):
            if rdkimold.molid2label(molid) == 'INHIBITOR':  # Careful, maybe this does not work for all datasets!!
                in_class_1 += 1
            else:
                in_class_0 += 1
    return in_class_1, in_class_0
def faster_relevant_feats(source, dset='bcrp'):
    """
    Return the set of feature (column) indices that occur in at least one
    molecule of ``source`` - i.e. the substructures "relevant" to the source.
    """
    X, _ = ManysourcesDataset(dset).ecfpsXY(no_dupes=True)  # sparse molecules x features matrix
    all_molids = list(ManysourcesDataset(dset).ecfps(no_dupes=True).molids)
    molids_in_source = ManysourcesDataset(dset).molecules().source2molids(source)
    # Build the molid -> row map once; the previous list.index() per molid was
    # O(n) each, O(n^2) overall. setdefault keeps the FIRST occurrence, which
    # matches list.index semantics if molids were ever duplicated.
    row_of = {}
    for row, molid in enumerate(all_molids):
        row_of.setdefault(molid, row)
    mol_indices = np.array([row_of[molid] for molid in molids_in_source])
    Xsource = X[mol_indices, :]
    # indices of the CSR submatrix = columns with at least one non-zero entry
    features_indices = set(Xsource.indices)
    return features_indices
def plot_smarts(smarts, directory):
    """Render a SMARTS pattern to '<directory>/<smarts>.png' and return that path.

    Trivial one-character patterns (the bare C / c atoms) are skipped, in which
    case nothing is written and None is returned.
    """
    from integration import smartsviewer_utils
    destination = op.join(directory, smarts + '.png')
    # let's remove the C and c... (single-character patterns are not worth drawing)
    if len(smarts) > 1:
        smartsviewer_utils.SmartsViewerRunner(w=200, h=200).depict(smarts, destination)
        return destination
def _save_substruct_grid(substruct_smarts, out_png, tmp_dir='/home/flo/Desktop'):
    """Render each SMARTS of ``substruct_smarts`` to a png in ``tmp_dir``, paste
    the renderings on a 4-per-row grid saved to ``out_png``, then delete the
    temporary pngs. (Extracted from two near-identical inline copies.)"""
    from PIL import Image
    for smarts in substruct_smarts:
        plot_smarts(smarts, tmp_dir)
    # NOTE(review): this globs *every* png in tmp_dir, so leftovers from other
    # runs would be pasted too - same behavior as the original inline code.
    ims = [Image.open(f) for f in glob.glob(op.join(tmp_dir, '*.png'))]
    num_lines = math.ceil(float(len(ims))/4)
    grid = Image.new("RGB", (800, int(num_lines*200)), color='white')
    for i, im in enumerate(ims):
        im.thumbnail((200,200), Image.ANTIALIAS)
        grid.paste(im, (200 * (i%4), 200 * (i/4)))
    grid.save(out_png)
    for f in glob.glob(op.join(tmp_dir, '*.png')):
        os.remove(f)


def positive_negative_substructs(model='logreg3', feats='ecfps1', dset='bcrp', lso=True, num_expids=4096,
                                 top_interesting=20):
    '''
    Given a dataset, collect all weights for all substructures across all expids, then average them and check the
    extremes: positive weights mean a substructure that is likely to occur in inhibitors, negative weights mean
    substructures more likely to occur in non-inhibitors. Are we learning something?
    Returns (top_inactives, top_inhibitors): lists of (average_weight, smarts) pairs.
    '''
    hub = Hub(dset_id=dset, expids=num_expids, lso=lso, model=model, feats=feats)
    weights, _, expids, foldnums = hub.logreg_models()
    # mean weight of each substructure over all (expid, fold) models
    average_weights = np.asarray(weights.mean(axis=0))[0]
    i2s = ManysourcesDataset(dset).ecfps(no_dupes=True).i2s
    order = np.argsort(average_weights)
    ordered_substructures = i2s[order]
    ordered_importances = average_weights[order]
    top_inactives = zip(ordered_importances[0:top_interesting], ordered_substructures[0:top_interesting])
    top_inhibitors = zip(ordered_importances[-top_interesting:], ordered_substructures[-top_interesting:])
    # Let's plot them! (both extremes share the same grid-rendering helper)
    _save_substruct_grid([substr for _, substr in top_inactives],
                         op.join(MANYSOURCES_ROOT, 'data', 'results', dset,
                                 'substructs_max_negative_weights_lso.png'))
    _save_substruct_grid([substr for _, substr in top_inhibitors],
                         op.join(MANYSOURCES_ROOT, 'data', 'results', dset,
                                 'substructs_max_positive_weights_lso.png'))
    return top_inactives, top_inhibitors
print positive_negative_substructs()
exit(33)
"""
What can we do with this information?
1. compare rankings: are the most weighted substructures (in absolute value) still highly weighted when x source is
in test?
2. check the change in weight for each source across all the expids and do a t-test to check which ones are
significant
3. which substructures are present in the source? Do they show significant change in weight?
4. were those substructures actually important (high absolute value of weight) when the source was part of the
training set?
9. Can we correlate the change of weight (second whiskerplot with only substructures occurring in source) with a worse
prediction of the source? (in terms of average loss for all mols for all splits where the source is in external set)
Comments from Santi:
5. How hard would it be to adapt your code to find out a ranking of features according to how much they "changed
weight" between "source in" and "source out"? That might allow us to highlight concrete features.
6. How hard would it be to restrict this same plot to only subsets of relevant features (substructures) for each source.
And by relevant I mean "substructures that actually appear in the source". For large sources, I would expect not a big
change (because most substructures will be relevant for the source). But for not so big sources, I would expect this
to change the plot dramatically. I think so because I believe that important changes in substructures get hidden by the
overwhelming majority of features that do not care about a source being or not available for training.
7. Can you imagine a way of labelling folds according to whether a molecule was better or worse predicted than average?
8. Can you imagine that performing regression on a weight of a feature we find interesting, using as predictors the
coocurrences, would somehow allow us to explain what coocurrences make a feature important/unimportant?
"""
def test_ranking(weights_in, weights_out):
import scipy.stats as stats
return stats.spearmanr(weights_in, weights_out) # returns r and the p-value associated
def overall_ranking(source, model='logreg3', feats='ecfps1', dset='bcrp'):
    """
    Creates a dictionary and pickles it. Does not return anything. For each expid, computes the Spearman coeff correl
    between the weights in and the weights out (across all substructures).
    The pickle maps expid -> (spearman_r, p_value); if the file already exists nothing is recomputed.
    """
    # cache hit: the per-expid Spearman pickle is already on disk
    if op.exists(op.join(MANYSOURCES_ROOT, 'data', 'results', dset,
                         'spearmans_lso_' + source + '_' + model + '_' + feats + '.dict')):
        return
    spearmans = {}
    expids, all_weights_in, all_weights_out = substructs_weights_one_source(source=source, model=model, feats=feats,
                                                                            dset=dset)
    print len(expids), len(all_weights_in), len(all_weights_out)
    for expid, weights_in, weights_out in zip(expids, all_weights_in, all_weights_out):
        spearmanr, pvalue = test_ranking(weights_in, weights_out)
        spearmans[expid] = (spearmanr, pvalue)
        print expid, spearmanr, pvalue
    with open(op.join(MANYSOURCES_ROOT, 'data', 'results', dset, 'spearmans_lso_' + source + '_' + model + '_' + feats + '.dict'), 'wb') as writer:
        pickle.dump(spearmans, writer, protocol=pickle.HIGHEST_PROTOCOL)
def ranking_relevant_features(source, model='logreg3', feats='ecfps1', dset='bcrp'):
    """
    Same as before but by "relevant features", we mean features that actually occur in the given source.
    Pickles {expid: (spearman_r, p_value)} restricted to those features; no-op if the pickle exists.
    """
    if op.exists(op.join(MANYSOURCES_ROOT, 'data', 'results', dset, 'spearmans_lso_relfeats_' + source + '_' + model + '_' + feats + '.dict')):
        return
    spearmans = {}
    expids, all_weights_in, all_weights_out = substructs_weights_one_source(source=source, model=model, feats=feats,
                                                                            dset=dset)
    relevant_feature_indexes = list(faster_relevant_feats(source, dset=dset))
    # Select only weights that correspond to relevant features for the given source
    for expid, weights_in, weights_out in zip(expids, all_weights_in, all_weights_out):
        # only to the Spearman test on the relevant feature weights
        spearmanr, pvalue = test_ranking(weights_in[relevant_feature_indexes], weights_out[relevant_feature_indexes])
        spearmans[expid] = (spearmanr, pvalue)
        print expid, spearmanr, pvalue
    with open(op.join(MANYSOURCES_ROOT, 'data', 'results', dset,
                      'spearmans_lso_relfeats_' + source + '_' + model + '_' + feats + '.dict'), 'wb') as writer:
        pickle.dump(spearmans, writer, protocol=pickle.HIGHEST_PROTOCOL)
def plot_spearman_coefs_all_sources(dir, model='logreg3', feats='ecfps1', dset='bcrp'):
    """
    Box-plots, one box per source, the distributions of Spearman correlation coefficients read
    from the 'spearmans_lso_*' pickles found in ``dir`` (skipping the 'relfeats' variants).
    """
    big_dict = {}
    # list all spearman files
    for f in glob.glob(op.join(dir, 'spearmans_lso_*')):
        if not 'relfeats' in op.basename(f):
            # recover the source name from the pickle file name
            source = op.basename(f).partition('_lso_')[2].partition('_logreg')[0]
            print source
            with open(f, 'rb') as reader:
                dict_spearman = pickle.load(reader)
            # keep only the correlation coefficients, dropping the p-values
            spearmans = map(lambda x: x[0], dict_spearman.values())
            big_dict[source] = spearmans
    df = pd.DataFrame.from_dict(big_dict)
    # wide -> long format, as expected by seaborn.boxplot
    tidy_df = pd.melt(df, var_name='source', value_name='Spearman correlation coefficient')
    import seaborn
    seaborn.set_style("ticks")
    seaborn.set_context("talk")
    seaborn.boxplot('source', 'Spearman correlation coefficient', data=tidy_df)
    locs, labels = plt.xticks()
    plt.setp(labels, rotation=90)
    plt.xlabel('Source')
    plt.ylabel('Spearman correlation of feature weights')
    plt.ylim([0,1])
    plt.show()
def plot_spearman_only_relevant_feats_all_sources(dir, model='logreg3', feats='ecfps1', dset='bcrp'):
    """
    Same per-source box plot as plot_spearman_coefs_all_sources, but restricted to the
    'relfeats' pickles, i.e. Spearman correlations computed on the features actually
    occurring in each source.
    """
    big_dict = {}
    # list all spearman files
    for f in glob.glob(op.join(dir, 'spearmans_*')):
        if 'relfeats' in op.basename(f):
            # recover the source name from the pickle file name
            source = op.basename(f).partition('_lso_relfeats_')[2].partition('_logreg')[0]
            print source
            with open(f, 'rb') as reader:
                dict_spearman = pickle.load(reader)
            # keep only the correlation coefficients, dropping the p-values
            spearmans = map(lambda x: x[0], dict_spearman.values())
            big_dict[source] = spearmans
    df = pd.DataFrame.from_dict(big_dict)
    tidy_df = pd.melt(df, var_name='source', value_name='Spearman correlation coefficient')
    import seaborn
    seaborn.set_style("ticks")
    seaborn.set_context("talk")
    seaborn.boxplot('source', 'Spearman correlation coefficient', data=tidy_df)
    locs, labels = plt.xticks()
    plt.setp(labels, rotation=90)
    plt.title('Spearman correlations across 4096 experiments, '
              '\nchecking the weights of the relevant features\nwhen the source is in training or in test')
    plt.xlabel('Source')
    plt.ylabel('Spearman correlation of feature weights')
    plt.ylim([0,1])
    plt.show()
def paired_ttest(weights_in, weights_out):
    """One-sample t-test of the paired weight differences against zero.

    Null hypothesis: there is no weight difference.
    Returns (t, p-value) as given by scipy.stats.ttest_1samp.
    """
    # TODO: also do it with the bayesian approach
    from scipy.stats import ttest_1samp
    return ttest_1samp(weights_in - weights_out, 0)
def overall_ttest(source, model='logreg3', feats='ecfps1', dset='bcrp'):
    """
    For each expid, runs a paired t-test between the substructure weights with the source in
    train vs held out, and pickles {expid: (t, p_value)} under data/results/<dset>/.
    """
    ttests = {}
    expids, all_weights_in, all_weights_out = substructs_weights_one_source(source=source, model=model, feats=feats,
                                                                            dset=dset)
    for expid, weights_in, weights_out in zip(expids, all_weights_in, all_weights_out):
        t, pvalue = paired_ttest(weights_in, weights_out)
        ttests[expid] = (t, pvalue)
        print expid, t, pvalue
    with open(op.join(MANYSOURCES_ROOT, 'data', 'results', dset, 'paired_ttest_lso_' + source + '_' + model + '_' + feats + '.dict'), 'wb') as writer:
        pickle.dump(ttests, writer, protocol=pickle.HIGHEST_PROTOCOL)
def ttest_per_substructure(source, model='logreg3', feats='ecfps1', dset='bcrp'):
    """
    For each substructure, runs a paired t-test of its weight across expids (source in train vs
    held out) and pickles {smarts: (t, p_value)} under data/results/<dset>/.
    """
    ttests = {}
    expids, all_weights_in, all_weights_out = substructs_weights_one_source(source=source, model=model, feats=feats,
                                                                            dset=dset)
    print np.array(all_weights_in).shape, np.array(all_weights_out).shape
    i2s = ManysourcesDataset(dset).ecfps(no_dupes=True).i2s
    # transpose: iterate one substructure (column) at a time instead of one expid (row)
    all_weights_in = list(np.array(all_weights_in).T)
    all_weights_out = list(np.array(all_weights_out).T)
    print len(all_weights_in), len(all_weights_out)
    for i, weights_in in enumerate(all_weights_in):
        ttests[i2s[i]] = paired_ttest(weights_in, all_weights_out[i])
        # progress trace every 10 substructures
        if i%10 == 0:
            print i2s[i], weights_in
    with open(op.join(MANYSOURCES_ROOT, 'data', 'results', dset, 'paired_ttest_lso_bysubstruct_' + source + '_' + model + '_' + feats + '.dict'), 'wb') as writer:
        pickle.dump(ttests, writer, protocol=pickle.HIGHEST_PROTOCOL)
def do_job_question_3(source, model='logreg3', feats='ecfps1', dset='bcrp', significant=0.01):
    """
    Reads the per-substructure paired t-test pickle for ``source`` and returns the list of
    substructures with p-value <= ``significant`` that actually occur in the source.
    Requires ttest_per_substructure() to have been run first for this source.
    """
    # I really should make it faster
    # Read the t-test per substructure file
    significant_substructures = []
    with open(op.join(MANYSOURCES_ROOT, 'data', 'results', dset, 'paired_ttest_lso_bysubstruct_' + source + '_' + model + '_' + feats + '.dict'), 'rb') as reader:
        ttests = pickle.load(reader)
    for substructure, ttest_res in ttests.iteritems():
        # ttest_res is (t, p_value); index 1 is the p-value
        if ttest_res[1] <= significant:
            if substructure_appears_in_source(substructure, source, dataset_nickname=dset) != (0,0):
                print substructure, ttest_res[1]
                significant_substructures.append(substructure)
    return significant_substructures
def analyse_most_changing_substructs(source, dset='bcrp', model='logreg3', feats='ecfps1', top=10,
                                     temp_dir='/home/floriane/Desktop'):
    """
    Check which substructures are in the source among all those that were showing important changes in weight. Plots
    the substructure using SmartsViewer, pasting the renderings on a 4-per-row grid image saved
    under data/results/<dset>/.
    """
    substructs_dict = most_changing_substructs_source(dset, model=model, feats=feats, source=source, top=top)
    distinct_subs = len(substructs_dict)
    indices_in_source = list(faster_relevant_feats(source, dset))
    i2s = ManysourcesDataset(dset).ecfps(no_dupes=True).i2s
    substructs_in_source = i2s[indices_in_source]
    # intersection: top-changing substructures that also occur in the source
    in_source_and_changing = [sub for sub in substructs_dict.keys() if sub in substructs_in_source ]
    print in_source_and_changing
    print "Proportion of substructures that most change in weight that actually appear in %s: %.2f" % \
          (source, float(len(in_source_and_changing))/distinct_subs)
    from chemdeco.integration import smartsviewer_utils
    from PIL import Image
    for substr in in_source_and_changing:
        if len(substr) > 1: # let's remove the C and c...
            svr = smartsviewer_utils.SmartsViewerRunner(w=200, h=200)
            svr.depict(substr, op.join(temp_dir, substr + '.png'))
    # NOTE(review): this globs every png in temp_dir, so leftovers from earlier
    # runs would be pasted into the grid too - see TODO below.
    ims = [Image.open(f) for f in glob.glob(op.join(temp_dir, '*.png'))]
    num_lines = math.ceil(float(len(ims))/4)
    blank_image = Image.new("RGB", (800, int(num_lines*200)), color='white')
    for i, im in enumerate(ims):
        im.thumbnail((200,200), Image.ANTIALIAS)
        blank_image.paste(im, (200 * (i%4), 200 * (i/4)))
    blank_image.save(op.join(MANYSOURCES_ROOT, 'data', 'results', dset, 'substructs_in_%s_max_change_weight_%s_%s.png'
                             %(source, model, feats)))
    # TODO: automatically remove the images from the temp_dir
def barplot_most_changing_substructs(dset='bcrp', model='logreg3', feats='ecfps1', top=10):
"""
Plots a 2-bar per source bar plot: first bar corresponds to the amount of substructures that most changed weight in
the in / out experiments. Second bar corresponds to the amount of those substructures that actually occur in the
source
"""
values_total = []
values_insource = []
sources = ManysourcesDataset(dset='bcrp').molecules().present_sources()
values_dict = {}
for source in sources:
print source
substructs_dict = most_changing_substructs_source(dset, model=model, feats=feats, source=source, top=top)
values_total.append(len(substructs_dict))
indices_in_source = list(faster_relevant_feats(source, dset))
i2s = ManysourcesDataset(dset).ecfps(no_dupes=True).i2s
substructs_in_source = i2s[indices_in_source]
in_source_and_changing = [sub for sub in substructs_dict.keys() if sub in substructs_in_source]
values_insource.append(len(in_source_and_changing))
values_dict['source'] = list(sources)
values_dict['total'] = values_total
values_dict['in source'] = values_insource
ind = np.arange(len(sources)) # the x locations for the groups
width = 0.35 # the width of the bars
import seaborn
seaborn.set_style("ticks")
seaborn.set_context("talk")
seaborn.set_palette('deep')
fig, ax = plt.subplots()
rects1 = ax.bar(ind, values_dict['total'], width, color='0.75')
rects2 = ax.bar(ind+width, values_dict['in source'], width)
ax.legend((rects1[0], rects2[0]), ('Total', 'In source only') )
locs, labels = plt.xticks(map(lambda x: x + width, ind), values_dict['source'])
plt.setp(labels, rotation=90)
plt.xlabel('Source')
plt.ylabel('Number of substructures most changing weight')
plt.ylim([0,320])
plt.show()
def losses_correl_weight_changes(dset='bcrp', model='logreg3', feats='ecfps1', expids=tuple(range(4096)), calib="'0.1'"):
    """
    Plots the correlation between the average loss per source with the average relevant spearman correlation per source.
    NOTE(review): the dset/model/feats/calib parameters are not actually used by read_cached()
    below, which hard-codes its HDF5 coordinates (and uses score_calibration '0-1' while the
    ``calib`` default reads '0.1'); the spearman pickles are likewise read from the 'bcrp'
    results directory. Confirm intent before reusing with other datasets.
    """
    # Copied from Santi but then changed a bit to fit my needs. Reads the losses for the given model
    def read_cached():
        cache_path = op.join(MANYSOURCES_ROOT, 'data', 'results', 'square_losses.h5')
        result_coords = '/dset=bcrp/feats=ecfps1/model=logreg3/lso=True/score_calibration=\'0-1\''
        with h5py.File(cache_path, 'r') as h5:
            group = h5[result_coords]
            infile_expids = group['expids'][()] if expids is not None else expids
            # only proceed when every requested expid is present in the cache
            if 0 == len(set(expids) - set(infile_expids[:, 0])):
                e2r = {e: i for e, i in infile_expids if i >= 0}
                ok_expids = [expid for expid in expids if expid in e2r]
                rows = [e2r[expid] for expid in ok_expids]
                losses = group['losses'][rows].T
                molids = group['molids'][:]
                return pd.DataFrame(losses, columns=ok_expids, index=molids)
    losses = read_cached()
    molids = list(losses.index)
    #print molids
    equivalence_to_source = ManysourcesDataset(dset).mols().molids2sources(molids)
    losses['source'] = equivalence_to_source
    df_mean_loss = losses.groupby('source').mean() # average loss per source per expid
    # collapse further: one scalar (mean over expids) per source
    dict_mean_loss = defaultdict(float)
    for src in df_mean_loss.index:
        dict_mean_loss[src] = np.array(list(df_mean_loss.loc[src])).mean()
    df_mean_loss = pd.DataFrame.from_dict(dict_mean_loss, orient='index')
    df_mean_loss.columns = ['average_loss']
    big_dict = {}
    # list all spearman files
    for f in glob.glob(op.join(op.join(MANYSOURCES_ROOT, 'data', 'results', 'bcrp'), 'spearmans_*')):
        if 'relfeats' in op.basename(f):
            source = op.basename(f).partition('_lso_relfeats_')[2].partition('_logreg')[0]
            #print source
            with open(f, 'rb') as reader:
                dict_spearman = pickle.load(reader)
            spearmans = map(lambda x: x[0], dict_spearman.values())
            big_dict[source] = np.array(spearmans).mean()
    df_mean_loss['spearmans'] = [big_dict[source] for source in df_mean_loss.index]
    print df_mean_loss
    # plot correlation
    import seaborn
    seaborn.set_style("ticks")
    seaborn.set_context("talk")
    seaborn.set_palette('deep')
    seaborn.set_context(rc={'lines.markeredgewidth': 0.1})  # otherwise we only see regression line, see
    #http://stackoverflow.com/questions/26618339/new-version-of-matplotlib-with-seaborn-line-markers-not-functioning
    seaborn.lmplot('spearmans', 'average_loss', df_mean_loss, scatter_kws={"marker": ".", 's': 50})
    #plt.scatter([big_dict[src] for src in sources], [dict_mean_loss[src] for src in sources])
    plt.xlabel('Spearman coefficient correlation of feature importances')
    plt.ylabel('Average loss when source is in external set')
    plt.title('Absence of correlation between hardness to predict (high loss) \nand high change in feature weights '
              'at the source level')
    plt.show()
def relationship_spearman_size_source(dir, model='logreg3', feats='ecfps1', dset='bcrp'):
    """
    Plots the relationship between the size of the source vs the average relevant Spearman corr coeff. One point per
    source on the plot (sources discovered from the 'relfeats' spearman pickles in ``dir``).
    """
    small_dict = defaultdict(list)
    # list all spearman files
    for f in glob.glob(op.join(dir, 'spearmans_*')):
        if 'relfeats' in op.basename(f):
            # recover the source name from the pickle file name
            source = op.basename(f).partition('_lso_relfeats_')[2].partition('_logreg')[0]
            print source
            small_dict['source'].append(source)
            # size = number of molecules in this source
            small_dict['size'].append(len(ManysourcesDataset(dset).mols().sources2molids([source])))
            with open(f, 'rb') as reader:
                dict_spearman = pickle.load(reader)
            spearmans = map(lambda x: x[0], dict_spearman.values())
            small_dict['average spearman'].append(np.mean(np.array(spearmans)))
    df = pd.DataFrame.from_dict(small_dict)
    import seaborn
    seaborn.set_style("ticks")
    seaborn.set_context("talk")
    seaborn.lmplot('size', 'average spearman', data=df, scatter_kws={"marker": "o", "color": "slategray"},
                   line_kws={"linewidth": 1, "color": "seagreen"})
    plt.show()
if __name__ == '__main__':
    # Ad-hoc driver: earlier one-off analyses are kept commented out for reference;
    # currently runs the most-changing-substructures analysis for a single source.
    #sources = ManysourcesDataset(dset='hERG').molecules().present_sources()
    #for source in sources:
    #    print source
    #    most_changing_substructs_source(source=source, dset='bcrp', top=10)
    #ttest_per_substructure(source)
    #plot_spearman_coefs_all_sources(op.join(MANYSOURCES_ROOT, 'data', 'results', 'bcrp'))
    #do_job_question_3('Zembruski_2011')
    #cache_relevant_features_all_sources()
    #plot_spearman_only_relevant_feats_all_sources(op.join(MANYSOURCES_ROOT, 'data', 'results', 'bcrp'))
    #plot_spearman_coefs_all_sources(op.join(MANYSOURCES_ROOT, 'data', 'results', 'bcrp'))
    #barplot_most_changing_substructs()
    #proportion_relevant_features('flavonoids_Zhang_2004')
    #print substructures_change_weight('flavonoids_Zhang_2004')
    #relationship_spearman_size_source(op.join(MANYSOURCES_ROOT, 'data', 'results', 'bcrp'))
    source = 'Ochoa-Puentes_2011'
    analyse_most_changing_substructs(source, dset='bcrp')
| bsd-3-clause |
imsparsh/python-for-android | python3-alpha/python3-src/Lib/test/test_complex.py | 61 | 27765 | import unittest
from test import support
from random import random
from math import atan2, isnan, copysign
import operator
INF = float("inf")
NAN = float("nan")
# These tests ensure that complex math does the right thing
class ComplexTest(unittest.TestCase):
    def assertAlmostEqual(self, a, b):
        # Complex-aware override of unittest.TestCase.assertAlmostEqual:
        # compares real and imaginary parts separately, treating a plain
        # number as a complex with imaginary part 0.
        if isinstance(a, complex):
            if isinstance(b, complex):
                unittest.TestCase.assertAlmostEqual(self, a.real, b.real)
                unittest.TestCase.assertAlmostEqual(self, a.imag, b.imag)
            else:
                unittest.TestCase.assertAlmostEqual(self, a.real, b)
                unittest.TestCase.assertAlmostEqual(self, a.imag, 0.)
        else:
            if isinstance(b, complex):
                unittest.TestCase.assertAlmostEqual(self, a, b.real)
                unittest.TestCase.assertAlmostEqual(self, 0., b.imag)
            else:
                unittest.TestCase.assertAlmostEqual(self, a, b)
def assertCloseAbs(self, x, y, eps=1e-9):
"""Return true iff floats x and y "are close\""""
# put the one with larger magnitude second
if abs(x) > abs(y):
x, y = y, x
if y == 0:
return abs(x) < eps
if x == 0:
return abs(y) < eps
# check that relative difference < eps
self.assertTrue(abs((x-y)/y) < eps)
    def assertFloatsAreIdentical(self, x, y):
        """assert that floats x and y are identical, in the sense that:
        (1) both x and y are nans, or
        (2) both x and y are infinities, with the same sign, or
        (3) both x and y are zeros, with the same sign, or
        (4) x and y are both finite and nonzero, and x == y
        """
        msg = 'floats {!r} and {!r} are not identical'
        if isnan(x) or isnan(y):
            if isnan(x) and isnan(y):
                # both nan: identical; a single nan falls through to fail()
                return
        elif x == y:
            if x != 0.0:
                # equal and nonzero (== also handles matching infinities)
                return
            # both zero; check that signs match
            elif copysign(1.0, x) == copysign(1.0, y):
                return
            else:
                msg += ': zeros have different signs'
        self.fail(msg.format(x, y))
def assertClose(self, x, y, eps=1e-9):
"""Return true iff complexes x and y "are close\""""
self.assertCloseAbs(x.real, y.real, eps)
self.assertCloseAbs(x.imag, y.imag, eps)
def check_div(self, x, y):
"""Compute complex z=x*y, and check that z/x==y and z/y==x."""
z = x * y
if x != 0:
q = z / x
self.assertClose(q, y)
q = z.__truediv__(x)
self.assertClose(q, y)
if y != 0:
q = z / y
self.assertClose(q, x)
q = z.__truediv__(y)
self.assertClose(q, x)
def test_truediv(self):
    # Exhaustively exercise division over a small grid of Gaussian
    # integers, then spot-check extreme magnitudes and random values.
    reals = [float(i) for i in range(-5, 6)]
    grid = [complex(re_part, im_part) for re_part in reals for im_part in reals]
    for numerator in grid:
        for denominator in grid:
            self.check_div(numerator, denominator)
    # A naive complex division algorithm (such as in 2.0) is very prone to
    # nonsense errors for these (overflows and underflows).
    self.check_div(complex(1e200, 1e200), 1+0j)
    self.check_div(complex(1e-200, 1e-200), 1+0j)
    # Just for fun.
    for _ in range(100):
        self.check_div(complex(random(), random()),
                       complex(random(), random()))
    self.assertRaises(ZeroDivisionError, complex.__truediv__, 1+1j, 0+0j)
    # FIXME: The following currently crashes on Alpha
    # self.assertRaises(OverflowError, pow, 1e200+1j, 1e200+1j)
def test_truediv(self):
self.assertAlmostEqual(complex.__truediv__(2+0j, 1+1j), 1-1j)
self.assertRaises(ZeroDivisionError, complex.__truediv__, 1+1j, 0+0j)
def test_floordiv(self):
self.assertRaises(TypeError, complex.__floordiv__, 3+0j, 1.5+0j)
self.assertRaises(TypeError, complex.__floordiv__, 3+0j, 0+0j)
def test_richcompare(self):
self.assertIs(complex.__eq__(1+1j, 1<<10000), False)
self.assertIs(complex.__lt__(1+1j, None), NotImplemented)
self.assertIs(complex.__eq__(1+1j, 1+1j), True)
self.assertIs(complex.__eq__(1+1j, 2+2j), False)
self.assertIs(complex.__ne__(1+1j, 1+1j), False)
self.assertIs(complex.__ne__(1+1j, 2+2j), True)
for i in range(1, 100):
f = i / 100.0
self.assertIs(complex.__eq__(f+0j, f), True)
self.assertIs(complex.__ne__(f+0j, f), False)
self.assertIs(complex.__eq__(complex(f, f), f), False)
self.assertIs(complex.__ne__(complex(f, f), f), True)
self.assertIs(complex.__lt__(1+1j, 2+2j), NotImplemented)
self.assertIs(complex.__le__(1+1j, 2+2j), NotImplemented)
self.assertIs(complex.__gt__(1+1j, 2+2j), NotImplemented)
self.assertIs(complex.__ge__(1+1j, 2+2j), NotImplemented)
self.assertRaises(TypeError, operator.lt, 1+1j, 2+2j)
self.assertRaises(TypeError, operator.le, 1+1j, 2+2j)
self.assertRaises(TypeError, operator.gt, 1+1j, 2+2j)
self.assertRaises(TypeError, operator.ge, 1+1j, 2+2j)
self.assertIs(operator.eq(1+1j, 1+1j), True)
self.assertIs(operator.eq(1+1j, 2+2j), False)
self.assertIs(operator.ne(1+1j, 1+1j), False)
self.assertIs(operator.ne(1+1j, 2+2j), True)
def test_richcompare_boundaries(self):
def check(n, deltas, is_equal, imag = 0.0):
for delta in deltas:
i = n + delta
z = complex(i, imag)
self.assertIs(complex.__eq__(z, i), is_equal(delta))
self.assertIs(complex.__ne__(z, i), not is_equal(delta))
# For IEEE-754 doubles the following should hold:
# x in [2 ** (52 + i), 2 ** (53 + i + 1)] -> x mod 2 ** i == 0
# where the interval is representable, of course.
for i in range(1, 10):
pow = 52 + i
mult = 2 ** i
check(2 ** pow, range(1, 101), lambda delta: delta % mult == 0)
check(2 ** pow, range(1, 101), lambda delta: False, float(i))
check(2 ** 53, range(-100, 0), lambda delta: True)
def test_mod(self):
# % is no longer supported on complex numbers
self.assertRaises(TypeError, (1+1j).__mod__, 0+0j)
self.assertRaises(TypeError, lambda: (3.33+4.43j) % 0)
self.assertRaises(TypeError, (1+1j).__mod__, 4.3j)
def test_divmod(self):
self.assertRaises(TypeError, divmod, 1+1j, 1+0j)
self.assertRaises(TypeError, divmod, 1+1j, 0+0j)
def test_pow(self):
self.assertAlmostEqual(pow(1+1j, 0+0j), 1.0)
self.assertAlmostEqual(pow(0+0j, 2+0j), 0.0)
self.assertRaises(ZeroDivisionError, pow, 0+0j, 1j)
self.assertAlmostEqual(pow(1j, -1), 1/1j)
self.assertAlmostEqual(pow(1j, 200), 1)
self.assertRaises(ValueError, pow, 1+1j, 1+1j, 1+1j)
a = 3.33+4.43j
self.assertEqual(a ** 0j, 1)
self.assertEqual(a ** 0.+0.j, 1)
self.assertEqual(3j ** 0j, 1)
self.assertEqual(3j ** 0, 1)
try:
0j ** a
except ZeroDivisionError:
pass
else:
self.fail("should fail 0.0 to negative or complex power")
try:
0j ** (3-2j)
except ZeroDivisionError:
pass
else:
self.fail("should fail 0.0 to negative or complex power")
# The following is used to exercise certain code paths
self.assertEqual(a ** 105, a ** 105)
self.assertEqual(a ** -105, a ** -105)
self.assertEqual(a ** -30, a ** -30)
self.assertEqual(0.0j ** 0, 1)
b = 5.1+2.3j
self.assertRaises(ValueError, pow, a, b, 0)
def test_boolcontext(self):
for i in range(100):
self.assertTrue(complex(random() + 1e-6, random() + 1e-6))
self.assertTrue(not complex(0.0, 0.0))
def test_conjugate(self):
self.assertClose(complex(5.3, 9.8).conjugate(), 5.3-9.8j)
def test_constructor(self):
    """complex() construction from __complex__/__float__ objects, numbers,
    keyword arguments, and strings (both valid and invalid forms)."""
    # Objects supplying __complex__(), old- and new-style flavours.
    class OS:
        def __init__(self, value): self.value = value
        def __complex__(self): return self.value
    class NS(object):
        def __init__(self, value): self.value = value
        def __complex__(self): return self.value
    self.assertEqual(complex(OS(1+10j)), 1+10j)
    self.assertEqual(complex(NS(1+10j)), 1+10j)
    self.assertRaises(TypeError, complex, OS(None))
    self.assertRaises(TypeError, complex, NS(None))
    self.assertRaises(TypeError, complex, {})
    # Construction from numbers and (real, imag) pairs.
    self.assertAlmostEqual(complex("1+10j"), 1+10j)
    self.assertAlmostEqual(complex(10), 10+0j)
    self.assertAlmostEqual(complex(10.0), 10+0j)
    self.assertAlmostEqual(complex(10), 10+0j)
    self.assertAlmostEqual(complex(10+0j), 10+0j)
    self.assertAlmostEqual(complex(1,10), 1+10j)
    self.assertAlmostEqual(complex(1,10), 1+10j)
    self.assertAlmostEqual(complex(1,10.0), 1+10j)
    self.assertAlmostEqual(complex(1,10), 1+10j)
    self.assertAlmostEqual(complex(1,10), 1+10j)
    self.assertAlmostEqual(complex(1,10.0), 1+10j)
    self.assertAlmostEqual(complex(1.0,10), 1+10j)
    self.assertAlmostEqual(complex(1.0,10), 1+10j)
    self.assertAlmostEqual(complex(1.0,10.0), 1+10j)
    self.assertAlmostEqual(complex(3.14+0j), 3.14+0j)
    self.assertAlmostEqual(complex(3.14), 3.14+0j)
    self.assertAlmostEqual(complex(314), 314.0+0j)
    self.assertAlmostEqual(complex(314), 314.0+0j)
    self.assertAlmostEqual(complex(3.14+0j, 0j), 3.14+0j)
    self.assertAlmostEqual(complex(3.14, 0.0), 3.14+0j)
    self.assertAlmostEqual(complex(314, 0), 314.0+0j)
    self.assertAlmostEqual(complex(314, 0), 314.0+0j)
    # A complex second argument contributes its imag to the real part
    # negated: complex(0, 3.14j) == -3.14+0j below.
    self.assertAlmostEqual(complex(0j, 3.14j), -3.14+0j)
    self.assertAlmostEqual(complex(0.0, 3.14j), -3.14+0j)
    self.assertAlmostEqual(complex(0j, 3.14), 3.14j)
    self.assertAlmostEqual(complex(0.0, 3.14), 3.14j)
    # String parsing, including signs, parentheses, and whitespace.
    self.assertAlmostEqual(complex("1"), 1+0j)
    self.assertAlmostEqual(complex("1j"), 1j)
    self.assertAlmostEqual(complex(), 0)
    self.assertAlmostEqual(complex("-1"), -1)
    self.assertAlmostEqual(complex("+1"), +1)
    self.assertAlmostEqual(complex("(1+2j)"), 1+2j)
    self.assertAlmostEqual(complex("(1.3+2.2j)"), 1.3+2.2j)
    self.assertAlmostEqual(complex("3.14+1J"), 3.14+1j)
    self.assertAlmostEqual(complex(" ( +3.14-6J )"), 3.14-6j)
    self.assertAlmostEqual(complex(" ( +3.14-J )"), 3.14-1j)
    self.assertAlmostEqual(complex(" ( +3.14+j )"), 3.14+1j)
    self.assertAlmostEqual(complex("J"), 1j)
    self.assertAlmostEqual(complex("( j )"), 1j)
    self.assertAlmostEqual(complex("+J"), 1j)
    self.assertAlmostEqual(complex("( -j)"), -1j)
    # Tiny magnitudes underflow to (signed) zero rather than erroring.
    self.assertAlmostEqual(complex('1e-500'), 0.0 + 0.0j)
    self.assertAlmostEqual(complex('-1e-500j'), 0.0 - 0.0j)
    self.assertAlmostEqual(complex('-1e-500+1e-500j'), -0.0 + 0.0j)
    class complex2(complex): pass
    self.assertAlmostEqual(complex(complex2(1+1j)), 1+1j)
    # Keyword arguments; complex values are accepted for either one.
    self.assertAlmostEqual(complex(real=17, imag=23), 17+23j)
    self.assertAlmostEqual(complex(real=17+23j), 17+23j)
    self.assertAlmostEqual(complex(real=17+23j, imag=23), 17+46j)
    self.assertAlmostEqual(complex(real=1+2j, imag=3+4j), -3+5j)
    # check that the sign of a zero in the real or imaginary part
    # is preserved when constructing from two floats. (These checks
    # are harmless on systems without support for signed zeros.)
    def split_zeros(x):
        """Function that produces different results for 0. and -0."""
        return atan2(x, -1.)
    self.assertEqual(split_zeros(complex(1., 0.).imag), split_zeros(0.))
    self.assertEqual(split_zeros(complex(1., -0.).imag), split_zeros(-0.))
    self.assertEqual(split_zeros(complex(0., 1.).real), split_zeros(0.))
    self.assertEqual(split_zeros(complex(-0., 1.).real), split_zeros(-0.))
    # complex() of an exact complex instance is the identity.
    c = 3.14 + 1j
    self.assertTrue(complex(c) is c)
    del c
    self.assertRaises(TypeError, complex, "1", "1")
    self.assertRaises(TypeError, complex, 1, "1")
    # SF bug 543840: complex(string) accepts strings with \0
    # Fixed in 2.3.
    self.assertRaises(ValueError, complex, '1+1j\0j')
    self.assertRaises(TypeError, int, 5+3j)
    self.assertRaises(TypeError, int, 5+3j)
    self.assertRaises(TypeError, float, 5+3j)
    # Malformed strings and wrong argument types must raise.
    self.assertRaises(ValueError, complex, "")
    self.assertRaises(TypeError, complex, None)
    self.assertRaises(ValueError, complex, "\0")
    self.assertRaises(ValueError, complex, "3\09")
    self.assertRaises(TypeError, complex, "1", "2")
    self.assertRaises(TypeError, complex, "1", 42)
    self.assertRaises(TypeError, complex, 1, "2")
    self.assertRaises(ValueError, complex, "1+")
    self.assertRaises(ValueError, complex, "1+1j+1j")
    self.assertRaises(ValueError, complex, "--")
    self.assertRaises(ValueError, complex, "(1+2j")
    self.assertRaises(ValueError, complex, "1+2j)")
    self.assertRaises(ValueError, complex, "1+(2j)")
    self.assertRaises(ValueError, complex, "(1+2j)123")
    self.assertRaises(ValueError, complex, "x")
    self.assertRaises(ValueError, complex, "1j+2")
    self.assertRaises(ValueError, complex, "1e1ej")
    self.assertRaises(ValueError, complex, "1e++1ej")
    self.assertRaises(ValueError, complex, ")1+2j(")
    # the following three are accepted by Python 2.6
    self.assertRaises(ValueError, complex, "1..1j")
    self.assertRaises(ValueError, complex, "1.11.1j")
    self.assertRaises(ValueError, complex, "1e1.1j")
    # check that complex accepts long unicode strings
    self.assertEqual(type(complex("1"*500)), complex)
    # check whitespace processing
    self.assertEqual(complex('\N{EM SPACE}(\N{EN SPACE}1+1j ) '), 1+1j)
    # An exception raised by __complex__() must propagate unchanged.
    class EvilExc(Exception):
        pass
    class evilcomplex:
        def __complex__(self):
            raise EvilExc
    self.assertRaises(EvilExc, complex, evilcomplex())
    # Objects supplying only __float__().
    class float2:
        def __init__(self, value):
            self.value = value
        def __float__(self):
            return self.value
    self.assertAlmostEqual(complex(float2(42.)), 42)
    self.assertAlmostEqual(complex(real=float2(17.), imag=float2(23.)), 17+23j)
    self.assertRaises(TypeError, complex, float2(None))
    # __complex__() interaction with complex subclasses.
    class complex0(complex):
        """Test usage of __complex__() when inheriting from 'complex'"""
        def __complex__(self):
            return 42j
    class complex1(complex):
        """Test usage of __complex__() with a __new__() method"""
        def __new__(self, value=0j):
            return complex.__new__(self, 2*value)
        def __complex__(self):
            return self
    class complex2(complex):
        """Make sure that __complex__() calls fail if anything other than a
        complex is returned"""
        def __complex__(self):
            return None
    self.assertAlmostEqual(complex(complex0(1j)), 42j)
    self.assertAlmostEqual(complex(complex1(1j)), 2j)
    self.assertRaises(TypeError, complex, complex2(1j))
def test_hash(self):
for x in range(-30, 30):
self.assertEqual(hash(x), hash(complex(x, 0)))
x /= 3.0 # now check against floating point
self.assertEqual(hash(x), hash(complex(x, 0.)))
def test_abs(self):
nums = [complex(x/3., y/7.) for x in range(-9,9) for y in range(-9,9)]
for num in nums:
self.assertAlmostEqual((num.real**2 + num.imag**2) ** 0.5, abs(num))
def test_repr_str(self):
    """repr() and str() of complex values (identical output), including
    infinities and nans, plus round-tripping repr() through complex()."""
    def test(v, expected, test_fn=self.assertEqual):
        test_fn(repr(v), expected)
        test_fn(str(v), expected)
    test(1+6j, '(1+6j)')
    test(1-6j, '(1-6j)')
    # The naive "+-0j" spelling must NOT be produced for a negated value,
    # hence the assertNotEqual check here.
    test(-(1+0j), '(-1+-0j)', test_fn=self.assertNotEqual)
    test(complex(1., INF), "(1+infj)")
    test(complex(1., -INF), "(1-infj)")
    test(complex(INF, 1), "(inf+1j)")
    test(complex(-INF, INF), "(-inf+infj)")
    test(complex(NAN, 1), "(nan+1j)")
    test(complex(1, NAN), "(1+nanj)")
    test(complex(NAN, NAN), "(nan+nanj)")
    # Purely imaginary values print without parentheses.
    test(complex(0, INF), "infj")
    test(complex(0, -INF), "-infj")
    test(complex(0, NAN), "nanj")
    # repr() output must parse back to an equal complex.
    self.assertEqual(1-6j,complex(repr(1-6j)))
    self.assertEqual(1+6j,complex(repr(1+6j)))
    self.assertEqual(-6j,complex(repr(-6j)))
    self.assertEqual(6j,complex(repr(6j)))
@support.requires_IEEE_754
def test_negative_zero_repr_str(self):
    """repr()/str() must make IEEE-754 negative zeros visible in whichever
    component carries one."""
    def test(v, expected, test_fn=self.assertEqual):
        test_fn(repr(v), expected)
        test_fn(str(v), expected)
    test(complex(0., 1.), "1j")
    test(complex(-0., 1.), "(-0+1j)")
    test(complex(0., -1.), "-1j")
    test(complex(-0., -1.), "(-0-1j)")
    test(complex(0., 0.), "0j")
    test(complex(0., -0.), "-0j")
    test(complex(-0., 0.), "(-0+0j)")
    test(complex(-0., -0.), "(-0-0j)")
def test_neg(self):
self.assertEqual(-(1+6j), -1-6j)
def test_file(self):
    """print() a pair of complexes to a file and read them back verbatim."""
    a = 3.33+4.43j
    b = 5.1+2.3j
    # Use context managers instead of the old manual open/close
    # bookkeeping; the file is guaranteed closed even when an
    # assertion fails, and the temp file is always unlinked.
    try:
        with open(support.TESTFN, "w") as fo:
            print(a, b, file=fo)
        with open(support.TESTFN, "r") as fo:
            self.assertEqual(fo.read(), ("%s %s\n" % (a, b)))
    finally:
        support.unlink(support.TESTFN)
def test_getnewargs(self):
self.assertEqual((1+2j).__getnewargs__(), (1.0, 2.0))
self.assertEqual((1-2j).__getnewargs__(), (1.0, -2.0))
self.assertEqual((2j).__getnewargs__(), (0.0, 2.0))
self.assertEqual((-0j).__getnewargs__(), (0.0, -0.0))
self.assertEqual(complex(0, INF).__getnewargs__(), (0.0, INF))
self.assertEqual(complex(INF, 0).__getnewargs__(), (INF, 0.0))
@support.requires_IEEE_754
def test_plus_minus_0j(self):
# test that -0j and 0j literals are not identified
z1, z2 = 0j, -0j
self.assertEqual(atan2(z1.imag, -1.), atan2(0., -1.))
self.assertEqual(atan2(z2.imag, -1.), atan2(-0., -1.))
@support.requires_IEEE_754
def test_negated_imaginary_literal(self):
    """A negated imaginary literal must carry real part -0.0 (not +0.0)."""
    z0 = -0j
    z1 = -7j
    z2 = -1e1000j   # overflows a double, so imag becomes -inf
    # Note: In versions of Python < 3.2, a negated imaginary literal
    # accidentally ended up with real part 0.0 instead of -0.0, thanks to a
    # modification during CST -> AST translation (see issue #9011). That's
    # fixed in Python 3.2.
    self.assertFloatsAreIdentical(z0.real, -0.0)
    self.assertFloatsAreIdentical(z0.imag, -0.0)
    self.assertFloatsAreIdentical(z1.real, -0.0)
    self.assertFloatsAreIdentical(z1.imag, -7.0)
    self.assertFloatsAreIdentical(z2.real, -0.0)
    self.assertFloatsAreIdentical(z2.imag, -INF)
@support.requires_IEEE_754
def test_overflow(self):
self.assertEqual(complex("1e500"), complex(INF, 0.0))
self.assertEqual(complex("-1e500j"), complex(0.0, -INF))
self.assertEqual(complex("-1e500+1.8e308j"), complex(-INF, INF))
@support.requires_IEEE_754
def test_repr_roundtrip(self):
vals = [0.0, 1e-500, 1e-315, 1e-200, 0.0123, 3.1415, 1e50, INF, NAN]
vals += [-v for v in vals]
# complex(repr(z)) should recover z exactly, even for complex
# numbers involving an infinity, nan, or negative zero
for x in vals:
for y in vals:
z = complex(x, y)
roundtrip = complex(repr(z))
self.assertFloatsAreIdentical(z.real, roundtrip.real)
self.assertFloatsAreIdentical(z.imag, roundtrip.imag)
# if we predefine some constants, then eval(repr(z)) should
# also work, except that it might change the sign of zeros
inf, nan = float('inf'), float('nan')
infj, nanj = complex(0.0, inf), complex(0.0, nan)
for x in vals:
for y in vals:
z = complex(x, y)
roundtrip = eval(repr(z))
# adding 0.0 has no effect beside changing -0.0 to 0.0
self.assertFloatsAreIdentical(0.0 + z.real,
0.0 + roundtrip.real)
self.assertFloatsAreIdentical(0.0 + z.imag,
0.0 + roundtrip.imag)
def test_format(self):
    """format() of complex values: empty/default presentation, the
    'e'/'f'/'g' types, sign handling, fill/alignment, thousands
    separators, and the '#' alternate form."""
    # empty format string is same as str()
    self.assertEqual(format(1+3j, ''), str(1+3j))
    self.assertEqual(format(1.5+3.5j, ''), str(1.5+3.5j))
    self.assertEqual(format(3j, ''), str(3j))
    self.assertEqual(format(3.2j, ''), str(3.2j))
    self.assertEqual(format(3+0j, ''), str(3+0j))
    self.assertEqual(format(3.2+0j, ''), str(3.2+0j))
    # empty presentation type should still be analogous to str,
    # even when format string is nonempty (issue #5920).
    self.assertEqual(format(3.2+0j, '-'), str(3.2+0j))
    self.assertEqual(format(3.2+0j, '<'), str(3.2+0j))
    z = 4/7. - 100j/7.
    self.assertEqual(format(z, ''), str(z))
    self.assertEqual(format(z, '-'), str(z))
    self.assertEqual(format(z, '<'), str(z))
    self.assertEqual(format(z, '10'), str(z))
    z = complex(0.0, 3.0)
    self.assertEqual(format(z, ''), str(z))
    self.assertEqual(format(z, '-'), str(z))
    self.assertEqual(format(z, '<'), str(z))
    self.assertEqual(format(z, '2'), str(z))
    z = complex(-0.0, 2.0)
    self.assertEqual(format(z, ''), str(z))
    self.assertEqual(format(z, '-'), str(z))
    self.assertEqual(format(z, '<'), str(z))
    self.assertEqual(format(z, '3'), str(z))
    # A presentation type drops the parentheses and always shows both parts.
    self.assertEqual(format(1+3j, 'g'), '1+3j')
    self.assertEqual(format(3j, 'g'), '0+3j')
    self.assertEqual(format(1.5+3.5j, 'g'), '1.5+3.5j')
    # Sign options apply to the real part only.
    self.assertEqual(format(1.5+3.5j, '+g'), '+1.5+3.5j')
    self.assertEqual(format(1.5-3.5j, '+g'), '+1.5-3.5j')
    self.assertEqual(format(1.5-3.5j, '-g'), '1.5-3.5j')
    self.assertEqual(format(1.5+3.5j, ' g'), ' 1.5+3.5j')
    self.assertEqual(format(1.5-3.5j, ' g'), ' 1.5-3.5j')
    self.assertEqual(format(-1.5+3.5j, ' g'), '-1.5+3.5j')
    self.assertEqual(format(-1.5-3.5j, ' g'), '-1.5-3.5j')
    self.assertEqual(format(-1.5-3.5e-20j, 'g'), '-1.5-3.5e-20j')
    self.assertEqual(format(-1.5-3.5j, 'f'), '-1.500000-3.500000j')
    self.assertEqual(format(-1.5-3.5j, 'F'), '-1.500000-3.500000j')
    self.assertEqual(format(-1.5-3.5j, 'e'), '-1.500000e+00-3.500000e+00j')
    self.assertEqual(format(-1.5-3.5j, '.2e'), '-1.50e+00-3.50e+00j')
    self.assertEqual(format(-1.5-3.5j, '.2E'), '-1.50E+00-3.50E+00j')
    self.assertEqual(format(-1.5e10-3.5e5j, '.2G'), '-1.5E+10-3.5E+05j')
    # Fill and alignment pad the whole complex string as one field.
    self.assertEqual(format(1.5+3j, '<20g'), '1.5+3j              ')
    self.assertEqual(format(1.5+3j, '*<20g'), '1.5+3j**************')
    self.assertEqual(format(1.5+3j, '>20g'), '              1.5+3j')
    self.assertEqual(format(1.5+3j, '^20g'), '       1.5+3j       ')
    self.assertEqual(format(1.5+3j, '<20'), '(1.5+3j)            ')
    self.assertEqual(format(1.5+3j, '>20'), '            (1.5+3j)')
    self.assertEqual(format(1.5+3j, '^20'), '      (1.5+3j)      ')
    self.assertEqual(format(1.123-3.123j, '^20.2'), '     (1.1-3.1j)     ')
    self.assertEqual(format(1.5+3j, '20.2f'), '          1.50+3.00j')
    self.assertEqual(format(1.5+3j, '>20.2f'), '          1.50+3.00j')
    self.assertEqual(format(1.5+3j, '<20.2f'), '1.50+3.00j          ')
    self.assertEqual(format(1.5e20+3j, '<20.2f'), '150000000000000000000.00+3.00j')
    self.assertEqual(format(1.5e20+3j, '>40.2f'), '          150000000000000000000.00+3.00j')
    self.assertEqual(format(1.5e20+3j, '^40,.2f'), '  150,000,000,000,000,000,000.00+3.00j  ')
    self.assertEqual(format(1.5e21+3j, '^40,.2f'), ' 1,500,000,000,000,000,000,000.00+3.00j ')
    self.assertEqual(format(1.5e21+3000j, ',.2f'), '1,500,000,000,000,000,000,000.00+3,000.00j')
    # Issue 7094: Alternate formatting (specified by #)
    self.assertEqual(format(1+1j, '.0e'), '1e+00+1e+00j')
    self.assertEqual(format(1+1j, '#.0e'), '1.e+00+1.e+00j')
    self.assertEqual(format(1+1j, '.0f'), '1+1j')
    self.assertEqual(format(1+1j, '#.0f'), '1.+1.j')
    self.assertEqual(format(1.1+1.1j, 'g'), '1.1+1.1j')
    self.assertEqual(format(1.1+1.1j, '#g'), '1.10000+1.10000j')
    # Alternate doesn't make a difference for these, they format the same with or without it
    self.assertEqual(format(1+1j, '.1e'), '1.0e+00+1.0e+00j')
    self.assertEqual(format(1+1j, '#.1e'), '1.0e+00+1.0e+00j')
    self.assertEqual(format(1+1j, '.1f'), '1.0+1.0j')
    self.assertEqual(format(1+1j, '#.1f'), '1.0+1.0j')
    # Misc. other alternate tests
    self.assertEqual(format((-1.5+0.5j), '#f'), '-1.500000+0.500000j')
    self.assertEqual(format((-1.5+0.5j), '#.0f'), '-2.+0.j')
    self.assertEqual(format((-1.5+0.5j), '#e'), '-1.500000e+00+5.000000e-01j')
    self.assertEqual(format((-1.5+0.5j), '#.0e'), '-2.e+00+5.e-01j')
    self.assertEqual(format((-1.5+0.5j), '#g'), '-1.50000+0.500000j')
    self.assertEqual(format((-1.5+0.5j), '.0g'), '-2+0.5j')
    self.assertEqual(format((-1.5+0.5j), '#.0g'), '-2.+0.5j')
    # zero padding is invalid
    self.assertRaises(ValueError, (1.5+0.5j).__format__, '010f')
    # '=' alignment is invalid
    self.assertRaises(ValueError, (1.5+3j).__format__, '=20')
    # integer presentation types are an error
    for t in 'bcdoxX':
        self.assertRaises(ValueError, (1.5+0.5j).__format__, t)
    # make sure everything works in ''.format()
    self.assertEqual('*{0:.3f}*'.format(3.14159+2.71828j), '*3.142+2.718j*')
    # issue 3382
    self.assertEqual(format(complex(NAN, NAN), 'f'), 'nan+nanj')
    self.assertEqual(format(complex(1, NAN), 'f'), '1.000000+nanj')
    self.assertEqual(format(complex(NAN, 1), 'f'), 'nan+1.000000j')
    self.assertEqual(format(complex(NAN, -1), 'f'), 'nan-1.000000j')
    self.assertEqual(format(complex(NAN, NAN), 'F'), 'NAN+NANj')
    self.assertEqual(format(complex(1, NAN), 'F'), '1.000000+NANj')
    self.assertEqual(format(complex(NAN, 1), 'F'), 'NAN+1.000000j')
    self.assertEqual(format(complex(NAN, -1), 'F'), 'NAN-1.000000j')
    self.assertEqual(format(complex(INF, INF), 'f'), 'inf+infj')
    self.assertEqual(format(complex(1, INF), 'f'), '1.000000+infj')
    self.assertEqual(format(complex(INF, 1), 'f'), 'inf+1.000000j')
    self.assertEqual(format(complex(INF, -1), 'f'), 'inf-1.000000j')
    self.assertEqual(format(complex(INF, INF), 'F'), 'INF+INFj')
    self.assertEqual(format(complex(1, INF), 'F'), '1.000000+INFj')
    self.assertEqual(format(complex(INF, 1), 'F'), 'INF+1.000000j')
    self.assertEqual(format(complex(INF, -1), 'F'), 'INF-1.000000j')
def test_main():
    # Entry point used by regrtest to run this module's test case.
    support.run_unittest(ComplexTest)
if __name__ == "__main__":
    # Allow running this test module directly from the command line.
    test_main()
| apache-2.0 |
MazamaScience/ispaq | ispaq/concierge.py | 1 | 32479 | """
ISPAQ Data Access Expediter.
:copyright:
Mazama Science
:license:
GNU Lesser General Public License, Version 3
(http://www.gnu.org/copyleft/lesser.html)
"""
from __future__ import (absolute_import, division, print_function)
import os
import re
import glob
import pandas as pd
import obspy
from obspy.clients.fdsn import Client
from obspy.clients.fdsn.header import URL_MAPPINGS
# ISPAQ modules
from .user_request import UserRequest
from . import irisseismic
# Custom exceptions
class NoAvailableDataError(Exception):
    """Raised when no data matching the request are available."""
class Concierge(object):
    """
    ISPAQ Data Access Expediter.

    Mediates all dataselect/event/station data access for the metric
    calculators, caching availability information where possible.

    :type user_request: :class:`~ispaq.concierge.user_request`
    :param user_request: User request containing the combination of command-line
        arguments and information from the parsed user preferences file.

    :rtype: :class:`~ispaq.concierge` or ``None``
    :return: ISPAQ Concierge.

    .. rubric:: Example

    TODO:  include doctest examples
    """
def __init__(self, user_request=None, logger=None):
    """
    Initializes the ISPAQ data access expediter.

    See :mod:`ispaq.concierge` for all parameters.
    """
    # Keep the entire UserRequest and logger
    self.user_request = user_request
    self.logger = logger

    # Copy important UserRequest properties to the Concierge for simpler access
    self.requested_starttime = user_request.requested_starttime
    self.requested_endtime = user_request.requested_endtime
    self.metric_names = user_request.metrics
    self.sncl_patterns = user_request.sncls
    self.function_by_logic = user_request.function_by_logic
    self.logic_types = user_request.function_by_logic.keys()

    # Individual elements from the Preferences: section of the preferences file
    self.csv_output_dir = user_request.csv_output_dir
    self.plot_output_dir = user_request.plot_output_dir
    self.sigfigs = user_request.sigfigs

    # Output information
    file_base = '%s_%s_%s' % (self.user_request.requested_metric_set,
                              self.user_request.requested_sncl_set,
                              self.requested_starttime.date)
    self.output_file_base = self.csv_output_dir + '/' + file_base

    # Availability dataframe is stored if it is read from a local file
    self.availability = None

    # Filtered availability dataframe is stored for potential reuse
    self.filtered_availability = None

    # Each data source is either a named FDSN web service or a path to
    # local files (miniseed / QuakeML / StationXML).  Resolve all three
    # the same way instead of repeating the logic three times.
    self.dataselect_url, self.dataselect_client = \
        self._resolve_data_source(user_request.dataselect_url, 'dataselect_url')
    self.event_url, self.event_client = \
        self._resolve_data_source(user_request.event_url, 'event_url')
    self.station_url, self.station_client = \
        self._resolve_data_source(user_request.station_url, 'station_url')

def _resolve_data_source(self, url, preference_name):
    """Return a (url, client) pair for one data-source preference.

    If ``url`` names a known FDSN service, its mapped URL and a
    :class:`Client` are returned; if it is a path to existing local
    files, the absolute path and ``None`` are returned; otherwise a
    ValueError is raised (and logged).

    :param url: FDSN service name or local file/directory path.
    :param preference_name: Preference key used in the error message.
    """
    if url in URL_MAPPINGS.keys():
        # Get data from an FDSN web service
        return URL_MAPPINGS[url], Client(url)
    if os.path.exists(os.path.abspath(url)):
        # Get data from local files; a None client flags "local" elsewhere
        return os.path.abspath(url), None
    err_msg = "Cannot find preference file %s: '%s'" % (preference_name, url)
    self.logger.error(err_msg)
    raise ValueError(err_msg)
def get_availability(self,
                     network=None, station=None, location=None, channel=None,
                     starttime=None, endtime=None, includerestricted=None,
                     latitude=None, longitude=None, minradius=None, maxradius=None):
    """
    Return a dataframe of SNCLs available from the `station_url` source
    specified in the `user_request` object used to initialize the
    `Concierge`.

    By default, information in the `user_request` is used to generate
    a FDSN webservices request for station data.  Where arguments are
    provided, these override the information found in `user_request`.

    Columns: network, station, location, channel, latitude, longitude,
    elevation, depth, azimuth, dip, instrument, scale, scalefreq,
    scaleunits, samplerate, starttime, endtime, snclId.

    :type network: str
    :param network: One or more comma-separated network codes.
    :type station: str
    :param station: One or more comma-separated SEED station codes.
    :type location: str
    :param location: One or more comma-separated SEED location ids
        (``"--"`` matches blank location IDs).
    :type channel: str
    :param channel: One or more comma-separated SEED channel codes.
    :type starttime: :class:`~obspy.core.utcdatetime.UTCDateTime`
    :param starttime: Limit to metadata epochs starting on or after this time.
    :type endtime: :class:`~obspy.core.utcdatetime.UTCDateTime`
    :param endtime: Limit to metadata epochs ending on or before this time.
    :type includerestricted: bool
    :param includerestricted: Include information for restricted stations.
    :type latitude: float
    :param latitude: Latitude for a radius search.
    :type longitude: float
    :param longitude: Longitude for a radius search.
    :type minradius: float
    :param minradius: Minimum distance (degrees) from (latitude, longitude).
    :type maxradius: float
    :param maxradius: Maximum distance (degrees) from (latitude, longitude).

    :raises NoAvailableDataError: if nothing matches any sncl_pattern.
    """
    # NOTE:  Building the availability dataframe from a large StationXML is
    # NOTE:  time consuming, so local station data is only parsed once.

    # Special case when using all defaults helps speed up any metrics
    # making multiple calls to get_availability.
    if (network is None and
        station is None and
        location is None and
        channel is None and
        starttime is None and
        endtime is None and
        self.filtered_availability is not None):
        return self.filtered_availability

    # Read from a local StationXML file one time only
    if self.station_client is None:
        if self.availability is None:
            try:
                self.logger.info("Reading StationXML file %s" % self.station_url)
                sncl_inventory = obspy.read_inventory(self.station_url)
            except Exception as e:
                err_msg = "The StationXML file: '%s' is not valid" % self.station_url
                self.logger.debug(e)
                self.logger.error(err_msg)
                raise ValueError(err_msg)
            self.logger.debug('Building availability dataframe...')
            self.availability = self._inventory_to_dataframe(sncl_inventory)
            self.logger.debug('Finished creating availability dataframe')

    # Container for all of the individual sncl_pattern dataframes generated
    sncl_pattern_dataframes = []

    # Loop through all sncl_patterns ---------------------------------------
    for sncl_pattern in self.sncl_patterns:
        # Get "User Request" parameters
        (UR_network, UR_station, UR_location, UR_channel) = sncl_pattern.split('.')

        # Allow arguments to override UserRequest parameters
        _starttime = self.requested_starttime if starttime is None else starttime
        _endtime = self.requested_endtime if endtime is None else endtime
        _network = UR_network if network is None else network
        _station = UR_station if station is None else station
        _location = UR_location if location is None else location
        _channel = UR_channel if channel is None else channel
        _sncl_pattern = "%s.%s.%s.%s" % (_network, _station, _location, _channel)

        # Get availability dataframe ---------------------------------------
        if self.station_client is None:
            # Use the cached dataframe built from the local StationXML file
            df = self.availability
        else:
            # Read from FDSN web services
            try:
                # Bug fix: the includerestricted *argument* used to be
                # silently ignored (the call hard-coded None); it is now
                # passed through to the web service.
                sncl_inventory = self.station_client.get_stations(
                    starttime=_starttime, endtime=_endtime,
                    network=_network, station=_station,
                    location=_location, channel=_channel,
                    includerestricted=includerestricted,
                    latitude=latitude, longitude=longitude,
                    minradius=minradius, maxradius=maxradius,
                    level="channel")
            except Exception as e:
                err_msg = "No sncls matching %s found at %s" % (_sncl_pattern, self.station_url)
                self.logger.debug(e)
                self.logger.warning(err_msg)
                continue
            self.logger.debug('Building availability dataframe...')
            df = self._inventory_to_dataframe(sncl_inventory)

        # Subset availability dataframe based on _sncl_pattern -------------
        # NOTE:  This shouldn't be necessary for dataframes obtained from FDSN
        # NOTE:  but it's quick so we always do it.
        # NOTE:  Replace '.' first before introducing '.*' or '.'!
        py_pattern = _sncl_pattern.replace('.', '\\.').replace('*', '.*').replace('?', '.')
        df = df[df.snclId.str.contains(py_pattern)]

        # Subset based on locally available data ---------------------------
        if self.dataselect_client is None:
            filename = '%s.%s.%s.%s.%s' % (_network, _station, _location, _channel,
                                           _starttime.strftime('%Y.%j'))
            # Allow for possible quality codes after the day-of-year
            filepattern = self.dataselect_url + '/' + filename + '*'
            matching_files = glob.glob(filepattern)
            if len(matching_files) == 0:
                err_msg = "No local waveforms matching %s" % filepattern
                self.logger.debug(err_msg)
                continue
            # Keep only rows whose snclId has at least one local file
            mask = df.snclId.str.contains("MASK WITH ALL FALSE")
            for matching_file in matching_files:
                basename = os.path.basename(matching_file)
                match = re.match('[^\\.]*\\.[^\\.]*\\.[^\\.]*\\.[^\\.]*', basename)
                sncl = match.group(0)
                mask = mask | df.snclId.str.contains(sncl.replace('.', '\\.'))
            df = df[mask]

        # Append this dataframe
        if df.shape[0] == 0:
            self.logger.debug("No SNCLS found matching '%s'" % _sncl_pattern)
        else:
            sncl_pattern_dataframes.append(df)
    # END of sncl_patterns loop --------------------------------------------

    if len(sncl_pattern_dataframes) == 0:
        err_msg = "No available waveforms matching" + str(self.sncl_patterns)
        self.logger.info(err_msg)
        raise NoAvailableDataError(err_msg)

    availability = pd.concat(sncl_pattern_dataframes, ignore_index=True)
    # TODO:  remove duplicates

    if availability.shape[0] == 0:
        err_msg = "No available waveforms matching" + str(self.sncl_patterns)
        self.logger.info(err_msg)
        raise NoAvailableDataError(err_msg)

    # The concierge should remember this dataframe for metrics that
    # make multiple calls to get_availability with all defaults.
    self.filtered_availability = availability
    return availability

def _inventory_to_dataframe(self, sncl_inventory):
    """Flatten an ObsPy Inventory into one dataframe row per channel epoch.

    Extracted helper: this walk used to be copy-pasted in both the
    local-StationXML and FDSN branches of get_availability().
    """
    df = pd.DataFrame(columns=("network", "station", "location", "channel",
                               "latitude", "longitude", "elevation", "depth",
                               "azimuth", "dip", "instrument",
                               "scale", "scalefreq", "scaleunits", "samplerate",
                               "starttime", "endtime", "snclId"))
    for n in sncl_inventory.networks:
        for s in n.stations:
            for c in s.channels:
                snclId = n.code + "." + s.code + "." + c.location_code + "." + c.code
                df.loc[len(df)] = [n.code, s.code, c.location_code, c.code,
                                   c.latitude, c.longitude, c.elevation, c.depth,
                                   c.azimuth, c.dip, c.sensor.description,
                                   None,   # TODO:  Figure out how to get instrument 'scale'
                                   None,   # TODO:  Figure out how to get instrument 'scalefreq'
                                   None,   # TODO:  Figure out how to get instrument 'scaleunits'
                                   c.sample_rate,
                                   c.start_date, c.end_date, snclId]
    return df
def get_dataselect(self,
                   network=None, station=None, location=None, channel=None,
                   starttime=None, endtime=None, quality="B",
                   inclusiveEnd=True, ignoreEpoch=False):
    """
    Returns an R Stream that can be passed to metrics calculation methods.

    All arguments are required except for starttime and endtime. These arguments
    may be specified but will default to the time information found in the
    `user_request` used to generate a FDSN webservices request for MINIseed data.

    :type network: str
    :param network: Select one or more network codes. Can be SEED network
        codes or data center defined codes. Multiple codes are
        comma-separated.
    :type station: str
    :param station: Select one or more SEED station codes. Multiple codes
        are comma-separated.
    :type location: str
    :param location: Select one or more SEED location identifiers. Multiple
        identifiers are comma-separated. As a special case ``"--"`` (two
        dashes) will be translated to a string of two space characters to
        match blank location IDs.
    :type channel: str
    :param channel: Select one or more SEED channel codes. Multiple codes
        are comma-separated.
    :type starttime: :class:`~obspy.core.utcdatetime.UTCDateTime`
    :param starttime: Limit to metadata epochs starting on or after the
        specified start time.
    :type endtime: :class:`~obspy.core.utcdatetime.UTCDateTime`
    :param endtime: Limit to metadata epochs ending on or before the
        specified end time.
    """
    # Allow arguments to override UserRequest parameters
    if starttime is None:
        _starttime = self.requested_starttime
    else:
        _starttime = starttime
    if endtime is None:
        _endtime = self.requested_endtime
    else:
        _endtime = endtime

    if self.dataselect_client is None:
        # Read local MINIseed file and convert to R_Stream.
        # With no FDSN client configured, dataselect_url is treated as a local
        # directory and a SEED-style day filename is built for globbing.
        filename = '%s.%s.%s.%s.%s' % (network, station, location, channel, _starttime.strftime('%Y.%j'))
        filepattern = self.dataselect_url + '/' + filename + '*' # Allow for possible quality codes
        matching_files = glob.glob(filepattern)
        if (len(matching_files) == 0):
            # NOTE(review): when no file matches, r_stream is never assigned and
            # the final `return r_stream` below raises NameError -- confirm this
            # path is handled (or intended to fail) upstream.
            self.logger.info("No files found matching '%s'" % (filepattern))
        else:
            # Use the first match; warn if the pattern was ambiguous.
            filepath = matching_files[0]
            if (len(matching_files) > 1):
                self.logger.warning("Multiple files found matching" '%s -- using %s' % (filepattern, filepath))
            try:
                # Get the ObsPy version of the stream
                py_stream = obspy.read(filepath)
                py_stream = py_stream.slice(_starttime, _endtime)
                # NOTE: ObsPy does not store state-of-health flags with each stream.
                # NOTE: We need to read them in separately from the miniseed file.
                flag_dict = obspy.io.mseed.util.get_timing_and_data_quality(filepath)
                act_flags = [0,0,0,0,0,0,0,0] # TODO: Find a way to read act_flags
                io_flags = [0,0,0,0,0,0,0,0] # TODO: Find a way to read io_flags
                dq_flags = flag_dict['data_quality_flags']
                # NOTE: ObsPy does not store station metadata with each trace.
                # NOTE: We need to read them in separately from station metadata.
                availability = self.get_availability(network, station, location, channel, _starttime, _endtime)
                # First availability row is assumed to describe this trace --
                # TODO confirm ordering guarantees of get_availability().
                sensor = availability.instrument[0]
                scale = availability.scale[0]
                scalefreq = availability.scalefreq[0]
                scaleunits = availability.scaleunits[0]
                if sensor is None: sensor = ""           # default from IRISSeismic Trace class prototype
                if scale is None: scale = 1.0            # default from IRISSeismic Trace class prototype
                if scalefreq is None: scalefreq = 1.0    # default from IRISSeismic Trace class prototype
                if scaleunits is None: scaleunits = ""   # default from IRISSeismic Trace class prototype
                latitude = availability.latitude[0]
                longitude = availability.longitude[0]
                elevation = availability.elevation[0]
                depth = availability.depth[0]
                azimuth = availability.azimuth[0]
                dip = availability.dip[0]
                # Create the IRISSeismic version of the stream
                r_stream = irisseismic.R_Stream(py_stream, _starttime, _endtime, act_flags, io_flags, dq_flags,
                                                sensor, scale, scalefreq, scaleunits, latitude, longitude, elevation, depth, azimuth, dip)
            except Exception as e:
                err_msg = "Error reading in local waveform from %s" % filepath
                self.logger.debug(e)
                self.logger.error(err_msg)
                raise
    else:
        # Read from FDSN web services
        try:
            r_stream = irisseismic.R_getDataselect(self.dataselect_url, network, station, location, channel, _starttime, _endtime, quality, inclusiveEnd, ignoreEpoch)
        except Exception as e:
            err_msg = "Error reading in waveform from %s webservice" % self.dataselect_client
            self.logger.debug(e)
            self.logger.error(err_msg)
            raise

    # TODO: Do we need to test for valid R_Stream.
    if False:
        return None # TODO: raise an exception
    else:
        return r_stream
def get_event(self,
              starttime=None, endtime=None,
              minmag=5.5, maxmag=None, magtype=None,
              mindepth=None, maxdepth=None):
    """
    Returns a dataframe of events returned by the `event_url` source
    specified in the `user_request` object used to initialize the
    `Concierge`.

    By default, information in the `user_request` is used to generate
    a FDSN webservices request for event data
    (http://service.iris.edu/fdsnws/event/1/). Where arguments are
    provided, these are used to override the information found in
    `user_request`.

    :type starttime: :class:`~obspy.core.utcdatetime.UTCDateTime`
    :param starttime: Limit to events occurring on or after the
        specified start time.
    :type endtime: :class:`~obspy.core.utcdatetime.UTCDateTime`
    :param endtime: Limit to events occurring on or before the
        specified end time.
    :type minmag: float, optional
    :param minmag: Limit to events with a magnitude larger than the
        specified minimum.
    :type maxmag: float, optional
    :param maxmag: Limit to events with a magnitude smaller than the
        specified maximum.
    :type magtype: str, optional
    :param magtype: Specify a magnitude type to use for testing the
        minimum and maximum limits.
    :type mindepth: float, optional
    :param mindepth: Limit to events with depth, in kilometers, larger than
        the specified minimum.
    :type maxdepth: float, optional
    :param maxdepth: Limit to events with depth, in kilometers, smaller
        than the specified maximum.

    :returns: pandas dataframe of event data with the IRIS event
        webservice column layout (eventId, time, latitude, longitude,
        depth, author, cCatalog, contributor, contributorId, magType,
        magnitude, magAuthor, eventLocationName), or ``None`` when no
        events matched.
    """
    # Allow arguments to override UserRequest parameters
    if starttime is None:
        _starttime = self.requested_starttime
    else:
        _starttime = starttime
    if endtime is None:
        _endtime = self.requested_endtime
    else:
        _endtime = endtime

    if self.event_client is None:
        # No FDSN event client configured: event_url points at a local
        # QuakeML file that we parse with ObsPy.
        try:
            event_catalog = obspy.read_events(self.event_url)
        except Exception as e:
            # BUG FIX: the original message referenced self.station_url and
            # said "StationXML"; this branch reads the QuakeML file at
            # self.event_url.
            err_msg = "The QuakeML file: '%s' is not valid." % self.event_url
            self.logger.debug(e)
            self.logger.error(err_msg)
            raise ValueError(err_msg)
        # Build one single-row dataframe per event, then concatenate.
        dataframes = []
        for event in event_catalog:
            origin = event.preferred_origin()
            magnitude = event.preferred_magnitude()
            df = pd.DataFrame({'eventId': re.sub('.*eventid=','',event.resource_id.id),
                               'time': origin.time,
                               'latitude': origin.latitude,
                               'longitude': origin.longitude,
                               'depth': origin.depth/1000, # IRIS event webservice returns depth in km # TODO: check this
                               'author': origin.creation_info.author,
                               'cCatalog': None,
                               'contributor': None,
                               'contributorId': None,
                               'magType': magnitude.magnitude_type,
                               'magnitude': magnitude.mag,
                               'magAuthor': magnitude.creation_info.author,
                               'eventLocationName': event.event_descriptions[0].text},
                              index=[0])
            dataframes.append(df)
        # Concatenate into the events dataframe
        events = pd.concat(dataframes, ignore_index=True)
    else:
        # Read from FDSN web services
        # TODO: Need to make sure irisseismic.getEvent uses any FDSN site
        try:
            events = irisseismic.getEvent(starttime=_starttime,
                                          endtime=_endtime,
                                          minmag=minmag,
                                          maxmag=maxmag,
                                          magtype=magtype,
                                          mindepth=mindepth,
                                          maxdepth=maxdepth)
        except Exception as e:
            err_msg = "The event_url: '%s' returns an error" % (self.event_url)
            self.logger.debug(e)
            self.logger.error(err_msg)
            raise

    if events.shape[0] == 0:
        return None # TODO: raise an exception
    else:
        return events
if __name__ == '__main__':
    # Run this module's doctests when executed directly.
    import doctest
    doctest.testmod(exclude_empty=True)
| gpl-3.0 |
moosemaniam/eye_detect | eye_detect.py | 1 | 2293 | #!/usr/bin/env python
import cv
import cv2
def face_detect(imcolor):
    """Detect faces and eyes in *imcolor* with Haar cascades, keep only
    eyes inside faces and faces containing eyes, draw rectangles around
    them, and show the annotated image in a window."""
    # imcolor = cv.LoadImage('anush2.jpg') # input image
    # Cascade files are loaded from the current working directory on every call.
    haarFace = cv.Load('haarcascade_frontalface_default.xml')
    haarEyes = cv.Load('haarcascade_eye.xml')
    # running the classifiers
    storage = cv.CreateMemStorage()
    detectedFace = cv.HaarDetectObjects(imcolor, haarFace, storage)
    detectedEyes = cv.HaarDetectObjects(imcolor, haarEyes, storage)
    actual_faces=[]
    actual_eyes=[]
    # draw a green rectangle where the face is detected
    # draw a purple rectangle where the eye is detected
    if detectedEyes:
        count=0  # NOTE(review): unused
        for eye in detectedEyes:
            #Only detect eye if within face
            # NOTE(review): only the left x-coordinate is compared
            # (eye[0][0] >= face[0][0]); a full containment test would also
            # check y and the right/bottom edges -- confirm intended.
            for face in detectedFace:
                if(eye[0][0] >= face[0][0]):
                    actual_eyes.append(eye)
        #Only actual face if , face has eye in it
        for face in detectedFace:
            eye_present=False
            for eye in actual_eyes:
                # NOTE(review): eye[0][3] <= face[0][3] compares heights, not
                # bottom edges -- looks like an approximation; verify.
                if((eye[0][0]>= face[0][0]) and (eye[0][3]<= face[0][3])):
                    eye_present = eye_present | True
            if(eye_present==True):
                actual_faces.append(face)
    # Each detection is ((x, y, w, h), neighbors); draw the kept ones.
    if detectedEyes:
        for eye in actual_eyes:
            cv.Rectangle(imcolor,(eye[0][0],eye[0][1]),
                         (eye[0][0]+eye[0][2],eye[0][1]+eye[0][3]),
                         cv.RGB(155, 55, 200),2)
    if detectedFace:
        for face in actual_faces:
            cv.Rectangle(imcolor,(face[0][0],face[0][1]),
                         (face[0][0]+face[0][2],face[0][1]+face[0][3]),
                         cv.RGB(155, 255, 25),2)
    cv.NamedWindow('Face Detection', cv.CV_WINDOW_AUTOSIZE)
    cv.ShowImage('Face Detection', imcolor)
class Target:
    """Capture frames from the default webcam and run face/eye detection
    on each frame until ESC is pressed."""

    def __init__(self):
        # Device 0 = default system camera.
        self.capture = cv.CaptureFromCAM(0)
        #cv.NamedWindow("Target", 1)

    def run(self):
        # Capture first frame to get size
        frame = cv.QueryFrame(self.capture)
        frame_size = cv.GetSize(frame)  # NOTE(review): frame_size unused
        color_image = cv.CreateImage(cv.GetSize(frame), 8, 3)
        while True:
            color_image = cv.QueryFrame(self.capture)
            face_detect(color_image)
            # WaitKey also pumps the GUI event loop; 27 == ESC key code.
            c = cv.WaitKey(7) % 0x100
            if c == 27:
                break
if __name__=="__main__":
t = Target()
t.run()
| gpl-2.0 |
derekbrameyer/white-elephant | main.py | 1 | 8365 | __author__ = 'derekbrameyer'
import random
import json
import datetime
def main():
maxstealcount = 2
currentturn=1
boolines = json.loads(open("boo_lines.json").read())
reportlines = json.loads(open("report_lines.json").read())
print greenify("\nWelcome to White Elephant! Please input names line by line. When you are finished inputting names, press enter on a blank line.\n")
fullname = "tester"
participants = []
gifts = []
while fullname:
fullname = raw_input(greenify("Participant name: "))
participants.append(Participant(fullname, None))
participants.pop()
global should_save
should_save = raw_input(greenify("\nType 1 to also generate a document of the game: "))
if should_save is '1':
should_save = True
global save_document
filename = datetime.datetime.now().strftime("%Y%m%d") + "_" + datetime.datetime.now().strftime("%H%M") + "_white_elephant.txt"
print filename
save_document = open(filename, "w")
print "\nRandomizing the order...\n"
random.shuffle(participants)
participants.reverse()
firstparticipant = participants.pop()
print_and_save("=======================", True)
print_and_save(" TURN 1", True)
print_and_save("=======================", True)
print_and_save(firstparticipant.fullname + " is up first! What gift did they get?", False)
giftname = raw_input(greenify("The gift is a/an: "))
save_to_file("The gift is a/an: " + giftname)
gift = Gift(giftname, 0, firstparticipant)
firstparticipant.gift = gift
gifts.append(gift)
# 0 = steal
# 1 = pick new gift
previous_action = 1
while len(participants) > 0 or nextparticipant is not None:
# only iterate through the list if it was a random gift last time
if previous_action == 1:
nextparticipant = participants.pop()
currentturn += 1
giftsinturn = list(gifts)
if previous_action == 0:
print_and_save("\n\nWelp, we're on to " + nextparticipant.fullname + ". Are they stealing or picking a new gift?", False)
else:
print_and_save("Cool! An amazing " + gift.name + "! What a gift!\n\n", False)
print_and_save("=======================", True)
print_and_save(" TURN " + str(currentturn), True)
print_and_save("=======================", True)
print_and_save("Now we're on to " + nextparticipant.fullname + ". Are they stealing or picking a new gift?", False)
if len(giftsinturn) > 0:
action = raw_input(greenify("Input 1 to steal or 2 to pick a new gift: "))
save_to_file("Input 1 to steal or 2 to pick a new gift: " + action)
else:
print_and_save("Actually, looks like there are no gifts left to steal! Moving on...", False)
action = "0"
if action == "1":
print_and_save("We're stealing! What does " + nextparticipant.fullname + " want?\n", False)
displayCount = 1
for gift in giftsinturn:
print_and_save("Gift " + str(displayCount) + ": " + gift.name + " (Owner: " + gift.owner.fullname + ", Steals: " + str(gift.steals) + ")", False)
displayCount += 1
giftstealcount = maxstealcount + 1
while giftstealcount >= maxstealcount:
giftselection = raw_input(greenify("\nGift to steal (a number): "))
save_to_file("\nGift to steal (a number): " + giftselection)
stolengift = giftsinturn[int(giftselection) - 1]
giftstealcount = stolengift.steals
if giftstealcount >= maxstealcount:
print_and_save("I can't let you do that Star Fox! You'll have to select another gift.", False)
giftsinturn.remove(stolengift)
newowner = nextparticipant
nextparticipant = stolengift.owner
nextparticipant.gift = None
stolengift.owner = newowner
newowner.gift = stolengift
stolengift.steals += 1
random.shuffle(boolines)
print_and_save(boolines[0] % (newowner.fullname, stolengift.name, nextparticipant.fullname), False)
# TODO If gift has max steals, print something
if stolengift.steals >= maxstealcount:
print_and_save("Congrats to " + stolengift.owner.fullname + " for being the true owner of a shiny new " + stolengift.name + "!", False)
previous_action = 0
else:
print_and_save("What gift did " + nextparticipant.fullname + " get?", False)
giftname = raw_input(greenify("The gift is a/an: "))
save_to_file("The gift is a/an: " + giftname)
gift = Gift(giftname, 0, nextparticipant)
nextparticipant.gift = gift
gifts.append(gift)
previous_action = 1
nextparticipant = None
# wrap up with the first participant optionally stealing again
if previous_action == 0:
print_and_save("\n\n", False)
print_and_save("=======================", True)
print_and_save(" LAST TURN", True)
print_and_save("=======================", True)
print_and_save("Welp, we're almost done. Back to " + firstparticipant.fullname + ", who has the option to force a swap!", False)
else:
print_and_save("Cool! An amazing " + gift.name + "! What a gift!\n\n", False)
print_and_save("=======================", True)
print_and_save(" LAST TURN", True)
print_and_save("=======================", True)
print_and_save("Back to " + firstparticipant.fullname + ", who has the option to force a swap!", False)
print_and_save("Select the gift to swap for. If they're not swapping, input 0.\n", False)
displayCount = 1
owner_pivot_idx = 0
for gift in gifts:
# don't display the owner's gift
if gift.owner.fullname is firstparticipant.fullname:
owner_pivot_idx = displayCount - 1
continue
print_and_save("Gift " + str(displayCount) + ": " + gift.name + " (Owner: " + gift.owner.fullname + ", Steals: " + str(gift.steals) + ")", False)
displayCount += 1
giftstealcount = maxstealcount + 1
while giftstealcount >= maxstealcount:
giftselection = raw_input(greenify("\nGift to swap (a number): "))
save_to_file("\nGift to swap (a number): " + giftselection)
if giftselection == "0":
print_and_save("No swap! What a pal.", False)
break
if giftselection > owner_pivot_idx:
giftselection = str(int(giftselection) + 1)
gifttoswap = gifts[int(giftselection) - 1]
giftstealcount = gifttoswap.steals
if giftstealcount >= maxstealcount:
print_and_save("I can't let you do that Star Fox! You'll have to select another gift.", False)
if giftselection != "0":
oldowner = gifttoswap.owner
swappedgift = firstparticipant.gift
swappedgift.owner = oldowner
oldowner.gift = swappedgift
gifttoswap.owner = firstparticipant
firstparticipant.gift = gifttoswap
print_and_save("\nThat's a wrap! Here's what everyone ended up with:\n", False)
for gift in gifts:
random.shuffle(reportlines)
print_and_save(reportlines[0] % (gift.owner.fullname, gift.name), False)
print_and_save("\n\n", False)
if should_save is '1':
save_file.close()
def greenify(string):
    """Wrap *string* in ANSI escape codes so terminals render it green."""
    # SGR code 32 selects a green foreground; "\x1b[0m" resets afterwards.
    codes = ';'.join(['32'])
    return '\x1b[' + codes + 'm' + string + '\x1b[0m'
def print_and_save(string, should_greenify):
if should_greenify:
print greenify(string)
else:
print string
save_to_file(string)
def save_to_file(string):
    """Append *string* plus a newline to the game transcript, if enabled.

    Uses the module globals `should_save` (gate) and `save_document`
    (open file handle set up by main()).
    """
    global should_save, save_document
    if not should_save:
        return
    save_document.write(string + "\n")
class Participant(object):
    """One white-elephant player and the gift they currently hold."""

    def __init__(self, name, gift):
        # A player starts with gift=None until their first turn resolves.
        self.fullname, self.gift = name, gift
class Gift(object):
    """A gift in play: its label, how often it was stolen, and its owner."""

    def __init__(self, name, steals, owner):
        self.name, self.steals, self.owner = name, steals, owner
# Module-level transcript state; main() rebinds these via `global`.
should_save = False
save_document = None

if __name__ == "__main__":
    main()
Nesiehr/osf.io | scripts/tests/test_migrate_profile_websites.py | 40 | 3131 | from tests.factories import UserFactory
from nose.tools import *
from tests.base import OsfTestCase
from scripts.migration.migrate_personal_to_profile_websites import main, get_users_with_social_field
class TestMigrateProfileWebsites(OsfTestCase):
    """Tests for the script that migrates the legacy `social['personal']`
    field to the list-valued `social['profileWebsites']`."""

    def setUp(self):
        super(TestMigrateProfileWebsites, self).setUp()
        # user_one still carries the legacy single-URL 'personal' key and
        # should be rewritten by the migration.
        self.user_one = UserFactory.build(
            fullname='Martin Luther King',
            social=dict(
                github='userOneGithub',
                scholar='userOneScholar',
                personal='http://www.useronewebsite.com',
                twitter='userOneTwitter',
                linkedIn='userOneLinkedIn',
                impactStory='userOneImpactStory',
                orcid='userOneOrcid',
                researcherId='userOneResearcherId',
            ),
        )
        self.user_one.save()
        # user_two already uses the new 'profileWebsites' list and must pass
        # through the migration unchanged.
        self.user_two = UserFactory.build(
            fullname='el-Hajj Malik el-Shabazz',
            social=dict(
                github='userTwoGithub',
                scholar='userTwoScholar',
                profileWebsites=['http://www.usertwowebsite.com'],
                twitter='userTwoTwitter',
                linkedIn='userTwoLinkedIn',
                impactStory='userTwoImpactStory',
                orcid='userTwoOrcid',
                researcherId='userTwoResearcherId'
            )
        )
        self.user_two.save()
        # user_three has no social data at all and must be ignored.
        self.user_three = UserFactory()

    def tearDown(self):
        super(TestMigrateProfileWebsites, self).tearDown()
        # NOTE(review): user_three is not removed here -- presumably cleaned
        # up by OsfTestCase teardown; confirm.
        self.user_one.remove()
        self.user_two.remove()

    def test_get_users_with_social_field(self):
        # Only the two users with a populated social dict should be returned.
        users = []
        for user in get_users_with_social_field():
            users.append(user._id)
        assert_in(self.user_one._id, users)
        assert_in(self.user_two._id, users)
        assert_equal(len(users), 2)

    def test_migrate_profile_websites(self):
        # Run the migration, then verify 'personal' became 'profileWebsites'
        # and every other social key survived untouched.
        main()
        self.user_one.reload()
        assert_equal(self.user_one.social['scholar'], 'userOneScholar')
        assert_equal(self.user_one.social['profileWebsites'], ['http://www.useronewebsite.com'])
        assert_equal(self.user_one.social['twitter'], 'userOneTwitter')
        assert_equal(self.user_one.social['linkedIn'], 'userOneLinkedIn')
        assert_equal(self.user_one.social['impactStory'], 'userOneImpactStory')
        assert_equal(self.user_one.social['orcid'], 'userOneOrcid')
        assert_equal(self.user_one.social['researcherId'], 'userOneResearcherId')
        self.user_two.reload()
        assert_equal(self.user_two.social['scholar'], 'userTwoScholar')
        assert_equal(self.user_two.social['profileWebsites'], ['http://www.usertwowebsite.com'])
        assert_equal(self.user_two.social['twitter'], 'userTwoTwitter')
        assert_equal(self.user_two.social['linkedIn'], 'userTwoLinkedIn')
        assert_equal(self.user_two.social['impactStory'], 'userTwoImpactStory')
        assert_equal(self.user_two.social['orcid'], 'userTwoOrcid')
        assert_equal(self.user_two.social['researcherId'], 'userTwoResearcherId')
        assert_equal(self.user_three.social, {})
| apache-2.0 |
Tejal011089/trufil-erpnext | erpnext/accounts/doctype/bank_reconciliation/bank_reconciliation.py | 23 | 2290 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import flt, getdate, nowdate
from frappe import msgprint, _
from frappe.model.document import Document
class BankReconciliation(Document):
    """Frappe document for reconciling bank-account journal entries:
    loads candidate Journal Entries into a child table and writes back
    user-entered clearance dates."""

    def get_details(self):
        """Populate the `journal_entries` child table with Journal Entries
        posted against `bank_account` in the selected date range, and
        compute `total_amount` (sum of debit - credit)."""
        if not (self.bank_account and self.from_date and self.to_date):
            msgprint("Bank Account, From Date and To Date are Mandatory")
            return

        # Unless reconciled entries are explicitly included, restrict to
        # rows whose clearance_date is still empty.
        condition = ""
        if not self.include_reconciled_entries:
            condition = "and ifnull(clearance_date, '') in ('', '0000-00-00')"

        # The outer % substitution re-inserts literal '%s' placeholders while
        # splicing the optional `condition` into the SQL text; the actual
        # values are still passed parameterized in the second argument.
        dl = frappe.db.sql("""select t1.name, t1.cheque_no, t1.cheque_date, t2.debit,
                t2.credit, t1.posting_date, t2.against_account, t1.clearance_date
            from
                `tabJournal Entry` t1, `tabJournal Entry Account` t2
            where
                t2.parent = t1.name and t2.account = %s
                and t1.posting_date >= %s and t1.posting_date <= %s and t1.docstatus=1
                and ifnull(t1.is_opening, 'No') = 'No' %s
                order by t1.posting_date""" %
            ('%s', '%s', '%s', condition), (self.bank_account, self.from_date, self.to_date), as_dict=1)

        # Rebuild the child table from scratch with the query results.
        self.set('journal_entries', [])
        self.total_amount = 0.0
        for d in dl:
            nl = self.append('journal_entries', {})
            nl.posting_date = d.posting_date
            nl.voucher_id = d.name
            nl.cheque_number = d.cheque_no
            nl.cheque_date = d.cheque_date
            nl.debit = d.debit
            nl.credit = d.credit
            nl.against_account = d.against_account
            nl.clearance_date = d.clearance_date
            self.total_amount += flt(d.debit) - flt(d.credit)

    def update_details(self):
        """Write user-entered clearance dates back to the Journal Entries,
        validating that a clearance date is not before the cheque date."""
        vouchers = []
        for d in self.get('journal_entries'):
            if d.clearance_date:
                if d.cheque_date and getdate(d.clearance_date) < getdate(d.cheque_date):
                    frappe.throw(_("Clearance date cannot be before check date in row {0}").format(d.idx))

                # NOTE(review): clearance_date is updated twice -- once via
                # set_value and again via raw SQL (which also bumps
                # `modified`); the first call looks redundant. Confirm.
                frappe.db.set_value("Journal Entry", d.voucher_id, "clearance_date", d.clearance_date)
                frappe.db.sql("""update `tabJournal Entry` set clearance_date = %s, modified = %s
                    where name=%s""", (d.clearance_date, nowdate(), d.voucher_id))
                vouchers.append(d.voucher_id)

        if vouchers:
            msgprint("Clearance Date updated in: {0}".format(", ".join(vouchers)))
        else:
            msgprint(_("Clearance Date not mentioned"))
| agpl-3.0 |
patriciolobos/desa8 | openerp/report/render/makohtml2html/__init__.py | 381 | 1084 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from makohtml2html import parseNode
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
algolia/mongo-connector | setup.py | 3 | 4391 | # Copyright 2013-2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# PyPI trove classifiers; split into a list and passed to setup() below.
classifiers = """\
Development Status :: 4 - Beta
Intended Audience :: Developers
License :: OSI Approved :: Apache Software License
Programming Language :: Python :: 2.6
Programming Language :: Python :: 2.7
Programming Language :: Python :: 3.3
Programming Language :: Python :: 3.4
Topic :: Database
Topic :: Software Development :: Libraries :: Python Modules
Operating System :: Unix
Operating System :: MacOS :: MacOS X
Operating System :: Microsoft :: Windows
Operating System :: POSIX
"""
import os
import platform
import sys
from distutils.core import Command
from distutils.dir_util import mkpath, remove_tree
from distutils.file_util import copy_file
try:
from setuptools import setup
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup
# Extra keyword arguments merged into the setup() call at the bottom.
extra_opts = {"test_suite": "tests",
              "tests_require": ["mongo-orchestration>=0.2", "requests>=2.5.1"]}
if sys.version_info[:2] == (2, 6):
    # Need unittest2 to run unittests in Python 2.6
    extra_opts["tests_require"].append("unittest2")
    extra_opts["test_suite"] = "unittest2.collector"

# Use the README as the long description when it is present.
try:
    with open("README.rst", "r") as fd:
        extra_opts['long_description'] = fd.read()
except IOError:
    pass  # Install without README.rst
class InstallService(Command):
    """setup.py command: install Mongo Connector as a Linux init.d daemon.

    Copies the default config to /etc/mongo-connector.json and the init
    script to /etc/init.d; requires Linux and root privileges.
    """
    description = "Installs Mongo Connector as a Linux system daemon"
    user_options = []

    def initialize_options(self):
        # Required by the Command interface; this command takes no options.
        pass

    def finalize_options(self):
        pass

    def run(self):
        if platform.system() != 'Linux':
            print("Must be running Linux")
        elif os.geteuid() > 0:
            # Writing under /etc and /var/log needs root.
            print("Must be root user")
        else:
            mkpath("/var/log/mongo-connector")
            mkpath("/etc/init.d")
            # Paths are relative to the repository root (cwd of setup.py).
            copy_file("./config.json", "/etc/mongo-connector.json")
            copy_file("./scripts/mongo-connector",
                      "/etc/init.d/mongo-connector")
class UninstallService(Command):
    """setup.py command: remove the files installed by InstallService.

    Deletes the log directory, the config file and the init script;
    requires Linux and root privileges.
    """
    description = "Uninstalls Mongo Connector as a Linux system daemon"
    user_options = []

    def initialize_options(self):
        # Required by the Command interface; this command takes no options.
        pass

    def finalize_options(self):
        pass

    def remove_file(self, path):
        # Remove a single file if it exists, mirroring distutils' chatter.
        if os.path.exists(path):
            os.remove(path)
            print("removing '%s'" % path)

    def run(self):
        if platform.system() != 'Linux':
            print("Must be running Linux")
        elif os.geteuid() > 0:
            print("Must be root user")
        else:
            if os.path.exists("/var/log/mongo-connector"):
                remove_tree("/var/log/mongo-connector")
            self.remove_file("/etc/mongo-connector.json")
            self.remove_file("/etc/init.d/mongo-connector")
# Register the custom service (un)install commands with setup().
extra_opts['cmdclass'] = {
    "install_service": InstallService,
    "uninstall_service": UninstallService
}

setup(name='mongo-connector',
      version="2.1.dev0",
      author="MongoDB, Inc.",
      author_email='mongodb-user@googlegroups.com',
      description='Mongo Connector',
      keywords=['mongo-connector', 'mongo', 'mongodb', 'solr', 'elasticsearch'],
      url='https://github.com/10gen-labs/mongo-connector',
      license="http://www.apache.org/licenses/LICENSE-2.0.html",
      platforms=["any"],
      # NOTE(review): on Python 3, filter() returns an iterator rather than a
      # list -- confirm setuptools accepts that for `classifiers`.
      classifiers=filter(None, classifiers.split("\n")),
      install_requires=['pymongo >= 2.7.2, < 3.0.0',
                        'pysolr >= 3.1.0',
                        'elasticsearch >= 1.2',
                        'algoliasearch >= 1.5.4'],
      packages=["mongo_connector", "mongo_connector.doc_managers"],
      package_data={
          'mongo_connector.doc_managers': ['schema.xml']
      },
      entry_points={
          'console_scripts': [
              'mongo-connector = mongo_connector.connector:main',
          ],
      },
      **extra_opts
      )
| apache-2.0 |
piotroxp/scibibscan | scib/lib/python3.5/site-packages/astropy/constants/si.py | 1 | 5527 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Astronomical and physics constants in SI units. See :mod:`astropy.constants`
for a complete listing of constants defined in Astropy.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
from .constant import Constant, EMConstant
# PHYSICAL CONSTANTS
# Values below cite 'CODATA 2010' (or the noted source); constants built
# from other constants (hbar, pc, kpc) carry a derived reference instead.

# Planck constant
h = Constant('h', "Planck constant", 6.62606957e-34, 'J s', 0.00000029e-34,
             'CODATA 2010', system='si')

# Reduced Planck constant (h / 2pi, derived from h above)
hbar = Constant('hbar', "Reduced Planck constant", h.value * 0.5 / np.pi,
                'J s', h.uncertainty * 0.5 / np.pi, h.reference, system='si')

# Boltzmann constant
k_B = Constant('k_B', "Boltzmann constant", 1.3806488e-23, 'J / (K)',
               0.0000013e-23, 'CODATA 2010', system='si')

# Speed of light (exact by definition, hence zero uncertainty)
c = Constant('c', "Speed of light in vacuum", 2.99792458e8, 'm / (s)', 0.,
             'CODATA 2010', system='si')

# Gravitational constant
G = Constant('G', "Gravitational constant", 6.67384e-11, 'm3 / (kg s2)',
             0.00080e-11, 'CODATA 2010', system='si')

# Standard acceleration of gravity
g0 = Constant('g0', "Standard acceleration of gravity", 9.80665, 'm / s2', 0.0,
              'CODATA 2010', system='si')

# Proton mass
m_p = Constant('m_p', "Proton mass", 1.672621777e-27, 'kg', 0.000000074e-27,
               'CODATA 2010', system='si')

# Neutron mass
m_n = Constant('m_n', "Neutron mass", 1.674927351e-27, 'kg', 0.000000074e-27,
               'CODATA 2010', system='si')

# Electron mass
m_e = Constant('m_e', "Electron mass", 9.10938291e-31, 'kg', 0.00000040e-31,
               'CODATA 2010', system='si')

# Atomic mass
u = Constant('u', "Atomic mass", 1.660538921e-27, 'kg', 0.000000073e-27,
             'CODATA 2010', system='si')

# Stefan-Boltzmann constant
sigma_sb = Constant('sigma_sb', "Stefan-Boltzmann constant", 5.670373e-8,
                    'W / (K4 m2)', 0.000021e-8, 'CODATA 2010', system='si')

# Electron charge; EM constants require a system to be specified
e = EMConstant('e', 'Electron charge', 1.602176565e-19, 'C', 0.000000035e-19,
               'CODATA 2010', system='si')

# Electric constant
eps0 = EMConstant('eps0', 'Electric constant', 8.854187817e-12, 'F/m', 0.0,
                  'CODATA 2010', system='si')

# Avogadro's number
N_A = Constant('N_A', "Avogadro's number", 6.02214129e23, '1 / (mol)',
               0.00000027e23, 'CODATA 2010', system='si')

# Gas constant
R = Constant('R', "Gas constant", 8.3144621, 'J / (K mol)', 0.0000075,
             'CODATA 2010', system='si')

# Rydberg constant
Ryd = Constant('Ryd', 'Rydberg constant', 10973731.568539, '1 / (m)', 0.000055,
               'CODATA 2010', system='si')

# Bohr radius
a0 = Constant('a0', "Bohr radius", 0.52917721092e-10, 'm', 0.00000000017e-10,
              'CODATA 2010', system='si')

# Bohr magneton
muB = Constant('muB', "Bohr magneton", 927.400968e-26, 'J/T', 0.00002e-26,
               'CODATA 2010', system='si')

# Fine structure constant (dimensionless, hence the empty unit string)
alpha = Constant('alpha', "Fine-structure constant", 7.2973525698e-3, '',
                 0.0000000024e-3, 'CODATA 2010', system='si')

# Atmosphere
atm = Constant('atmosphere', "Atmosphere", 101325, 'Pa', 0.0,
               'CODATA 2010', system='si')

# Magnetic constant (exact: 4*pi*1e-7 by definition)
mu0 = Constant('mu0', "Magnetic constant", 4.0e-7 * np.pi, 'N/A2', 0.0,
               'CODATA 2010', system='si')


# DISTANCE

# Astronomical Unit
au = Constant('au', "Astronomical Unit", 1.49597870700e11, 'm', 0.0,
              "IAU 2012 Resolution B2", system='si')

# Parsec (derived: au / tan(1 arcsecond))
pc = Constant('pc', "Parsec", au.value / np.tan(np.radians(1. / 3600.)), 'm',
              au.uncertainty / np.tan(np.radians(1. / 3600.)),
              "Derived from au", system='si')

# Kiloparsec
kpc = Constant('kpc', "Kiloparsec",
               1000. * au.value / np.tan(np.radians(1. / 3600.)), 'm',
               1000. * au.uncertainty / np.tan(np.radians(1. / 3600.)),
               "Derived from au", system='si')

# Wien wavelength displacement law constant
b_wien = Constant('b_wien', 'Wien wavelength displacement law constant',
                  2.8977721e-3, 'm K', 0.0000026e-3, 'CODATA 2010', system='si')


# SOLAR QUANTITIES

# Solar luminosity
L_sun = Constant('L_sun', "Solar luminosity", 3.846e26, 'W', 0.0005e26,
                 "Allen's Astrophysical Quantities 4th Ed.", system='si')

# Solar mass
M_sun = Constant('M_sun', "Solar mass", 1.9891e30, 'kg', 0.00005e30,
                 "Allen's Astrophysical Quantities 4th Ed.", system='si')

# Solar radius
R_sun = Constant('R_sun', "Solar radius", 6.95508e8, 'm', 0.00026e8,
                 "Allen's Astrophysical Quantities 4th Ed.", system='si')


# OTHER SOLAR SYSTEM QUANTITIES

# Jupiter mass
M_jup = Constant('M_jup', "Jupiter mass", 1.8987e27, 'kg', 0.00005e27,
                 "Allen's Astrophysical Quantities 4th Ed.", system='si')

# Jupiter equatorial radius
R_jup = Constant('R_jup', "Jupiter equatorial radius", 7.1492e7, 'm',
                 0.00005e7, "Allen's Astrophysical Quantities 4th Ed.",
                 system='si')

# Earth mass
M_earth = Constant('M_earth', "Earth mass", 5.9742e24, 'kg', 0.00005e24,
                   "Allen's Astrophysical Quantities 4th Ed.", system='si')

# Earth equatorial radius
R_earth = Constant('R_earth', "Earth equatorial radius", 6.378136e6, 'm',
                   0.0000005e6, "Allen's Astrophysical Quantities 4th Ed.",
                   system='si')
| mit |
akaminsky/ghost_blog | node_modules/grunt-docker/node_modules/docker/node_modules/pygmentize-bundled/vendor/pygments/build-2.7/pygments/styles/monokai.py | 364 | 5080 | # -*- coding: utf-8 -*-
"""
pygments.styles.monokai
~~~~~~~~~~~~~~~~~~~~~~~
Mimic the Monokai color scheme. Based on tango.py.
http://www.monokai.nl/blog/2006/07/15/textmate-color-theme/
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, Text, \
Number, Operator, Generic, Whitespace, Punctuation, Other, Literal
class MonokaiStyle(Style):
    """
    This style mimics the Monokai color scheme.
    """

    # Canvas and selection colours of the scheme.
    background_color = "#272822"
    highlight_color = "#49483e"

    # Token type -> style string.  An empty string means "inherit from the
    # nearest ancestor token type"; the trailing comments give the short CSS
    # class Pygments emits for each token.
    styles = {
        # No corresponding class for the following:
        Text:                      "#f8f8f2", # class: ''
        Whitespace:                "",        # class: 'w'
        Error:                     "#960050 bg:#1e0010", # class: 'err'
        Other:                     "",        # class 'x'

        Comment:                   "#75715e", # class: 'c'
        Comment.Multiline:         "",        # class: 'cm'
        Comment.Preproc:           "",        # class: 'cp'
        Comment.Single:            "",        # class: 'c1'
        Comment.Special:           "",        # class: 'cs'

        Keyword:                   "#66d9ef", # class: 'k'
        Keyword.Constant:          "",        # class: 'kc'
        Keyword.Declaration:       "",        # class: 'kd'
        Keyword.Namespace:         "#f92672", # class: 'kn'
        Keyword.Pseudo:            "",        # class: 'kp'
        Keyword.Reserved:          "",        # class: 'kr'
        Keyword.Type:              "",        # class: 'kt'

        Operator:                  "#f92672", # class: 'o'
        Operator.Word:             "",        # class: 'ow' - like keywords

        Punctuation:               "#f8f8f2", # class: 'p'

        Name:                      "#f8f8f2", # class: 'n'
        Name.Attribute:            "#a6e22e", # class: 'na' - to be revised
        Name.Builtin:              "",        # class: 'nb'
        Name.Builtin.Pseudo:       "",        # class: 'bp'
        Name.Class:                "#a6e22e", # class: 'nc' - to be revised
        Name.Constant:             "#66d9ef", # class: 'no' - to be revised
        Name.Decorator:            "#a6e22e", # class: 'nd' - to be revised
        Name.Entity:               "",        # class: 'ni'
        Name.Exception:            "#a6e22e", # class: 'ne'
        Name.Function:             "#a6e22e", # class: 'nf'
        Name.Property:             "",        # class: 'py'
        Name.Label:                "",        # class: 'nl'
        Name.Namespace:            "",        # class: 'nn' - to be revised
        Name.Other:                "#a6e22e", # class: 'nx'
        Name.Tag:                  "#f92672", # class: 'nt' - like a keyword
        Name.Variable:             "",        # class: 'nv' - to be revised
        Name.Variable.Class:       "",        # class: 'vc' - to be revised
        Name.Variable.Global:      "",        # class: 'vg' - to be revised
        Name.Variable.Instance:    "",        # class: 'vi' - to be revised

        Number:                    "#ae81ff", # class: 'm'
        Number.Float:              "",        # class: 'mf'
        Number.Hex:                "",        # class: 'mh'
        Number.Integer:            "",        # class: 'mi'
        Number.Integer.Long:       "",        # class: 'il'
        Number.Oct:                "",        # class: 'mo'

        Literal:                   "#ae81ff", # class: 'l'
        Literal.Date:              "#e6db74", # class: 'ld'

        String:                    "#e6db74", # class: 's'
        String.Backtick:           "",        # class: 'sb'
        String.Char:               "",        # class: 'sc'
        String.Doc:                "",        # class: 'sd' - like a comment
        String.Double:             "",        # class: 's2'
        String.Escape:             "#ae81ff", # class: 'se'
        String.Heredoc:            "",        # class: 'sh'
        String.Interpol:           "",        # class: 'si'
        String.Other:              "",        # class: 'sx'
        String.Regex:              "",        # class: 'sr'
        String.Single:             "",        # class: 's1'
        String.Symbol:             "",        # class: 'ss'

        Generic:                   "",        # class: 'g'
        Generic.Deleted:           "",        # class: 'gd',
        Generic.Emph:              "italic",  # class: 'ge'
        Generic.Error:             "",        # class: 'gr'
        Generic.Heading:           "",        # class: 'gh'
        Generic.Inserted:          "",        # class: 'gi'
        Generic.Output:            "",        # class: 'go'
        Generic.Prompt:            "",        # class: 'gp'
        Generic.Strong:            "bold",    # class: 'gs'
        Generic.Subheading:        "",        # class: 'gu'
        Generic.Traceback:         "",        # class: 'gt'
    }
| mit |
maellak/invenio | modules/miscutil/lib/textutils_unit_tests.py | 8 | 30192 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2008, 2009, 2010, 2011, 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Unit tests for the textutils library."""
__revision__ = "$Id$"
from invenio.testutils import InvenioTestCase
try:
import chardet
CHARDET_AVAILABLE = True
except ImportError:
CHARDET_AVAILABLE = False
try:
from unidecode import unidecode
UNIDECODE_AVAILABLE = True
except ImportError:
UNIDECODE_AVAILABLE = False
from invenio.textutils import \
wrap_text_in_a_box, \
guess_minimum_encoding, \
wash_for_xml, \
wash_for_utf8, \
decode_to_unicode, \
translate_latex2unicode, \
translate_to_ascii, \
strip_accents, \
transliterate_ala_lc, \
escape_latex, \
show_diff
from invenio.testutils import make_test_suite, run_test_suite
class GuessMinimumEncodingTest(InvenioTestCase):
    """Test functions related to guess_minimum_encoding function."""

    def test_guess_minimum_encoding(self):
        """textutils - guess_minimum_encoding."""
        # Pure ASCII input stays ASCII.
        self.assertEqual(guess_minimum_encoding('patata'), ('patata', 'ascii'))
        # Accented Latin text can be downgraded to latin1.
        self.assertEqual(guess_minimum_encoding('àèéìòù'), ('\xe0\xe8\xe9\xec\xf2\xf9', 'latin1'))
        # Greek text cannot be downgraded, so UTF-8 is kept.
        self.assertEqual(guess_minimum_encoding('Ιθάκη'), ('Ιθάκη', 'utf8'))
class WashForXMLTest(InvenioTestCase):
    """Test functions related to wash_for_xml function.

    Each pair of tests exercises the default XML 1.0 washing and the
    stricter-to-specify XML 1.1 washing (``xml_version='1.1'``) on the
    same input.
    """

    def test_latin_characters_washing_1_0(self):
        """textutils - washing latin characters for XML 1.0."""
        self.assertEqual(wash_for_xml('àèéìòùÀ'), 'àèéìòùÀ')

    def test_latin_characters_washing_1_1(self):
        """textutils - washing latin characters for XML 1.1."""
        self.assertEqual(wash_for_xml('àèéìòùÀ', xml_version='1.1'), 'àèéìòùÀ')

    def test_chinese_characters_washing_1_0(self):
        """textutils - washing chinese characters for XML 1.0."""
        self.assertEqual(wash_for_xml('''
春眠暁を覚えず
処処に啼鳥と聞く
夜来風雨の声
花落つること
知んぬ多少ぞ'''), '''
春眠暁を覚えず
処処に啼鳥と聞く
夜来風雨の声
花落つること
知んぬ多少ぞ''')

    def test_chinese_characters_washing_1_1(self):
        """textutils - washing chinese characters for XML 1.1."""
        self.assertEqual(wash_for_xml('''
春眠暁を覚えず
処処に啼鳥と聞く
夜来風雨の声
花落つること
知んぬ多少ぞ''', xml_version='1.1'), '''
春眠暁を覚えず
処処に啼鳥と聞く
夜来風雨の声
花落つること
知んぬ多少ぞ''')

    def test_greek_characters_washing_1_0(self):
        """textutils - washing greek characters for XML 1.0."""
        self.assertEqual(wash_for_xml('''
ἄνδρα μοι ἔννεπε, μου̂σα, πολύτροπον, ὃς μάλα πολλὰ
πλάγχθη, ἐπεὶ Τροίης ἱερὸν πτολίεθρον ἔπερσεν:
πολλω̂ν δ' ἀνθρώπων ἴδεν ἄστεα καὶ νόον ἔγνω,
πολλὰ δ' ὅ γ' ἐν πόντῳ πάθεν ἄλγεα ὃν κατὰ θυμόν,
ἀρνύμενος ἥν τε ψυχὴν καὶ νόστον ἑταίρων.
ἀλλ' οὐδ' ὣς ἑτάρους ἐρρύσατο, ἱέμενός περ:
αὐτω̂ν γὰρ σφετέρῃσιν ἀτασθαλίῃσιν ὄλοντο,
νήπιοι, οἳ κατὰ βου̂ς ̔Υπερίονος ̓Ηελίοιο
ἤσθιον: αὐτὰρ ὁ τοι̂σιν ἀφείλετο νόστιμον ἠ̂μαρ.
τω̂ν ἁμόθεν γε, θεά, θύγατερ Διός, εἰπὲ καὶ ἡμι̂ν.'''), '''
ἄνδρα μοι ἔννεπε, μου̂σα, πολύτροπον, ὃς μάλα πολλὰ
πλάγχθη, ἐπεὶ Τροίης ἱερὸν πτολίεθρον ἔπερσεν:
πολλω̂ν δ' ἀνθρώπων ἴδεν ἄστεα καὶ νόον ἔγνω,
πολλὰ δ' ὅ γ' ἐν πόντῳ πάθεν ἄλγεα ὃν κατὰ θυμόν,
ἀρνύμενος ἥν τε ψυχὴν καὶ νόστον ἑταίρων.
ἀλλ' οὐδ' ὣς ἑτάρους ἐρρύσατο, ἱέμενός περ:
αὐτω̂ν γὰρ σφετέρῃσιν ἀτασθαλίῃσιν ὄλοντο,
νήπιοι, οἳ κατὰ βου̂ς ̔Υπερίονος ̓Ηελίοιο
ἤσθιον: αὐτὰρ ὁ τοι̂σιν ἀφείλετο νόστιμον ἠ̂μαρ.
τω̂ν ἁμόθεν γε, θεά, θύγατερ Διός, εἰπὲ καὶ ἡμι̂ν.''')

    def test_greek_characters_washing_1_1(self):
        """textutils - washing greek characters for XML 1.1."""
        self.assertEqual(wash_for_xml('''
ἄνδρα μοι ἔννεπε, μου̂σα, πολύτροπον, ὃς μάλα πολλὰ
πλάγχθη, ἐπεὶ Τροίης ἱερὸν πτολίεθρον ἔπερσεν:
πολλω̂ν δ' ἀνθρώπων ἴδεν ἄστεα καὶ νόον ἔγνω,
πολλὰ δ' ὅ γ' ἐν πόντῳ πάθεν ἄλγεα ὃν κατὰ θυμόν,
ἀρνύμενος ἥν τε ψυχὴν καὶ νόστον ἑταίρων.
ἀλλ' οὐδ' ὣς ἑτάρους ἐρρύσατο, ἱέμενός περ:
αὐτω̂ν γὰρ σφετέρῃσιν ἀτασθαλίῃσιν ὄλοντο,
νήπιοι, οἳ κατὰ βου̂ς ̔Υπερίονος ̓Ηελίοιο
ἤσθιον: αὐτὰρ ὁ τοι̂σιν ἀφείλετο νόστιμον ἠ̂μαρ.
τω̂ν ἁμόθεν γε, θεά, θύγατερ Διός, εἰπὲ καὶ ἡμι̂ν.''',
                                      xml_version='1.1'), '''
ἄνδρα μοι ἔννεπε, μου̂σα, πολύτροπον, ὃς μάλα πολλὰ
πλάγχθη, ἐπεὶ Τροίης ἱερὸν πτολίεθρον ἔπερσεν:
πολλω̂ν δ' ἀνθρώπων ἴδεν ἄστεα καὶ νόον ἔγνω,
πολλὰ δ' ὅ γ' ἐν πόντῳ πάθεν ἄλγεα ὃν κατὰ θυμόν,
ἀρνύμενος ἥν τε ψυχὴν καὶ νόστον ἑταίρων.
ἀλλ' οὐδ' ὣς ἑτάρους ἐρρύσατο, ἱέμενός περ:
αὐτω̂ν γὰρ σφετέρῃσιν ἀτασθαλίῃσιν ὄλοντο,
νήπιοι, οἳ κατὰ βου̂ς ̔Υπερίονος ̓Ηελίοιο
ἤσθιον: αὐτὰρ ὁ τοι̂σιν ἀφείλετο νόστιμον ἠ̂μαρ.
τω̂ν ἁμόθεν γε, θεά, θύγατερ Διός, εἰπὲ καὶ ἡμι̂ν.''')

    def test_russian_characters_washing_1_0(self):
        """textutils - washing russian characters for XML 1.0."""
        # BUGFIX: this test previously passed xml_version='1.1' (copy-paste
        # from the 1.1 variant), so XML 1.0 washing of Russian text was never
        # exercised; it now uses the default XML 1.0 washing.
        self.assertEqual(wash_for_xml('''
В тени дерев, над чистыми водами
Дерновый холм вы видите ль, друзья?
Чуть слышно там плескает в брег струя;
Чуть ветерок там дышит меж листами;
На ветвях лира и венец...
Увы! друзья, сей холм - могила;
Здесь прах певца земля сокрыла;
Бедный певец!'''), '''
В тени дерев, над чистыми водами
Дерновый холм вы видите ль, друзья?
Чуть слышно там плескает в брег струя;
Чуть ветерок там дышит меж листами;
На ветвях лира и венец...
Увы! друзья, сей холм - могила;
Здесь прах певца земля сокрыла;
Бедный певец!''')

    def test_russian_characters_washing_1_1(self):
        """textutils - washing russian characters for XML 1.1."""
        self.assertEqual(wash_for_xml('''
В тени дерев, над чистыми водами
Дерновый холм вы видите ль, друзья?
Чуть слышно там плескает в брег струя;
Чуть ветерок там дышит меж листами;
На ветвях лира и венец...
Увы! друзья, сей холм - могила;
Здесь прах певца земля сокрыла;
Бедный певец!''', xml_version='1.1'), '''
В тени дерев, над чистыми водами
Дерновый холм вы видите ль, друзья?
Чуть слышно там плескает в брег струя;
Чуть ветерок там дышит меж листами;
На ветвях лира и венец...
Увы! друзья, сей холм - могила;
Здесь прах певца земля сокрыла;
Бедный певец!''')

    def test_illegal_characters_washing_1_0(self):
        """textutils - washing illegal characters for XML 1.0."""
        # XML 1.0 forbids most C0 control characters (e.g. backspace).
        self.assertEqual(wash_for_xml(chr(8) + chr(9) + 'some chars'), '\tsome chars')
        self.assertEqual(wash_for_xml('$b\bar{b}$'), '$bar{b}$')

    def test_illegal_characters_washing_1_1(self):
        """textutils - washing illegal characters for XML 1.1."""
        # XML 1.1 allows the same control characters as character references,
        # so they survive washing.
        self.assertEqual(wash_for_xml(chr(8) + chr(9) + 'some chars',
                                      xml_version='1.1'), '\x08\tsome chars')
        self.assertEqual(wash_for_xml('$b\bar{b}$', xml_version='1.1'), '$b\x08ar{b}$')
class WashForUTF8Test(InvenioTestCase):
    """Test functions related to wash_for_utf8 function."""

    def test_normal_legal_string_washing(self):
        """textutils - testing UTF-8 washing on a perfectly normal string"""
        some_str = "This is an example string"
        self.assertEqual(some_str, wash_for_utf8(some_str))

    def test_chinese_string_washing(self):
        """textutils - testing washing functions on chinese script"""
        some_str = """春眠暁を覚えず
処処に啼鳥と聞く
夜来風雨の声
花落つること
知んぬ多少ぞ"""
        self.assertEqual(some_str, wash_for_utf8(some_str))

    def test_russian_characters_washing(self):
        """textutils - washing Russian characters for UTF-8"""
        self.assertEqual(wash_for_utf8('''
В тени дерев, над чистыми водами
Дерновый холм вы видите ль, друзья?
Чуть слышно там плескает в брег струя;
Чуть ветерок там дышит меж листами;
На ветвях лира и венец...
Увы! друзья, сей холм - могила;
Здесь прах певца земля сокрыла;
Бедный певец!'''), '''
В тени дерев, над чистыми водами
Дерновый холм вы видите ль, друзья?
Чуть слышно там плескает в брег струя;
Чуть ветерок там дышит меж листами;
На ветвях лира и венец...
Увы! друзья, сей холм - могила;
Здесь прах певца земля сокрыла;
Бедный певец!''')

    def test_remove_incorrect_unicode_characters(self):
        """textutils - washing out the incorrect characters"""
        # Bytes \202-\206 are not valid UTF-8 sequences and must be dropped.
        self.assertEqual(wash_for_utf8("Ź\206dź\204bło żół\203wia \202"), "Źdźbło żółwia ")

    def test_empty_string_wash(self):
        """textutils - washing an empty string"""
        self.assertEqual(wash_for_utf8(""), "")

    def test_only_incorrect_unicode_wash(self):
        """textutils - washing a string made only of incorrect characters"""
        self.assertEqual(wash_for_utf8("\202\203\204\205"), "")

    def test_raising_exception_on_incorrect(self):
        """textutils - assuring an exception on incorrect input"""
        # With correct=False invalid bytes must raise instead of being dropped.
        self.assertRaises(UnicodeDecodeError, wash_for_utf8, "\202\203\204\205", correct=False)

    def test_already_utf8_input(self):
        """textutils - washing a Unicode string into UTF-8 binary string"""
        self.assertEqual('Göppert', wash_for_utf8(u'G\xf6ppert', True))
class WrapTextInABoxTest(InvenioTestCase):
    """Test functions related to wrap_text_in_a_box function."""
    # NOTE(review): the expected box strings below are reproduced exactly as
    # found in this copy of the file; the interior padding of the boxes looks
    # collapsed compared to a fixed-width box drawing — verify against the
    # canonical upstream file before relying on them.

    def test_plain_wrap_text_in_a_box(self):
        """textutils - wrap_text_in_a_box plain."""
        result = """
**********************************************
** foo bar **
**********************************************
"""
        self.assertEqual(wrap_text_in_a_box('foo bar'), result)

    def test_empty_wrap_text_in_a_box(self):
        """textutils - wrap_text_in_a_box empty."""
        result = """
**********************************************
**********************************************
"""
        self.assertEqual(wrap_text_in_a_box(), result)

    def test_with_title_wrap_text_in_a_box(self):
        """textutils - wrap_text_in_a_box with title."""
        result = """
**********************************************
** a Title! **
** **************************************** **
** foo bar **
**********************************************
"""
        self.assertEqual(wrap_text_in_a_box('foo bar', title='a Title!'), result)

    def test_multiline_wrap_text_in_a_box(self):
        """textutils - wrap_text_in_a_box multiline."""
        # A newline followed by indentation is treated as a soft wrap.
        result = """
**********************************************
** foo bar **
**********************************************
"""
        self.assertEqual(wrap_text_in_a_box('foo\n bar'), result)

    def test_real_multiline_wrap_text_in_a_box(self):
        """textutils - wrap_text_in_a_box real multiline."""
        # A blank line separates real paragraphs.
        result = """
**********************************************
** foo **
** bar **
**********************************************
"""
        self.assertEqual(wrap_text_in_a_box('foo\n\nbar'), result)

    def test_real_no_width_wrap_text_in_a_box(self):
        """textutils - wrap_text_in_a_box no width."""
        result = """
************
** foobar **
************
"""
        self.assertEqual(wrap_text_in_a_box('foobar', min_col=0), result)

    def test_real_nothing_at_all_wrap_text_in_a_box(self):
        """textutils - wrap_text_in_a_box nothing at all."""
        result = """
******
******
"""
        self.assertEqual(wrap_text_in_a_box(min_col=0), result)

    def test_real_squared_wrap_text_in_a_box(self):
        """textutils - wrap_text_in_a_box squared style."""
        result = """
+--------+
| foobar |
+--------+
"""
        self.assertEqual(wrap_text_in_a_box('foobar', style='squared', min_col=0), result)

    def test_indented_text_wrap_text_in_a_box(self):
        """textutils - wrap_text_in_a_box indented text."""
        text = """
def test_real_squared_wrap_text_in_a_box(self):\n
\"""wrap_text_in_a_box - squared style.\"""\n
result = \"""\n
+--------+\n
| foobar |\n
+--------+
\"""
"""
        # break_long=True forces hard breaks inside over-long words.
        result = """
******************************
** def test_real_square **
** d_wrap_text_in_a_box **
** (self): **
** \"""wrap_text_in_ **
** a_box - squared **
** style.\""" **
** result = \""" **
** +--------+ **
** | foobar | **
** +--------+\""" **
******************************
"""
        self.assertEqual(wrap_text_in_a_box(text, min_col=0, max_col=30, break_long=True), result)

    def test_single_new_line_wrap_text_in_a_box(self):
        """textutils - wrap_text_in_a_box single new line."""
        # A lone newline is folded into a space.
        result = """
**********************************************
** ciao come và? **
**********************************************
"""
        self.assertEqual(wrap_text_in_a_box("ciao\ncome và?"), result)

    def test_indented_box_wrap_text_in_a_box(self):
        """textutils - wrap_text_in_a_box indented box."""
        result = """
**********************************************
** foobar **
**********************************************
"""
        self.assertEqual(wrap_text_in_a_box('foobar', tab_num=1), result)

    def test_real_conclusion_wrap_text_in_a_box(self):
        """textutils - wrap_text_in_a_box conclusion."""
        result = """----------------------------------------
foobar \n"""
        self.assertEqual(wrap_text_in_a_box('foobar', style='conclusion'), result)

    def test_real_longtext_wrap_text_in_a_box(self):
        """textutils - wrap_text_in_a_box long text."""
        text = """Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.
At vero eos et accusamus et iusto odio dignissimos ducimus qui blanditiis praesentium voluptatum deleniti atque corrupti quos dolores et quas molestias excepturi sint occaecati cupiditate non provident, similique sunt in culpa qui officia deserunt mollitia animi, id est laborum et dolorum fuga. Et harum quidem rerum facilis est et expedita distinctio. Nam libero tempore, cum soluta nobis est eligendi optio cumque nihil impedit quo minus id quod maxime placeat facere possimus, omnis voluptas assumenda est, omnis dolor repellendus. Temporibus autem quibusdam et aut officiis debitis aut rerum necessitatibus saepe eveniet ut et voluptates repudiandae sint et molestiae non recusandae. Itaque earum rerum hic tenetur a sapiente delectus, ut aut reiciendis voluptatibus maiores alias consequatur aut perferendis doloribus asperiores repellat."""
        result = """
************************************************************************
** Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do **
** eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut **
** enim ad minim veniam, quis nostrud exercitation ullamco laboris **
** nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in **
** reprehenderit in voluptate velit esse cillum dolore eu fugiat **
** nulla pariatur. Excepteur sint occaecat cupidatat non proident, **
** sunt in culpa qui officia deserunt mollit anim id est laborum. **
** At vero eos et accusamus et iusto odio dignissimos ducimus qui **
** blanditiis praesentium voluptatum deleniti atque corrupti quos **
** dolores et quas molestias excepturi sint occaecati cupiditate non **
** provident, similique sunt in culpa qui officia deserunt mollitia **
** animi, id est laborum et dolorum fuga. Et harum quidem rerum **
** facilis est et expedita distinctio. Nam libero tempore, cum soluta **
** nobis est eligendi optio cumque nihil impedit quo minus id quod **
** maxime placeat facere possimus, omnis voluptas assumenda est, **
** omnis dolor repellendus. Temporibus autem quibusdam et aut **
** officiis debitis aut rerum necessitatibus saepe eveniet ut et **
** voluptates repudiandae sint et molestiae non recusandae. Itaque **
** earum rerum hic tenetur a sapiente delectus, ut aut reiciendis **
** voluptatibus maiores alias consequatur aut perferendis doloribus **
** asperiores repellat. **
************************************************************************
"""
        self.assertEqual(wrap_text_in_a_box(text), result)
class DecodeToUnicodeTest(InvenioTestCase):
    """Test functions related to decode_to_unicode function."""

    # The test is only defined when the optional chardet package is
    # importable; otherwise the class simply contains no tests.
    if CHARDET_AVAILABLE:
        def test_decode_to_unicode(self):
            """textutils - decode_to_unicode."""
            # Undetectable bytes fall back to the caller-supplied encoding.
            self.assertEqual(
                decode_to_unicode('\202\203\204\205',
                                  default_encoding='latin1'),
                u'\x82\x83\x84\x85')
            # Latin and Greek text is auto-detected and decoded.
            self.assertEqual(decode_to_unicode('àèéìòù'),
                             u'\xe0\xe8\xe9\xec\xf2\xf9')
            self.assertEqual(decode_to_unicode('Ιθάκη'),
                             u'\u0399\u03b8\u03ac\u03ba\u03b7')
class Latex2UnicodeTest(InvenioTestCase):
    """Test functions related to translating LaTeX symbols to Unicode."""

    def test_latex_to_unicode(self):
        """textutils - latex_to_unicode"""
        # Accent escapes on ASCII letters.
        self.assertEqual(translate_latex2unicode("\\'a \\'i \\'U").encode('utf-8'), "á í Ú")
        # Accent and ogonek commands.
        self.assertEqual(translate_latex2unicode("\\'N \\k{i}"), u'\u0143 \u012f')
        # \AA (Angstrom sign) glued to the rest of a word.
        self.assertEqual(translate_latex2unicode("\\AAkeson"), u'\u212bkeson')
        # Math-mode command mapped to a supplementary-plane code point.
        self.assertEqual(translate_latex2unicode("$\\mathsl{\\Zeta}$"), u'\U0001d6e7')
class TestStripping(InvenioTestCase):
    """Test for stripping functions like accents and control characters."""

    # Only defined when the optional unidecode package is importable.
    if UNIDECODE_AVAILABLE:
        def test_text_to_ascii(self):
            """textutils - transliterate to ascii using unidecode"""
            # Accept both transliteration tables shipped by unidecode
            # (behaviour changed in 0.04.13, e.g. 'ö' -> 'o' vs 'oe').
            # FIX: use assertTrue instead of the deprecated assert_ alias.
            self.assertTrue(translate_to_ascii(
                ["á í Ú", "H\xc3\xb6hne", "Åge Øst Vær", "normal"]) in
                (["a i U", "Hohne", "Age Ost Vaer", "normal"],   ## unidecode < 0.04.13
                 ['a i U', 'Hoehne', 'Age Ost Vaer', 'normal'])  ## unidecode >= 0.04.13
            )
            self.assertEqual(translate_to_ascii("àèéìòù"), ["aeeiou"])
            self.assertEqual(translate_to_ascii("ß"), ["ss"])
            # None / empty containers must pass through unchanged.
            self.assertEqual(translate_to_ascii(None), None)
            self.assertEqual(translate_to_ascii([]), [])
            self.assertEqual(translate_to_ascii([None]), [None])

    def test_strip_accents(self):
        """textutils - transliterate to ascii (basic)"""
        self.assertEqual("memememe",
                         strip_accents('mémêmëmè'))
        self.assertEqual("MEMEMEME",
                         strip_accents('MÉMÊMËMÈ'))
        # Ligatures are expanded, preserving case.
        self.assertEqual("oe",
                         strip_accents('œ'))
        self.assertEqual("OE",
                         strip_accents('Œ'))
class TestDiffering(InvenioTestCase):
    """Test for differing two strings."""
    # NOTE(review): the fixture and expected strings are reproduced exactly
    # as found in this copy of the file (leading whitespace may have been
    # stripped by the extraction) — verify against the canonical upstream
    # file before relying on exact spacing.

    # Two lorem-ipsum variants differing in a handful of lines.
    string1 = """Lorem ipsum dolor sit amet, consectetur adipiscing
elit. Donec fringilla tellus eget fringilla sagittis. Pellentesque
posuere lacus id erat tristique pulvinar. Morbi volutpat, diam
eget interdum lobortis, lacus mi cursus leo, sit amet porttitor
neque est vitae lectus. Donec tempor metus vel tincidunt fringilla.
Nam iaculis lacinia nisl, enim sollicitudin
convallis. Morbi ut mauris velit. Proin suscipit dolor id risus
placerat sodales nec id elit. Morbi vel lacinia lectus, eget laoreet
dui. Nunc commodo neque porttitor eros placerat, sed ultricies purus
accumsan. In velit nisi, accumsan molestie gravida a, rutrum in augue.
Nulla pharetra purus nec dolor ornare, ut aliquam odio placerat.
Aenean ultrices condimentum quam vitae pharetra."""

    string2 = """Lorem ipsum dolor sit amet, consectetur adipiscing
elit. Donec fringilla tellus eget fringilla sagittis. Pellentesque
posuere lacus id erat.
eget interdum lobortis, lacus mi cursus leo, sit amet porttitor
neque est vitae lectus. Donec tempor metus vel tincidunt fringilla.
Nam iaculis lacinia nisl, consectetur viverra enim sollicitudin
convallis. Morbi ut mauris velit. Proin suscipit dolor id risus
placerat sodales nec id elit. Morbi vel lacinia lectus, eget laoreet
placerat sodales nec id elit. Morbi vel lacinia lectus, eget laoreet
dui. Nunc commodo neque porttitor eros placerat, sed ultricies purus
accumsan. In velit nisi, lorem ipsum lorem gravida a, rutrum in augue.
Nulla pharetra purus nec dolor ornare, ut aliquam odio placerat.
Aenean ultrices condimentum quam vitae pharetra."""

    def test_show_diff_plain_text(self):
        """textutils - show_diff() with plain text"""
        # Default markers: '-' for removed lines, '+' for added ones.
        expected_result = """
Lorem ipsum dolor sit amet, consectetur adipiscing
elit. Donec fringilla tellus eget fringilla sagittis. Pellentesque
-posuere lacus id erat.
+posuere lacus id erat tristique pulvinar. Morbi volutpat, diam
eget interdum lobortis, lacus mi cursus leo, sit amet porttitor
neque est vitae lectus. Donec tempor metus vel tincidunt fringilla.
-Nam iaculis lacinia nisl, consectetur viverra enim sollicitudin
+Nam iaculis lacinia nisl, enim sollicitudin
convallis. Morbi ut mauris velit. Proin suscipit dolor id risus
placerat sodales nec id elit. Morbi vel lacinia lectus, eget laoreet
-placerat sodales nec id elit. Morbi vel lacinia lectus, eget laoreet
dui. Nunc commodo neque porttitor eros placerat, sed ultricies purus
-accumsan. In velit nisi, lorem ipsum lorem gravida a, rutrum in augue.
+accumsan. In velit nisi, accumsan molestie gravida a, rutrum in augue.
Nulla pharetra purus nec dolor ornare, ut aliquam odio placerat.
Aenean ultrices condimentum quam vitae pharetra.
"""
        self.assertEqual(show_diff(self.string1, self.string2), expected_result)

    def test_show_diff_html(self):
        """textutils - show_diff() with HTML markup"""
        # Custom prefixes/suffixes wrap changed lines in <strong> tags.
        expected_result = """<pre>
Lorem ipsum dolor sit amet, consectetur adipiscing
elit. Donec fringilla tellus eget fringilla sagittis. Pellentesque
<strong class="diff_field_deleted">posuere lacus id erat.</strong>
<strong class="diff_field_added">posuere lacus id erat tristique pulvinar. Morbi volutpat, diam</strong>
eget interdum lobortis, lacus mi cursus leo, sit amet porttitor
neque est vitae lectus. Donec tempor metus vel tincidunt fringilla.
<strong class="diff_field_deleted">Nam iaculis lacinia nisl, consectetur viverra enim sollicitudin</strong>
<strong class="diff_field_added">Nam iaculis lacinia nisl, enim sollicitudin</strong>
convallis. Morbi ut mauris velit. Proin suscipit dolor id risus
placerat sodales nec id elit. Morbi vel lacinia lectus, eget laoreet
<strong class="diff_field_deleted">placerat sodales nec id elit. Morbi vel lacinia lectus, eget laoreet</strong>
dui. Nunc commodo neque porttitor eros placerat, sed ultricies purus
<strong class="diff_field_deleted">accumsan. In velit nisi, lorem ipsum lorem gravida a, rutrum in augue.</strong>
<strong class="diff_field_added">accumsan. In velit nisi, accumsan molestie gravida a, rutrum in augue.</strong>
Nulla pharetra purus nec dolor ornare, ut aliquam odio placerat.
Aenean ultrices condimentum quam vitae pharetra.
</pre>"""
        self.assertEqual(show_diff(self.string1,
                                   self.string2,
                                   prefix="<pre>", suffix="</pre>",
                                   prefix_unchanged='',
                                   suffix_unchanged='',
                                   prefix_removed='<strong class="diff_field_deleted">',
                                   suffix_removed='</strong>',
                                   prefix_added='<strong class="diff_field_added">',
                                   suffix_added='</strong>'), expected_result)
class TestALALC(InvenioTestCase):
    """Test for handling ALA-LC transliteration."""

    # Only defined when the optional unidecode package is importable.
    if UNIDECODE_AVAILABLE:
        def test_alalc(self):
            """textutils - ALA-LC transliteration of CJK text."""
            msg = "眾鳥高飛盡"
            # Normalise the fixture to a unicode object first.
            encoded_text, encoding = guess_minimum_encoding(msg)
            unicode_text = unicode(encoded_text.decode(encoding))
            self.assertEqual("Zhong Niao Gao Fei Jin ",
                             transliterate_ala_lc(unicode_text))
class LatexEscape(InvenioTestCase):
    """Test for escape latex function"""

    def test_escape_latex(self):
        """textutils - escape_latex on special and multi-byte characters."""
        unescaped = "this is unescaped latex & % $ # _ { } ~ \ ^ and some multi-byte chars: żółw mémêmëmè"
        escaped = escape_latex(unescaped)
        # All LaTeX special characters are escaped; UTF-8 text is untouched.
        self.assertEqual(escaped,
                         "this is unescaped latex \\& \\% \\$ \\# \\_ \\{ \\} \\~{} \\textbackslash{} \\^{} and some multi-byte chars: \xc5\xbc\xc3\xb3\xc5\x82w m\xc3\xa9m\xc3\xaam\xc3\xabm\xc3\xa8")
# Register every test case defined in this module.
# FIX: LatexEscape was defined above but missing from the suite, so its
# test never ran under the suite runner.
TEST_SUITE = make_test_suite(WrapTextInABoxTest, GuessMinimumEncodingTest,
                             WashForXMLTest, WashForUTF8Test,
                             DecodeToUnicodeTest, Latex2UnicodeTest,
                             TestStripping, TestALALC, TestDiffering,
                             LatexEscape)

if __name__ == "__main__":
    run_test_suite(TEST_SUITE)
| gpl-2.0 |
gzorin/RSXGL | extsrc/mesa/scons/crossmingw.py | 13 | 8201 | """SCons.Tool.gcc
Tool-specific initialization for MinGW (http://www.mingw.org/)
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
See also http://www.scons.org/wiki/CrossCompilingMingw
"""
#
# Copyright (c) 2001, 2002, 2003, 2004 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
import os
import os.path
import string
import SCons.Action
import SCons.Builder
import SCons.Tool
import SCons.Util
# This is what we search for to find mingw:
# Known tool-name prefixes of 32-bit MinGW cross toolchains as shipped by
# various distributions; the bare tool name is appended when probing
# (e.g. 'i686-w64-mingw32-' + 'gcc').
prefixes32 = SCons.Util.Split("""
mingw32-
mingw32msvc-
i386-mingw32-
i486-mingw32-
i586-mingw32-
i686-mingw32-
i386-mingw32msvc-
i486-mingw32msvc-
i586-mingw32msvc-
i686-mingw32msvc-
i686-pc-mingw32-
i686-w64-mingw32-
""")

# Known tool-name prefixes of 64-bit (x86_64) MinGW cross toolchains.
prefixes64 = SCons.Util.Split("""
x86_64-w64-mingw32-
amd64-mingw32-
amd64-mingw32msvc-
amd64-pc-mingw32-
""")
def find(env):
    """Return the cross-mingw tool prefix whose gcc can be located, or ''.

    The candidate list is selected by the requested target machine:
    env['machine'] == 'x86_64' probes the 64-bit toolchains, anything
    else the 32-bit ones.
    """
    if env['machine'] == 'x86_64':
        candidates = prefixes64
    else:
        candidates = prefixes32
    for candidate in candidates:
        gcc_name = candidate + 'gcc'
        # Search the SCons-configured path first, then the OS path.
        if env.WhereIs(gcc_name) or SCons.Util.WhereIs(gcc_name):
            return candidate
    return ''
def shlib_generator(target, source, env, for_signature):
    """Generate the command line used to link a shared library (DLL).

    Besides the DLL itself, the command optionally emits a GNU import
    library (-Wl,--out-implib) and a .def file (-Wl,--output-def) when
    matching targets are present in the target list.
    """
    link_cmd = SCons.Util.CLVar(['$SHLINK', '$SHLINKFLAGS'])

    dll_node = env.FindIxes(target, 'SHLIBPREFIX', 'SHLIBSUFFIX')
    if dll_node:
        link_cmd.extend(['-o', dll_node])

    link_cmd.extend(['$SOURCES', '$_LIBDIRFLAGS', '$_LIBFLAGS'])

    import_lib = env.FindIxes(target, 'LIBPREFIX', 'LIBSUFFIX')
    if import_lib:
        link_cmd.append('-Wl,--out-implib,' + import_lib.get_string(for_signature))

    def_file = env.FindIxes(target, 'WIN32DEFPREFIX', 'WIN32DEFSUFFIX')
    if def_file:
        link_cmd.append('-Wl,--output-def,' + def_file.get_string(for_signature))

    return [link_cmd]
def shlib_emitter(target, source, env):
    # Emitter for shared libraries: given the DLL target, add the matching
    # import library (unless env['no_import_lib'] is set) and a .def file
    # to the target list so SCons tracks them as build products.
    # NOTE: this file is Python 2 (old-style raise below).
    dll = env.FindIxes(target, 'SHLIBPREFIX', 'SHLIBSUFFIX')
    no_import_lib = env.get('no_import_lib', 0)

    if not dll:
        raise SCons.Errors.UserError, "A shared library should have exactly one target with the suffix: %s" % env.subst("$SHLIBSUFFIX")

    if not no_import_lib and \
       not env.FindIxes(target, 'LIBPREFIX', 'LIBSUFFIX'):
        # Append an import library to the list of targets.
        target.append(env.ReplaceIxes(dll,
                                      'SHLIBPREFIX', 'SHLIBSUFFIX',
                                      'LIBPREFIX', 'LIBSUFFIX'))

    # Append a def file target if there isn't already a def file target
    # or a def file source. There is no option to disable def file
    # target emitting, because I can't figure out why someone would ever
    # want to turn it off.
    def_source = env.FindIxes(source, 'WIN32DEFPREFIX', 'WIN32DEFSUFFIX')
    def_target = env.FindIxes(target, 'WIN32DEFPREFIX', 'WIN32DEFSUFFIX')
    if not def_source and not def_target:
        target.append(env.ReplaceIxes(dll,
                                      'SHLIBPREFIX', 'SHLIBSUFFIX',
                                      'WIN32DEFPREFIX', 'WIN32DEFSUFFIX'))

    return (target, source)
# Link action for shared libraries; the command line is produced at build
# time by shlib_generator so the implib/.def options can be added.
shlib_action = SCons.Action.Action(shlib_generator, '$SHLINKCOMSTR', generator=1)

# Action and builder for compiling Windows resource (.rc) files into .o
# objects with windres ($RCCOM).
res_action = SCons.Action.Action('$RCCOM', '$RCCOMSTR')

res_builder = SCons.Builder.Builder(action=res_action, suffix='.o',
                                    source_scanner=SCons.Tool.SourceFileScanner)
# Scan .rc files with the C scanner so their #include dependencies are
# tracked.  NOTE(review): SCons.Defaults is referenced here but not
# imported above — presumably pulled in transitively by the other SCons
# imports; verify.
SCons.Tool.SourceFileScanner.add_scanner('.rc', SCons.Defaults.CScan)
def compile_without_gstabs(env, sources, c_file):
'''This is a hack used to compile some source files without the
-gstabs option.
It seems that some versions of mingw32's gcc (4.4.2 at least) die
when compiling large files with the -gstabs option. -gstabs is
related to debug symbols and can be omitted from the effected
files.
This function compiles the given c_file without -gstabs, removes
the c_file from the sources list, then appends the new .o file to
sources. Then return the new sources list.
'''
# Modify CCFLAGS to not have -gstabs option:
env2 = env.Clone()
flags = str(env2['CCFLAGS'])
flags = flags.replace("-gstabs", "")
env2['CCFLAGS'] = SCons.Util.CLVar(flags)
# Build the special-case files:
obj_file = env2.SharedObject(c_file)
# Replace ".cpp" or ".c" with ".o"
o_file = c_file.replace(".cpp", ".o")
o_file = o_file.replace(".c", ".o")
# Replace the .c files with the specially-compiled .o file
sources.remove(c_file)
sources.append(o_file)
return sources
def generate(env):
    """Add Builders and construction variables for mingw to *env*.

    Configures the gcc/g++/ar/as toolchain with the detected mingw
    prefix, registers the shared-library and resource builders, and
    overrides Windows-specific prefixes/suffixes.
    """
    mingw_prefix = find(env)
    if mingw_prefix:
        # The mingw bin directory must be added to the path.
        # NOTE: renamed from "dir", which shadowed the builtin.
        bin_dir = os.path.dirname(env.WhereIs(mingw_prefix + 'gcc') or SCons.Util.WhereIs(mingw_prefix + 'gcc'))
        path = env['ENV'].get('PATH', [])
        if not path:
            path = []
        if SCons.Util.is_String(path):
            path = string.split(path, os.pathsep)
        env['ENV']['PATH'] = string.join([bin_dir] + path, os.pathsep)
    # Most of mingw is the same as gcc and friends...
    gnu_tools = ['gcc', 'g++', 'gnulink', 'ar', 'gas']
    for tool in gnu_tools:
        SCons.Tool.Tool(tool)(env)
    # ... but a few things differ:
    env['CC'] = mingw_prefix + 'gcc'
    env['SHCCFLAGS'] = SCons.Util.CLVar('$CCFLAGS')
    env['CXX'] = mingw_prefix + 'g++'
    env['SHCXXFLAGS'] = SCons.Util.CLVar('$CXXFLAGS')
    env['SHLINKFLAGS'] = SCons.Util.CLVar('$LINKFLAGS -shared')
    env['SHLINKCOM'] = shlib_action
    env.Append(SHLIBEMITTER = [shlib_emitter])
    # LINK was previously assigned twice with the identical value; the
    # duplicate assignment has been removed.
    env['LINK'] = mingw_prefix + 'g++'
    env['AR'] = mingw_prefix + 'ar'
    env['RANLIB'] = mingw_prefix + 'ranlib'
    env['AS'] = mingw_prefix + 'as'
    env['WIN32DEFPREFIX'] = ''
    env['WIN32DEFSUFFIX'] = '.def'
    env['SHOBJSUFFIX'] = '.o'
    env['STATIC_AND_SHARED_OBJECTS_ARE_THE_SAME'] = 1
    env['RC'] = mingw_prefix + 'windres'
    env['RCFLAGS'] = SCons.Util.CLVar('')
    env['RCCOM'] = '$RC $_CPPDEFFLAGS $_CPPINCFLAGS ${INCPREFIX}${SOURCE.dir} $RCFLAGS -i $SOURCE -o $TARGET'
    env['BUILDERS']['RES'] = res_builder
    # Some settings from the platform also have to be overridden:
    env['OBJPREFIX'] = ''
    env['OBJSUFFIX'] = '.o'
    env['SHOBJPREFIX'] = '$OBJPREFIX'
    # Overrides the literal '.o' set above; both expand to the same value.
    env['SHOBJSUFFIX'] = '$OBJSUFFIX'
    env['PROGPREFIX'] = ''
    env['PROGSUFFIX'] = '.exe'
    env['LIBPREFIX'] = 'lib'
    env['LIBSUFFIX'] = '.a'
    env['SHLIBPREFIX'] = ''
    env['SHLIBSUFFIX'] = '.dll'
    env['LIBPREFIXES'] = [ 'lib', '' ]
    env['LIBSUFFIXES'] = [ '.a', '.lib' ]
    # MinGW x86 port of gdb does not handle well dwarf debug info which is the
    # default in recent gcc versions. The x64 port gdb from mingw-w64 seems to
    # handle it fine though, so stick with the default there.
    if env['machine'] != 'x86_64':
        env.AppendUnique(CCFLAGS = ['-gstabs'])
    env.AddMethod(compile_without_gstabs, 'compile_without_gstabs')
def exists(env):
    """Report whether the mingw toolchain can be located for *env*."""
    prefix = find(env)
    return prefix
| bsd-2-clause |
duyet-website/api.duyet.net | lib/requests/packages/urllib3/contrib/socks.py | 218 | 6195 | # -*- coding: utf-8 -*-
"""
This module contains provisional support for SOCKS proxies from within
urllib3. This module supports SOCKS4 (specifically the SOCKS4A variant) and
SOCKS5. To enable its functionality, either install PySocks or install this
module with the ``socks`` extra.
The SOCKS implementation supports the full range of urllib3 features. It also
supports the following SOCKS features:
- SOCKS4
- SOCKS4a
- SOCKS5
- Usernames and passwords for the SOCKS proxy
Known Limitations:
- Currently PySocks does not support contacting remote websites via literal
IPv6 addresses. Any such connection attempt will fail. You must use a domain
name.
- Currently PySocks does not support IPv6 connections to the SOCKS proxy. Any
such connection attempt will fail.
"""
from __future__ import absolute_import
try:
import socks
except ImportError:
import warnings
from ..exceptions import DependencyWarning
warnings.warn((
'SOCKS support in urllib3 requires the installation of optional '
'dependencies: specifically, PySocks. For more information, see '
'https://urllib3.readthedocs.io/en/latest/contrib.html#socks-proxies'
),
DependencyWarning
)
raise
from socket import error as SocketError, timeout as SocketTimeout
from ..connection import (
HTTPConnection, HTTPSConnection
)
from ..connectionpool import (
HTTPConnectionPool, HTTPSConnectionPool
)
from ..exceptions import ConnectTimeoutError, NewConnectionError
from ..poolmanager import PoolManager
from ..util.url import parse_url
try:
import ssl
except ImportError:
ssl = None
class SOCKSConnection(HTTPConnection):
    """
    A plain-text HTTP connection that connects via a SOCKS proxy.
    """
    def __init__(self, *args, **kwargs):
        # Per-connection SOCKS settings (version, proxy host/port,
        # credentials, rdns flag), injected by SOCKSProxyManager via
        # connection_pool_kw.
        self._socks_options = kwargs.pop('_socks_options')
        super(SOCKSConnection, self).__init__(*args, **kwargs)

    def _new_conn(self):
        """
        Establish a new connection via the SOCKS proxy.

        Translates PySocks/socket errors into urllib3's
        ConnectTimeoutError / NewConnectionError so callers see the
        usual urllib3 exception types.
        """
        extra_kw = {}
        # Only forward the optional socket parameters that were set.
        if self.source_address:
            extra_kw['source_address'] = self.source_address
        if self.socket_options:
            extra_kw['socket_options'] = self.socket_options
        try:
            conn = socks.create_connection(
                (self.host, self.port),
                proxy_type=self._socks_options['socks_version'],
                proxy_addr=self._socks_options['proxy_host'],
                proxy_port=self._socks_options['proxy_port'],
                proxy_username=self._socks_options['username'],
                proxy_password=self._socks_options['password'],
                proxy_rdns=self._socks_options['rdns'],
                timeout=self.timeout,
                **extra_kw
            )
        except SocketTimeout as e:
            raise ConnectTimeoutError(
                self, "Connection to %s timed out. (connect timeout=%s)" %
                (self.host, self.timeout))
        except socks.ProxyError as e:
            # This is fragile as hell, but it seems to be the only way to raise
            # useful errors here.  PySocks wraps the underlying socket error
            # in socket_err; unwrap it to distinguish timeouts from other
            # connection failures.
            if e.socket_err:
                error = e.socket_err
                if isinstance(error, SocketTimeout):
                    raise ConnectTimeoutError(
                        self,
                        "Connection to %s timed out. (connect timeout=%s)" %
                        (self.host, self.timeout)
                    )
                else:
                    raise NewConnectionError(
                        self,
                        "Failed to establish a new connection: %s" % error
                    )
            else:
                raise NewConnectionError(
                    self,
                    "Failed to establish a new connection: %s" % e
                )
        except SocketError as e:  # Defensive: PySocks should catch all these.
            raise NewConnectionError(
                self, "Failed to establish a new connection: %s" % e)
        return conn
# We don't need to duplicate the Verified/Unverified distinction from
# urllib3/connection.py here because the HTTPSConnection will already have been
# correctly set to either the Verified or Unverified form by that module. This
# means the SOCKSHTTPSConnection will automatically be the correct type.
class SOCKSHTTPSConnection(SOCKSConnection, HTTPSConnection):
    """HTTPS-over-SOCKS connection; behavior comes entirely from the bases."""
    pass
class SOCKSHTTPConnectionPool(HTTPConnectionPool):
    """HTTP connection pool whose connections are routed through SOCKS."""
    ConnectionCls = SOCKSConnection
class SOCKSHTTPSConnectionPool(HTTPSConnectionPool):
    """HTTPS connection pool whose connections are routed through SOCKS."""
    ConnectionCls = SOCKSHTTPSConnection
class SOCKSProxyManager(PoolManager):
    """
    A version of the urllib3 ProxyManager that routes connections via the
    defined SOCKS proxy.
    """
    pool_classes_by_scheme = {
        'http': SOCKSHTTPConnectionPool,
        'https': SOCKSHTTPSConnectionPool,
    }

    def __init__(self, proxy_url, username=None, password=None,
                 num_pools=10, headers=None, **connection_pool_kw):
        parsed = parse_url(proxy_url)

        # Map the proxy URL scheme onto (SOCKS protocol version,
        # remote-DNS flag).  The "h"/"a" variants resolve hostnames on
        # the proxy side.
        scheme_map = {
            'socks5': (socks.PROXY_TYPE_SOCKS5, False),
            'socks5h': (socks.PROXY_TYPE_SOCKS5, True),
            'socks4': (socks.PROXY_TYPE_SOCKS4, False),
            'socks4a': (socks.PROXY_TYPE_SOCKS4, True),
        }
        try:
            socks_version, rdns = scheme_map[parsed.scheme]
        except KeyError:
            raise ValueError(
                "Unable to determine SOCKS version from %s" % proxy_url
            )

        self.proxy_url = proxy_url

        # These options are forwarded to every SOCKSConnection the pools
        # create.
        socks_options = {
            'socks_version': socks_version,
            'proxy_host': parsed.host,
            'proxy_port': parsed.port,
            'username': username,
            'password': password,
            'rdns': rdns
        }
        connection_pool_kw['_socks_options'] = socks_options

        super(SOCKSProxyManager, self).__init__(
            num_pools, headers, **connection_pool_kw
        )
        self.pool_classes_by_scheme = SOCKSProxyManager.pool_classes_by_scheme
| mit |
tgstation/tgstation | tools/discordRoleScript/Script.py | 18 | 2007 | # Script to role discord members who have already associated their BYOND account
# Author: AffectedArc07
# From Discord API:
# Clients are allowed 120 events every 60 seconds, meaning you can send on average at a rate of up to 2 events per second.
# So lets send every 0.6 seconds to ensure we arent rate capped
####### CONFIG ######

# Discord section. Make sure the IDs are strings to avoid issues with IDs that start with a 0
botToken = "Put your discord bot token here"
guildID = "000000000000000000"
roleID = "000000000000000000"

# SS13 Database section
dbHost = "127.0.0.1"
dbUser = "root"
dbPass = "your password here"
dbDatabase = "tg_db"

##### DO NOT TOUCH ANYTHING BELOW HERE UNLESS YOURE FAMILIAR WITH PYTHON #####

# One import per line per PEP 8.
import time

import requests
import mysql.connector

# Connect to DB
dbCon = mysql.connector.connect(
    host = dbHost,
    user = dbUser,
    passwd = dbPass,
    database = dbDatabase
)
cur = dbCon.cursor()

# Grab all users who need to be processed
cur.execute("SELECT byond_key, discord_id FROM player WHERE discord_id IS NOT NULL")
usersToProcess = cur.fetchall()

# We dont need the DB anymore, so close it up
dbCon.close()

# Calculate a total for better monitoring
total = len(usersToProcess)
print("Found "+str(total)+" accounts to process.")

# Now the actual processing.  enumerate() replaces the manual counter
# increment; start=1 keeps the 1-based "User X/Y" display.
for count, user in enumerate(usersToProcess, start=1):
    # user[0] = ckey, user[1] = discord ID
    print("Processing "+str(user[0])+" (Discord ID: " + str(user[1]) + ") | User "+str(count)+"/"+str(total))
    url = "https://discord.com/api/guilds/"+str(guildID)+"/members/"+str(user[1])+"/roles/"+str(roleID)
    response = requests.put(url, headers={"Authorization": "Bot "+str(botToken)})
    # Adding a role returns a code 204 (No Content), not a code 200.
    if response.status_code != 204:
        print("WARNING: Returned non-204 status code. Request used: PUT "+str(url))
    # Sleep for 0.6s. This way we stay under Discord's rate limiting
    # (120 events per 60 seconds).
    time.sleep(0.6)
| agpl-3.0 |
Intel-Corporation/tensorflow | tensorflow/python/data/experimental/kernel_tests/wrap_unwrap_test.py | 8 | 2744 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Wrapping / Unwrapping dataset variants."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_dataset_ops
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class WrapDatasetVariantTest(test_base.DatasetTestBase):
  """Checks that a dataset variant survives a wrap/unwrap round trip."""

  def testBasic(self):
    # Wrap then immediately unwrap the variant tensor; the reconstructed
    # dataset must yield the same elements as the original range dataset.
    ds = dataset_ops.Dataset.range(100)
    ds_variant = ds._variant_tensor  # pylint: disable=protected-access
    wrapped_variant = gen_dataset_ops.wrap_dataset_variant(ds_variant)
    unwrapped_variant = gen_dataset_ops.unwrap_dataset_variant(wrapped_variant)
    variant_ds = dataset_ops._VariantDataset(unwrapped_variant,
                                             ds._element_structure)
    get_next = self.getNext(variant_ds, requires_initialization=True)
    for i in range(100):
      self.assertEqual(i, self.evaluate(get_next()))

  @test_util.run_v1_only("b/123901304")
  def testSkipEagerGPU(self):
    # Same round trip, but the wrapped variant is copied through a GPU
    # device to exercise cross-device variant transfer.
    ds = dataset_ops.Dataset.range(100)
    ds_variant = ds._variant_tensor  # pylint: disable=protected-access
    wrapped_variant = gen_dataset_ops.wrap_dataset_variant(ds_variant)
    with ops.device("/gpu:0"):
      gpu_wrapped_variant = array_ops.identity(wrapped_variant)
    unwrapped_variant = gen_dataset_ops.unwrap_dataset_variant(
        gpu_wrapped_variant)
    variant_ds = dataset_ops._VariantDataset(unwrapped_variant,
                                             ds._element_structure)
    iterator = dataset_ops.make_initializable_iterator(variant_ds)
    get_next = iterator.get_next()
    with self.cached_session():
      self.evaluate(iterator.initializer)
      for i in range(100):
        self.assertEqual(i, self.evaluate(get_next))
# Run the test suite when executed directly.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
sagark/tsdb-perf-test | readingdb/readingdb_unit.py | 1 | 3340 | #!/usr/bin/jython
"""Unit tests for the readingdb interface"""
import unittest
from t_readingdb import *
class TestSequenceFunctions(unittest.TestCase):
    """Exercises ReadingDBAccess insertion and query paths."""

    def setUp(self):
        """Runs before every test"""
        self.db = ReadingDBAccess()
        print('\n')

    def gen_to_list(self, gen):
        # Materialize the comparison generator into a plain list.
        return gen.make_into_list()

    def sequential_inserter(self, width):
        """Drain the insertion generator until it is exhausted.

        Generator needs to be setup (init_insert) before calling this.
        """
        while True:
            try:
                if width:
                    self.db.run_insert_w()
                else:
                    self.db.run_insert_h()
            except StopIteration:
                print("Insertion Completed")
                break
            except Exception as e:
                # Was a bare "except:", which also swallowed
                # KeyboardInterrupt/SystemExit and hid the cause.
                print("An error occurred during the insertion: %s" % e)
                break

    def insert_query_all(self, width):
        """Test sequential inserting and query_all for 101 records in 101
        streams. Because MySQL does not place a guarantee on query return
        order, this test is rather slow."""
        gen = self.db.init_insert(101, 101, width, True)
        compareresult = self.gen_to_list(gen)
        self.sequential_inserter(width)
        #compareresult.pop(0) #test the test
        #compareresult += ['LOL'] #test the test
        result = self.db.run_query_all(debug=True)
        if False:  # debug toggle
            print(result)
            print(compareresult)
        self.assertEqual(len(result), len(compareresult))
        for x in compareresult:
            # assertTrue replaces the deprecated assert_ alias throughout.
            self.assertTrue(x in result)

    def test_query_all_width(self):
        """A simultaneous test of query_all and width-wise sequential
        insertion."""
        self.insert_query_all(True)
        print("test_query_all_width passed")

    def test_query_all_height(self):
        """A simultaneous test of query_all and height-wise sequential
        insertion."""
        self.insert_query_all(False)
        print("test_query_all_height passed")

    def test_query(self):
        """Test query over a range of records/streams"""
        # want to check 1) length of result and 2) that all values in result
        # are in the generator, although it would be pretty hard for them not
        # to be
        width = True  # we'll only do one here since it really doesn't matter
        gen = self.db.init_insert(101, 101, width, True)
        compareresult = self.gen_to_list(gen)
        self.sequential_inserter(width)
        records = 10
        streams = 10
        result = self.db.query(records, streams, True)
        self.assertEqual(len(result), records * streams)
        for x in result:
            self.assertTrue(x in compareresult)
        print("test_query passed")

    def test_query_single(self):
        """Test query of a single stream"""
        width = True  # doesn't really matter, just pick one
        gen = self.db.init_insert(101, 101, width, True)
        compareresult = self.gen_to_list(gen)
        self.sequential_inserter(width)
        records = 100
        streamid = 4
        result = self.db.query_single(records, streamid, True)
        self.assertEqual(len(result), records)
        for x in result:
            self.assertTrue(x in compareresult)
        print("test_query_single passed")
# Run the full suite when executed directly (e.g. under Jython).
if __name__== '__main__':
    unittest.main()
| bsd-2-clause |
tswsl1989/Minecraft-Overviewer | test/test_rendertileset.py | 4 | 6912 | import unittest
from itertools import chain
from overviewer_core.tileset import iterate_base4, RendertileSet
from overviewer_core.util import roundrobin
class RendertileSetTest(unittest.TestCase):
    """Tests RendertileSet membership, iteration, counting, and
    post-order traversal against a hand-built depth-3 quadtree fixture."""

    # If you change this definition, you must also change the hard-coded
    # results list in test_posttraverse()
    tile_paths = frozenset([
        # Entire subtree 0/0 is in the set, nothing else under 0
        (0,0,0),
        (0,0,1),
        (0,0,2),
        (0,0,3),
        # A few tiles under quadrant 1
        (1,0,3),
        (1,1,3),
        (1,2,0),
        # Entire subtree under quadrant 2 is in the set
        (2,0,0),
        (2,0,1),
        (2,0,2),
        (2,0,3),
        (2,1,0),
        (2,1,1),
        (2,1,2),
        (2,1,3),
        (2,2,0),
        (2,2,1),
        (2,2,2),
        (2,2,3),
        (2,3,0),
        (2,3,1),
        (2,3,2),
        (2,3,3),
        # Nothing under quadrant 3
        ])

    # The paths as yielded by posttraversal, in an expanding-from-the-center
    # order.  One inner list per top-level quadrant; each list ends with the
    # intermediate (shorter) paths of the subtree it completes.
    tile_paths_posttraversal_lists = [
            [
            (0,0,3),
            (0,0,1),
            (0,0,2),
            (0,0,0),
            (0,0),
            (0,),
            ],
            [
            (1,2,0),
            (1,2),
            (1,0,3),
            (1,0),
            (1,1,3),
            (1,1),
            (1,),
            ],
            [
            (2,1,1),
            (2,1,0),
            (2,1,3),
            (2,1,2),
            (2,1),
            (2,0,1),
            (2,0,3),
            (2,0,0),
            (2,0,2),
            (2,0),
            (2,3,1),
            (2,3,0),
            (2,3,3),
            (2,3,2),
            (2,3),
            (2,2,1),
            (2,2,0),
            (2,2,3),
            (2,2,2),
            (2,2),
            (2,),
            ],
    ]

    # Non-round robin post-traversal: finish the first top-level quadrant
    # before moving to the second etc.  The trailing () is the root.
    tile_paths_posttraversal = list(chain(*tile_paths_posttraversal_lists)) + [()]

    # Round-robin post-traversal: start rendering to all directions from the
    # center.
    tile_paths_posttraversal_robin = list(roundrobin(tile_paths_posttraversal_lists)) + [()]

    def setUp(self):
        # Fresh depth-3 tree populated with the fixture paths for every test.
        self.tree = RendertileSet(3)
        for t in self.tile_paths:
            self.tree.add(t)

    def test_query(self):
        """Make sure the correct tiles in the set"""
        for path in iterate_base4(3):
            if path in self.tile_paths:
                self.assertTrue( self.tree.query_path(path) )
            else:
                self.assertFalse( self.tree.query_path(path) )

    def test_iterate(self):
        """Make sure iterating over the tree returns each tile exactly once"""
        dirty = set(self.tile_paths)
        for p in self.tree:
            # Can't use assertIn, was only added in 2.7
            self.assertTrue(p in dirty)
            # Should not see this one again
            dirty.remove(p)
        # Make sure they were all returned
        self.assertEqual(len(dirty), 0)

    def test_iterate_levelmax(self):
        """Same as test_iterate, but specifies the level explicitly"""
        dirty = set(self.tile_paths)
        for p in self.tree.iterate(3):
            # Can't use assertIn, was only added in 2.7
            self.assertTrue(p in dirty)
            # Should not see this one again
            dirty.remove(p)
        # Make sure they were all returned
        self.assertEqual(len(dirty), 0)

    def test_iterate_fail(self):
        """Meta-test: Make sure test_iterate() would actually fail"""
        # if an extra item were returned"""
        self.tree.add((1,1,1))
        self.assertRaises(AssertionError, self.test_iterate)

        # If something was supposed to be returned but wasn't
        tree = RendertileSet(3)
        # Add only half of the fixture paths, then swap in the
        # incomplete tree so test_iterate() must fail.
        c = len(self.tile_paths) // 2
        for t in self.tile_paths:
            tree.add(t)
            c -= 1
            if c <= 0:
                break
        self.tree = tree
        self.assertRaises(AssertionError, self.test_iterate)

    def test_count(self):
        self.assertEqual(self.tree.count(), len(self.tile_paths))

    def test_bool(self):
        "Tests the boolean status of a node"
        self.assertTrue(self.tree)
        t = RendertileSet(3)
        self.assertFalse(t)
        t.add((0,0,0))
        self.assertTrue(t)

    def test_query_level(self):
        "Tests querying at a level other than max"
        # level 2: the set of length-2 prefixes of the fixture paths
        l2 = set()
        for p in self.tile_paths:
            l2.add(p[0:2])
        for path in iterate_base4(2):
            if path in l2:
                self.assertTrue( self.tree.query_path(path) )
            else:
                self.assertFalse( self.tree.query_path(path) )

        # level 1:
        self.assertTrue( self.tree.query_path((0,)))
        self.assertTrue( self.tree.query_path((1,)))
        self.assertTrue( self.tree.query_path((2,)))
        self.assertFalse( self.tree.query_path((3,)))

    def test_iterate_level(self):
        """Test iterating at a level other than max"""
        # level 2
        l2 = set()
        for p in self.tile_paths:
            l2.add(p[0:2])
        for p in self.tree.iterate(2):
            self.assertTrue(p in l2, "%s was not supposed to be returned!" % (p,))
            l2.remove(p)
        self.assertEqual(len(l2), 0, "Never iterated over these items: %s" % l2)

        # level 1
        l1 = set()
        for p in self.tile_paths:
            l1.add(p[0:1])
        for p in self.tree.iterate(1):
            self.assertTrue(p in l1, "%s was not supposed to be returned!" % (p,))
            l1.remove(p)
        self.assertEqual(len(l1), 0, "Never iterated over these items: %s" % l1)

    def test_posttraverse(self):
        """Test a post-traversal of the tree's dirty tiles"""
        # Expect the results in this proper order.
        iterator = iter(self.tree.posttraversal())
        for expected, actual in zip(self.tile_paths_posttraversal, iterator):
            self.assertEqual(actual, expected)
        self.assertRaises(StopIteration, next, iterator)

    def test_posttraverse_roundrobin(self):
        """Test a round-robin post-traversal of the tree's dirty tiles"""
        # Expect the results in this proper order.
        iterator = iter(self.tree.posttraversal(robin=True))
        for expected, actual in zip(self.tile_paths_posttraversal_robin, iterator):
            self.assertEqual(actual, expected)
        self.assertRaises(StopIteration, next, iterator)

    def test_count_all(self):
        """Tests getting a count of all tiles (render tiles plus upper tiles)
        """
        # 22 render tiles + 13 upper tiles (including the root) = 35.
        c = self.tree.count_all()
        self.assertEqual(c, 35)
# Run the full suite when executed directly.
if __name__ == "__main__":
    unittest.main()
| gpl-3.0 |
jreback/pandas | pandas/tests/frame/constructors/test_from_records.py | 2 | 16550 | from datetime import datetime
from decimal import Decimal
import numpy as np
import pytest
import pytz
from pandas.compat import is_platform_little_endian
from pandas import CategoricalIndex, DataFrame, Index, Interval, RangeIndex, Series
import pandas._testing as tm
class TestFromRecords:
    """Tests for DataFrame.from_records covering recarrays, sequences,
    dicts, generators, and assorted edge cases / regression issues."""

    def test_from_records_with_datetimes(self):
        # this may fail on certain platforms because of a numpy issue
        # related GH#6140
        if not is_platform_little_endian():
            pytest.skip("known failure of test on non-little endian")

        # construction with a null in a recarray
        # GH#6140
        expected = DataFrame({"EXPIRY": [datetime(2005, 3, 1, 0, 0), None]})

        arrdata = [np.array([datetime(2005, 3, 1, 0, 0), None])]
        dtypes = [("EXPIRY", "<M8[ns]")]

        try:
            recarray = np.core.records.fromarrays(arrdata, dtype=dtypes)
        except (ValueError):
            pytest.skip("known failure of numpy rec array creation")

        result = DataFrame.from_records(recarray)
        tm.assert_frame_equal(result, expected)

        # coercion should work too: minute-resolution datetimes should be
        # upcast to nanosecond resolution
        arrdata = [np.array([datetime(2005, 3, 1, 0, 0), None])]
        dtypes = [("EXPIRY", "<M8[m]")]
        recarray = np.core.records.fromarrays(arrdata, dtype=dtypes)
        result = DataFrame.from_records(recarray)
        tm.assert_frame_equal(result, expected)

    def test_from_records_sequencelike(self):
        df = DataFrame(
            {
                "A": np.array(np.random.randn(6), dtype=np.float64),
                "A1": np.array(np.random.randn(6), dtype=np.float64),
                "B": np.array(np.arange(6), dtype=np.int64),
                "C": ["foo"] * 6,
                "D": np.array([True, False] * 3, dtype=bool),
                "E": np.array(np.random.randn(6), dtype=np.float32),
                "E1": np.array(np.random.randn(6), dtype=np.float32),
                "F": np.array(np.arange(6), dtype=np.int32),
            }
        )

        # this is actually tricky to create the recordlike arrays and
        # have the dtypes be intact
        blocks = df._to_dict_of_blocks()
        tuples = []
        columns = []
        dtypes = []
        for dtype, b in blocks.items():
            columns.extend(b.columns)
            dtypes.extend([(c, np.dtype(dtype).descr[0][1]) for c in b.columns])
        for i in range(len(df.index)):
            tup = []
            for _, b in blocks.items():
                tup.extend(b.iloc[i].values)
            tuples.append(tuple(tup))

        recarray = np.array(tuples, dtype=dtypes).view(np.recarray)
        recarray2 = df.to_records()
        lists = [list(x) for x in tuples]

        # tuples (lose the dtype info)
        result = DataFrame.from_records(tuples, columns=columns).reindex(
            columns=df.columns
        )

        # created recarray and with to_records recarray (have dtype info)
        result2 = DataFrame.from_records(recarray, columns=columns).reindex(
            columns=df.columns
        )
        result3 = DataFrame.from_records(recarray2, columns=columns).reindex(
            columns=df.columns
        )

        # list of tupels (no dtype info)
        result4 = DataFrame.from_records(lists, columns=columns).reindex(
            columns=df.columns
        )

        tm.assert_frame_equal(result, df, check_dtype=False)
        tm.assert_frame_equal(result2, df)
        tm.assert_frame_equal(result3, df)
        tm.assert_frame_equal(result4, df, check_dtype=False)

        # tuples is in the order of the columns
        result = DataFrame.from_records(tuples)
        tm.assert_index_equal(result.columns, RangeIndex(8))

        # test exclude parameter & we are casting the results here (as we don't
        # have dtype info to recover)
        columns_to_test = [columns.index("C"), columns.index("E1")]

        exclude = list(set(range(8)) - set(columns_to_test))
        result = DataFrame.from_records(tuples, exclude=exclude)
        result.columns = [columns[i] for i in sorted(columns_to_test)]

        tm.assert_series_equal(result["C"], df["C"])
        tm.assert_series_equal(result["E1"], df["E1"].astype("float64"))

        # empty case
        result = DataFrame.from_records([], columns=["foo", "bar", "baz"])
        assert len(result) == 0
        tm.assert_index_equal(result.columns, Index(["foo", "bar", "baz"]))

        result = DataFrame.from_records([])
        assert len(result) == 0
        assert len(result.columns) == 0

    def test_from_records_dictlike(self):
        # test the dict methods
        df = DataFrame(
            {
                "A": np.array(np.random.randn(6), dtype=np.float64),
                "A1": np.array(np.random.randn(6), dtype=np.float64),
                "B": np.array(np.arange(6), dtype=np.int64),
                "C": ["foo"] * 6,
                "D": np.array([True, False] * 3, dtype=bool),
                "E": np.array(np.random.randn(6), dtype=np.float32),
                "E1": np.array(np.random.randn(6), dtype=np.float32),
                "F": np.array(np.arange(6), dtype=np.int32),
            }
        )

        # columns is in a different order here than the actual items iterated
        # from the dict
        blocks = df._to_dict_of_blocks()
        columns = []
        for dtype, b in blocks.items():
            columns.extend(b.columns)

        asdict = {x: y for x, y in df.items()}
        asdict2 = {x: y.values for x, y in df.items()}

        # dict of series & dict of ndarrays (have dtype info)
        results = []
        results.append(DataFrame.from_records(asdict).reindex(columns=df.columns))
        results.append(
            DataFrame.from_records(asdict, columns=columns).reindex(columns=df.columns)
        )
        results.append(
            DataFrame.from_records(asdict2, columns=columns).reindex(columns=df.columns)
        )

        for r in results:
            tm.assert_frame_equal(r, df)

    def test_from_records_with_index_data(self):
        df = DataFrame(np.random.randn(10, 3), columns=["A", "B", "C"])

        data = np.random.randn(10)
        # an array passed as index= becomes the frame's index verbatim
        df1 = DataFrame.from_records(df, index=data)
        tm.assert_index_equal(df1.index, Index(data))

    def test_from_records_bad_index_column(self):
        df = DataFrame(np.random.randn(10, 3), columns=["A", "B", "C"])

        # should pass
        df1 = DataFrame.from_records(df, index=["C"])
        tm.assert_index_equal(df1.index, Index(df.C))

        df1 = DataFrame.from_records(df, index="C")
        tm.assert_index_equal(df1.index, Index(df.C))

        # should fail
        msg = r"Shape of passed values is \(10, 3\), indices imply \(1, 3\)"
        with pytest.raises(ValueError, match=msg):
            DataFrame.from_records(df, index=[2])
        with pytest.raises(KeyError, match=r"^2$"):
            DataFrame.from_records(df, index=2)

    def test_from_records_non_tuple(self):
        # any object supporting indexing/iteration should work, not
        # just tuples
        class Record:
            def __init__(self, *args):
                self.args = args

            def __getitem__(self, i):
                return self.args[i]

            def __iter__(self):
                return iter(self.args)

        recs = [Record(1, 2, 3), Record(4, 5, 6), Record(7, 8, 9)]
        tups = [tuple(rec) for rec in recs]

        result = DataFrame.from_records(recs)
        expected = DataFrame.from_records(tups)
        tm.assert_frame_equal(result, expected)

    def test_from_records_len0_with_columns(self):
        # GH#2633
        result = DataFrame.from_records([], index="foo", columns=["foo", "bar"])
        expected = Index(["bar"])

        assert len(result) == 0
        assert result.index.name == "foo"
        tm.assert_index_equal(result.columns, expected)

    def test_from_records_series_list_dict(self):
        # GH#27358
        expected = DataFrame([[{"a": 1, "b": 2}, {"a": 3, "b": 4}]]).T
        data = Series([[{"a": 1, "b": 2}], [{"a": 3, "b": 4}]])
        result = DataFrame.from_records(data)
        tm.assert_frame_equal(result, expected)

    def test_from_records_series_categorical_index(self):
        # GH#32805
        index = CategoricalIndex(
            [Interval(-20, -10), Interval(-10, 0), Interval(0, 10)]
        )
        series_of_dicts = Series([{"a": 1}, {"a": 2}, {"b": 3}], index=index)
        frame = DataFrame.from_records(series_of_dicts, index=index)
        expected = DataFrame(
            {"a": [1, 2, np.NaN], "b": [np.NaN, np.NaN, 3]}, index=index
        )
        tm.assert_frame_equal(frame, expected)

    def test_frame_from_records_utc(self):
        rec = {"datum": 1.5, "begin_time": datetime(2006, 4, 27, tzinfo=pytz.utc)}

        # it works
        DataFrame.from_records([rec], index="begin_time")

    def test_from_records_to_records(self):
        # from numpy documentation
        arr = np.zeros((2,), dtype=("i4,f4,a10"))
        arr[:] = [(1, 2.0, "Hello"), (2, 3.0, "World")]

        # TODO(wesm): unused
        frame = DataFrame.from_records(arr)  # noqa

        index = Index(np.arange(len(arr))[::-1])
        indexed_frame = DataFrame.from_records(arr, index=index)
        tm.assert_index_equal(indexed_frame.index, index)

        # without names, it should go to last ditch
        arr2 = np.zeros((2, 3))
        tm.assert_frame_equal(DataFrame.from_records(arr2), DataFrame(arr2))

        # wrong length
        msg = r"Shape of passed values is \(2, 3\), indices imply \(1, 3\)"
        with pytest.raises(ValueError, match=msg):
            DataFrame.from_records(arr, index=index[:-1])

        indexed_frame = DataFrame.from_records(arr, index="f1")

        # what to do?
        records = indexed_frame.to_records()
        assert len(records.dtype.names) == 3

        records = indexed_frame.to_records(index=False)
        assert len(records.dtype.names) == 2
        assert "index" not in records.dtype.names

    def test_from_records_nones(self):
        tuples = [(1, 2, None, 3), (1, 2, None, 3), (None, 2, 5, 3)]

        df = DataFrame.from_records(tuples, columns=["a", "b", "c", "d"])
        assert np.isnan(df["c"][0])

    def test_from_records_iterator(self):
        arr = np.array(
            [(1.0, 1.0, 2, 2), (3.0, 3.0, 4, 4), (5.0, 5.0, 6, 6), (7.0, 7.0, 8, 8)],
            dtype=[
                ("x", np.float64),
                ("u", np.float32),
                ("y", np.int64),
                ("z", np.int32),
            ],
        )
        # nrows=2 should consume only the first two records from the iterator
        df = DataFrame.from_records(iter(arr), nrows=2)
        xp = DataFrame(
            {
                "x": np.array([1.0, 3.0], dtype=np.float64),
                "u": np.array([1.0, 3.0], dtype=np.float32),
                "y": np.array([2, 4], dtype=np.int64),
                "z": np.array([2, 4], dtype=np.int32),
            }
        )
        tm.assert_frame_equal(df.reindex_like(xp), xp)

        # no dtypes specified here, so just compare with the default
        arr = [(1.0, 2), (3.0, 4), (5.0, 6), (7.0, 8)]
        df = DataFrame.from_records(iter(arr), columns=["x", "y"], nrows=2)
        tm.assert_frame_equal(df, xp.reindex(columns=["x", "y"]), check_dtype=False)

    def test_from_records_tuples_generator(self):
        def tuple_generator(length):
            for i in range(length):
                letters = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
                yield (i, letters[i % len(letters)], i / length)

        columns_names = ["Integer", "String", "Float"]
        columns = [
            [i[j] for i in tuple_generator(10)] for j in range(len(columns_names))
        ]
        data = {"Integer": columns[0], "String": columns[1], "Float": columns[2]}
        expected = DataFrame(data, columns=columns_names)

        generator = tuple_generator(10)
        result = DataFrame.from_records(generator, columns=columns_names)
        tm.assert_frame_equal(result, expected)

    def test_from_records_lists_generator(self):
        def list_generator(length):
            for i in range(length):
                letters = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
                yield [i, letters[i % len(letters)], i / length]

        columns_names = ["Integer", "String", "Float"]
        columns = [
            [i[j] for i in list_generator(10)] for j in range(len(columns_names))
        ]
        data = {"Integer": columns[0], "String": columns[1], "Float": columns[2]}
        expected = DataFrame(data, columns=columns_names)

        generator = list_generator(10)
        result = DataFrame.from_records(generator, columns=columns_names)
        tm.assert_frame_equal(result, expected)

    def test_from_records_columns_not_modified(self):
        # from_records must not mutate the caller's columns list
        tuples = [(1, 2, 3), (1, 2, 3), (2, 5, 3)]

        columns = ["a", "b", "c"]
        original_columns = list(columns)

        df = DataFrame.from_records(tuples, columns=columns, index="a")  # noqa

        assert columns == original_columns

    def test_from_records_decimal(self):
        tuples = [(Decimal("1.5"),), (Decimal("2.5"),), (None,)]

        df = DataFrame.from_records(tuples, columns=["a"])
        assert df["a"].dtype == object

        # coerce_float downcasts Decimal to float64 and None to NaN
        df = DataFrame.from_records(tuples, columns=["a"], coerce_float=True)
        assert df["a"].dtype == np.float64
        assert np.isnan(df["a"].values[-1])

    def test_from_records_duplicates(self):
        result = DataFrame.from_records([(1, 2, 3), (4, 5, 6)], columns=["a", "b", "a"])

        expected = DataFrame([(1, 2, 3), (4, 5, 6)], columns=["a", "b", "a"])

        tm.assert_frame_equal(result, expected)

    def test_from_records_set_index_name(self):
        def create_dict(order_id):
            return {
                "order_id": order_id,
                "quantity": np.random.randint(1, 10),
                "price": np.random.randint(1, 10),
            }

        documents = [create_dict(i) for i in range(10)]
        # demo missing data
        documents.append({"order_id": 10, "quantity": 5})

        result = DataFrame.from_records(documents, index="order_id")
        assert result.index.name == "order_id"

        # MultiIndex
        result = DataFrame.from_records(documents, index=["order_id", "quantity"])
        assert result.index.names == ("order_id", "quantity")

    def test_from_records_misc_brokenness(self):
        # GH#2179
        data = {1: ["foo"], 2: ["bar"]}

        result = DataFrame.from_records(data, columns=["a", "b"])
        exp = DataFrame(data, columns=["a", "b"])
        tm.assert_frame_equal(result, exp)

        # overlap in index/index_names
        data = {"a": [1, 2, 3], "b": [4, 5, 6]}

        result = DataFrame.from_records(data, index=["a", "b", "c"])
        exp = DataFrame(data, index=["a", "b", "c"])
        tm.assert_frame_equal(result, exp)

        # GH#2623
        rows = []
        rows.append([datetime(2010, 1, 1), 1])
        rows.append([datetime(2010, 1, 2), "hi"])  # test col upconverts to obj
        df2_obj = DataFrame.from_records(rows, columns=["date", "test"])
        result = df2_obj.dtypes
        expected = Series(
            [np.dtype("datetime64[ns]"), np.dtype("object")], index=["date", "test"]
        )
        tm.assert_series_equal(result, expected)

        rows = []
        rows.append([datetime(2010, 1, 1), 1])
        rows.append([datetime(2010, 1, 2), 1])
        df2_obj = DataFrame.from_records(rows, columns=["date", "test"])
        result = df2_obj.dtypes
        expected = Series(
            [np.dtype("datetime64[ns]"), np.dtype("int64")], index=["date", "test"]
        )
        tm.assert_series_equal(result, expected)

    def test_from_records_empty(self):
        # GH#3562
        result = DataFrame.from_records([], columns=["a", "b", "c"])
        expected = DataFrame(columns=["a", "b", "c"])
        tm.assert_frame_equal(result, expected)

        result = DataFrame.from_records([], columns=["a", "b", "b"])
        expected = DataFrame(columns=["a", "b", "b"])
        tm.assert_frame_equal(result, expected)

    def test_from_records_empty_with_nonempty_fields_gh3682(self):
        a = np.array([(1, 2)], dtype=[("id", np.int64), ("value", np.int64)])
        df = DataFrame.from_records(a, index="id")

        tm.assert_index_equal(df.index, Index([1], name="id"))
        assert df.index.name == "id"
        tm.assert_index_equal(df.columns, Index(["value"]))

        # empty recarray with the same fields should still produce a
        # named (empty) index
        b = np.array([], dtype=[("id", np.int64), ("value", np.int64)])
        df = DataFrame.from_records(b, index="id")
        tm.assert_index_equal(df.index, Index([], name="id"))
        assert df.index.name == "id"
| bsd-3-clause |
hjanime/VisTrails | vistrails/core/interpreter/default.py | 1 | 3689 | ###############################################################################
##
## Copyright (C) 2014-2015, New York University.
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the New York University nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
from __future__ import division
import vistrails.core.interpreter.cached
import vistrails.core.interpreter.noncached
import unittest
# Shortcuts to the two available interpreter implementations.
cached_interpreter = vistrails.core.interpreter.cached.CachedInterpreter
noncached_interpreter = vistrails.core.interpreter.noncached.Interpreter
# Module-level default interpreter class; mutated via set_default_interpreter().
__default_interpreter = cached_interpreter
##############################################################################
def set_cache_configuration(field, value):
    """Configuration subscriber: switch the default interpreter when the
    'cache' setting changes (truthy -> cached, falsy -> non-cached)."""
    assert field == 'cache'
    chosen = cached_interpreter if value else noncached_interpreter
    set_default_interpreter(chosen)
def connect_to_configuration(configuration):
    """Subscribe to the 'cache' field so the default interpreter follows it."""
    configuration.subscribe('cache', set_cache_configuration)
def get_default_interpreter():
    """Returns an instance of the default interpreter class."""
    # NOTE(review): .get() presumably returns a shared/singleton instance of
    # the interpreter class -- confirm against the interpreter implementations.
    return __default_interpreter.get()
def set_default_interpreter(interpreter_class):
    """Sets the default interpreter class (not an instance)."""
    global __default_interpreter
    __default_interpreter = interpreter_class
##############################################################################
class TestDefaultInterpreter(unittest.TestCase):
    """Checks that set_default_interpreter()/get_default_interpreter()
    round-trip, restoring the original default afterwards."""

    def test_set(self):
        old_interpreter = type(get_default_interpreter())
        try:
            # assertEqual replaces the deprecated assertEquals alias.
            set_default_interpreter(noncached_interpreter)
            self.assertEqual(type(get_default_interpreter()),
                             noncached_interpreter)
            set_default_interpreter(cached_interpreter)
            self.assertEqual(type(get_default_interpreter()),
                             cached_interpreter)
        finally:
            # Always restore the previous default, even if an assertion fails.
            set_default_interpreter(old_interpreter)
            self.assertEqual(type(get_default_interpreter()),
                             old_interpreter)
| bsd-3-clause |
Boussadia/weboob | modules/weather/test.py | 4 | 1444 | # -*- coding: utf-8 -*-
# Copyright(C) 2012 Arno Renevier
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.tools.test import BackendTest
class WeatherTest(BackendTest):
    """Functional test of the weather backend: city search, current
    conditions and the 10-day forecast."""
    BACKEND = 'weather'

    def test_cities(self):
        # A garbage/injection-looking query must match nothing.
        # (The query string restores the '&param=' that had been
        # mojibake-mangled into a pilcrow in the previous revision.)
        bogus = self.backend.iter_city_search('crappything&param=;drop database')
        self.assertTrue(len(list(bogus)) == 0)

        matches = self.backend.iter_city_search('paris')
        self.assertTrue(len(list(matches)) >= 1)

        # Materialize the iterator exactly once: iter_city_search may return
        # a generator, which len(list(...)) would exhaust and which is not
        # subscriptable -- the previous paris[0] access could fail.
        cities = list(self.backend.iter_city_search('paris france'))
        self.assertTrue(len(cities) == 1)

        current = self.backend.get_current(cities[0].id)
        self.assertTrue(current.temp.value is float(current.temp.value))

        forecasts = list(self.backend.iter_forecast(cities[0].id))
        self.assertTrue(len(forecasts) == 10)
| agpl-3.0 |
emersonsoftware/ansiblefork | lib/ansible/modules/source_control/gitlab_user.py | 10 | 12548 | #!/usr/bin/python
# (c) 2015, Werner Dijkerman (ikben@werner-dijkerman.nl)
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: gitlab_user
short_description: Creates/updates/deletes Gitlab Users
description:
- When the user does not exist in Gitlab, it will be created.
- When the user exists and state=absent, the user will be deleted.
- When changes are made to user, the user will be updated.
version_added: "2.1"
author: "Werner Dijkerman (@dj-wasabi)"
requirements:
- pyapi-gitlab python module
- administrator rights on the Gitlab server
options:
server_url:
description:
- Url of Gitlab server, with protocol (http or https).
required: true
validate_certs:
description:
- When using https if SSL certificate needs to be verified.
required: false
default: true
aliases:
- verify_ssl
login_user:
description:
- Gitlab user name.
required: false
default: null
login_password:
description:
- Gitlab password for login_user
required: false
default: null
login_token:
description:
- Gitlab token for logging in.
required: false
default: null
name:
description:
- Name of the user you want to create
required: true
username:
description:
- The username of the user.
required: true
password:
description:
- The password of the user.
required: true
email:
description:
- The email that belongs to the user.
required: true
sshkey_name:
description:
- The name of the sshkey
required: false
default: null
sshkey_file:
description:
- The ssh key itself.
required: false
default: null
group:
description:
- Add user as an member to this group.
required: false
default: null
access_level:
description:
- The access level to the group. One of the following can be used.
- guest
- reporter
- developer
- master
- owner
required: false
default: null
state:
description:
- Create or delete the user.
- Possible values are present and absent.
required: false
default: present
choices: ["present", "absent"]
'''
EXAMPLES = '''
- name: Delete Gitlab User
gitlab_user:
server_url: http://gitlab.example.com
validate_certs: False
login_token: WnUzDsxjy8230-Dy_k
username: myusername
state: absent
delegate_to: localhost
- name: Create Gitlab User
gitlab_user:
server_url: https://gitlab.dj-wasabi.local
validate_certs: True
login_user: dj-wasabi
login_password: MySecretPassword
name: My Name
username: myusername
password: mysecretpassword
email: me@example.com
sshkey_name: MySSH
sshkey_file: ssh-rsa AAAAB3NzaC1yc...
state: present
delegate_to: localhost
'''
RETURN = '''# '''
try:
import gitlab
HAS_GITLAB_PACKAGE = True
except:
HAS_GITLAB_PACKAGE = False
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.basic import *
class GitLabUser(object):
    """Wrapper around a pyapi-gitlab connection that creates, updates and
    deletes GitLab users on behalf of the Ansible module.

    Every method that reports an outcome calls ``self._module.exit_json()``
    and therefore does not return to its caller.
    """

    # Numeric GitLab permission levels keyed by their symbolic names.
    _ACCESS_LEVELS = {
        "guest": 10,
        "reporter": 20,
        "developer": 30,
        "master": 40,
        "owner": 50,
    }

    def __init__(self, module, git):
        """Keep the AnsibleModule (for exit/fail reporting) and the
        authenticated gitlab.Gitlab connection."""
        self._module = module
        self._gitlab = git

    def addToGroup(self, group_id, user_id, access_level):
        """Add user_id to group_id with the symbolic access_level.

        The dict lookup replaces an if/elif chain that left ``level`` unbound
        (a NameError) for an unrecognized access_level; an unknown name now
        raises an explicit KeyError instead. Valid names are still guarded by
        the module's argument choices.
        """
        return self._gitlab.addgroupmember(group_id, user_id,
                                           self._ACCESS_LEVELS[access_level])

    def createOrUpdateUser(self, user_name, user_username, user_password, user_email, user_sshkey_name, user_sshkey_file, group_name, access_level):
        """Create the user if it does not exist yet, otherwise update it."""
        group_id = ''
        arguments = {"name": user_name,
                     "username": user_username,
                     "email": user_email}
        if group_name is not None:
            if self.existsGroup(group_name):
                group_id = self.getGroupId(group_name)
        if self.existsUser(user_username):
            self.updateUser(group_id, user_sshkey_name, user_sshkey_file, access_level, arguments)
        else:
            if self._module.check_mode:
                self._module.exit_json(changed=True)
            self.createUser(group_id, user_password, user_sshkey_name, user_sshkey_file, access_level, arguments)

    def createUser(self, group_id, user_password, user_sshkey_name, user_sshkey_file, access_level, arguments):
        """Create the user, attach the SSH key and the optional group
        membership, then exit with the result."""
        user_changed = False
        user_username = arguments['username']
        if self._gitlab.createuser(password=user_password, **arguments):
            user_id = self.getUserId(user_username)
            # These calls are made for their side effects; a successful
            # creation is reported as changed regardless of their results.
            self._gitlab.addsshkeyuser(user_id=user_id, title=user_sshkey_name, key=user_sshkey_file)
            if group_id != '':
                self.addToGroup(group_id, user_id, access_level)
            user_changed = True
        if user_changed:
            self._module.exit_json(changed=True, result="Created the user")
        else:
            self._module.exit_json(changed=False)

    def deleteUser(self, user_username):
        """Delete the named user and exit with the outcome."""
        user_id = self.getUserId(user_username)
        if self._gitlab.deleteuser(user_id):
            self._module.exit_json(changed=True, result="Successfully deleted user %s" % user_username)
        else:
            self._module.exit_json(changed=False, result="User %s already deleted or something went wrong" % user_username)

    def existsGroup(self, group_name):
        """Return True if a group with exactly this name exists."""
        return any(group['name'] == group_name
                   for group in self._gitlab.getall(self._gitlab.getgroups))

    def existsUser(self, username):
        """Return True if the username search yields a user with a non-empty id."""
        return any(user['id'] != ''
                   for user in self._gitlab.getusers(search=username))

    def getGroupId(self, group_name):
        """Return the id of the named group, or None if it is absent."""
        for group in self._gitlab.getall(self._gitlab.getgroups):
            if group['name'] == group_name:
                return group['id']
        return None

    def getUserId(self, username):
        """Return the id of the first matching user, or None if absent."""
        for user in self._gitlab.getusers(search=username):
            if user['id'] != '':
                return user['id']
        return None

    def updateUser(self, group_id, user_sshkey_name, user_sshkey_file, access_level, arguments):
        """Sync name/username/email, the SSH key and the group membership of
        an existing user, then exit with the outcome."""
        user_changed = False
        user_username = arguments['username']
        user_id = self.getUserId(user_username)
        user_data = self._gitlab.getuser(user_id=user_id)
        # Compare the desired attributes against the current ones.
        for arg_key, arg_value in arguments.items():
            if user_data[arg_key] != arg_value:
                user_changed = True
        if user_changed:
            if self._module.check_mode:
                self._module.exit_json(changed=True)
            self._gitlab.edituser(user_id=user_id, **arguments)
            user_changed = True
        # Re-adding the SSH key / group membership reports changed when the
        # API call succeeds (check_mode short-circuits the call).
        if self._module.check_mode or self._gitlab.addsshkeyuser(user_id=user_id, title=user_sshkey_name, key=user_sshkey_file):
            user_changed = True
        if group_id != '':
            if self._module.check_mode or self.addToGroup(group_id, user_id, access_level):
                user_changed = True
        if user_changed:
            self._module.exit_json(changed=True, result="The user %s is updated" % user_username)
        else:
            self._module.exit_json(changed=False, result="The user %s is already up2date" % user_username)
def main():
    """Ansible module entry point: parse arguments, connect to GitLab and
    create, update or delete the requested user."""
    global user_id
    module = AnsibleModule(
        argument_spec=dict(
            server_url=dict(required=True),
            validate_certs=dict(required=False, default=True, type='bool', aliases=['verify_ssl']),
            login_user=dict(required=False, no_log=True),
            login_password=dict(required=False, no_log=True),
            login_token=dict(required=False, no_log=True),
            name=dict(required=True),
            username=dict(required=True),
            password=dict(required=True, no_log=True),
            email=dict(required=True),
            sshkey_name=dict(required=False),
            sshkey_file=dict(required=False),
            group=dict(required=False),
            access_level=dict(required=False, choices=["guest", "reporter", "developer", "master", "owner"]),
            state=dict(default="present", choices=["present", "absent"]),
        ),
        supports_check_mode=True
    )
    # Fail fast if the pyapi-gitlab dependency is not importable.
    if not HAS_GITLAB_PACKAGE:
        module.fail_json(msg="Missing required gitlab module (check docs or install with: pip install pyapi-gitlab")
    server_url = module.params['server_url']
    verify_ssl = module.params['validate_certs']
    login_user = module.params['login_user']
    login_password = module.params['login_password']
    login_token = module.params['login_token']
    user_name = module.params['name']
    user_username = module.params['username']
    user_password = module.params['password']
    user_email = module.params['email']
    user_sshkey_name = module.params['sshkey_name']
    user_sshkey_file = module.params['sshkey_file']
    group_name = module.params['group']
    access_level = module.params['access_level']
    state = module.params['state']
    # We need both login_user and login_password or login_token, otherwise we fail.
    if login_user is not None and login_password is not None:
        use_credentials = True
    elif login_token is not None:
        use_credentials = False
    else:
        module.fail_json(msg="No login credentials are given. Use login_user with login_password, or login_token")
    # Check if vars are none
    # NOTE(review): use_sshkey and add_to_group are computed here but never
    # read below -- presumably leftovers; confirm before removing.
    if user_sshkey_file is not None and user_sshkey_name is not None:
        use_sshkey = True
    else:
        use_sshkey = False
    if group_name is not None and access_level is not None:
        add_to_group = True
        group_name = group_name.lower()
    else:
        add_to_group = False
    user_username = user_username.lower()
    # Lets make an connection to the Gitlab server_url, with either login_user and login_password
    # or with login_token
    try:
        if use_credentials:
            # NOTE(review): verify_ssl is only passed on the token path; the
            # credentials path ignores validate_certs -- confirm intent.
            git = gitlab.Gitlab(host=server_url)
            git.login(user=login_user, password=login_password)
        else:
            git = gitlab.Gitlab(server_url, token=login_token, verify_ssl=verify_ssl)
    except Exception:
        e = get_exception()
        module.fail_json(msg="Failed to connect to Gitlab server: %s " % e)
    # Validate if group exists and take action based on "state"
    user = GitLabUser(module, git)
    # Check if user exists, if not exists and state = absent, we exit nicely.
    if not user.existsUser(user_username) and state == "absent":
        module.exit_json(changed=False, result="User already deleted or does not exists")
    else:
        # User exists,
        if state == "absent":
            user.deleteUser(user_username)
        else:
            user.createOrUpdateUser(user_name, user_username, user_password, user_email, user_sshkey_name, user_sshkey_file, group_name, access_level)


if __name__ == '__main__':
    main()
| gpl-3.0 |
lalinsky/acoustid-server | alembic/versions/661fce6a2f64_more_fields_in_submission.py | 1 | 2671 | """more fields in submission
Revision ID: 661fce6a2f64
Revises: 0c79593066ed
Create Date: 2019-11-14 12:02:55.010214
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '661fce6a2f64'
down_revision = '0c79593066ed'
branch_labels = None
depends_on = None
def upgrade(engine_name):
    """Dispatch to the upgrade_<engine_name>() function defined below."""
    globals()["upgrade_%s" % engine_name]()


def downgrade(engine_name):
    """Dispatch to the downgrade_<engine_name>() function defined below."""
    globals()["downgrade_%s" % engine_name]()


def upgrade_app():
    """No schema changes for the 'app' database in this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###


def downgrade_app():
    """No schema changes for the 'app' database in this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
def upgrade_ingest():
    """Add provenance/metadata columns and handled_at timestamps to the
    'ingest' database; submission.source_id becomes nullable."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('submission', sa.Column('account_id', sa.Integer(), nullable=True))
    op.add_column('submission', sa.Column('application_id', sa.Integer(), nullable=True))
    op.add_column('submission', sa.Column('application_version', sa.String(), nullable=True))
    op.add_column('submission', sa.Column('foreignid', sa.String(), nullable=True))
    op.add_column('submission', sa.Column('format', sa.String(), nullable=True))
    op.add_column('submission', sa.Column('handled_at', sa.DateTime(timezone=True), nullable=True))
    op.add_column('submission', sa.Column('meta', postgresql.JSONB(astext_type=sa.Text()), nullable=True))
    op.alter_column('submission', 'source_id',
                    existing_type=sa.INTEGER(),
                    nullable=True)
    op.add_column('submission_result', sa.Column('handled_at', sa.DateTime(timezone=True), nullable=True))
    # ### end Alembic commands ###


def downgrade_ingest():
    """Reverse upgrade_ingest(): drop the added columns and restore the
    NOT NULL constraint on submission.source_id."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('submission_result', 'handled_at')
    op.alter_column('submission', 'source_id',
                    existing_type=sa.INTEGER(),
                    nullable=False)
    op.drop_column('submission', 'meta')
    op.drop_column('submission', 'handled_at')
    op.drop_column('submission', 'format')
    op.drop_column('submission', 'foreignid')
    op.drop_column('submission', 'application_version')
    op.drop_column('submission', 'application_id')
    op.drop_column('submission', 'account_id')
    # ### end Alembic commands ###
def upgrade_fingerprint():
    """No schema changes for the 'fingerprint' database in this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###


def downgrade_fingerprint():
    """No schema changes for the 'fingerprint' database in this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
| mit |
bmya/odoo_addons | smile_access_control/tests/test_users.py | 5 | 4010 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 Smile (<http://www.smile.fr>). All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.tests.common import TransactionCase
from openerp.exceptions import ValidationError
class TestUsers(TransactionCase):
    """Unit tests for the profile-aware res.users extension."""

    def setUp(self):
        super(TestUsers, self).setUp()
        users_obj = self.env['res.users']
        groups_obj = self.env['res.groups']
        # Create groups
        self.group1, self.group2 = map(lambda index: groups_obj.create({'name': 'Group %d' % index}), range(1, 3))
        # Create user profiles
        # Profile 1 is linked (command 4) to group1 only.
        self.user_profile1 = users_obj.create({
            'name': 'Profile 1',
            'login': 'profile1',
            'user_profile': True,
            'groups_id': [(4, self.group1.id)],
        })
        # Profile 2 replaces (command 6) its groups with group1 and group2.
        self.user_profile2 = users_obj.create({
            'name': 'Profile 2',
            'login': 'profile2',
            'user_profile': True,
            'groups_id': [(6, 0, (self.group1 | self.group2).ids)],
        })
        # Create users
        self.user = users_obj.create({
            'name': 'Demo User',
            'login': 'demouser',
            'user_profile_id': self.user_profile1.id,
        })

    def test_create(self):
        """
        Test create method
        We create a dictionary of values
        We create a user from these values, with a user profile
        We check that the new user has been created with his name
        """
        userValue = {'name': 'User Test 1',
                     'login': 'usertest1',
                     'user_profile_id': self.user_profile2.id,
                     }
        users_obj = self.env['res.users']
        user_test = users_obj.create(userValue)
        newUser = self.env['res.users'].browse(user_test.id)
        self.assertEqual(userValue['name'], newUser['name'])

    def test_write(self):
        """
        Test write method
        We use the user created in setUp
        We change his user_profile_id
        We check if the update has been done
        """
        userEdited = self.env['res.users'].browse(self.user.id).write({'user_profile_id': self.user_profile2.id})
        self.assertEqual(userEdited, True)

    def test_check_user_profile_id(self):
        """
        Test _check_user_profile_id method
        We try to create a user with admin as user profile
        It raises a Validation Error
        """
        userValue = {'name': 'User Test 1',
                     'login': 'usertest1',
                     'user_profile_id': self.env.ref('base.user_root').id,
                     }
        with self.assertRaises(ValidationError):
            self.env['res.users'].create(userValue)

    def test_onchange_user_profile(self):
        """
        Test onchange user profile method
        We try to set the profile of an existing user to admin
        It raises a Validation Error
        """
        admin = self.env.ref('base.user_root').id
        with self.assertRaises(ValidationError):
            self.env['res.users'].browse(self.user.id).write({'user_profile_id': admin})
| agpl-3.0 |
ptchankue/youtube-dl | youtube_dl/extractor/googleplus.py | 129 | 2568 | # coding: utf-8
from __future__ import unicode_literals
import re
import codecs
from .common import InfoExtractor
from ..utils import unified_strdate
class GooglePlusIE(InfoExtractor):
    # Extracts videos from Google+ posts by scraping the post page and the
    # photo-viewer page it links to (no official API is used).
    IE_DESC = 'Google Plus'
    _VALID_URL = r'https://plus\.google\.com/(?:[^/]+/)*?posts/(?P<id>\w+)'
    IE_NAME = 'plus.google'
    _TEST = {
        'url': 'https://plus.google.com/u/0/108897254135232129896/posts/ZButuJc6CtH',
        'info_dict': {
            'id': 'ZButuJc6CtH',
            'ext': 'flv',
            'title': '嘆きの天使 降臨',
            'upload_date': '20120613',
            'uploader': '井上ヨシマサ',
        }
    }

    def _real_extract(self, url):
        """Resolve a post URL into an info dict listing every video size."""
        video_id = self._match_id(url)

        # Step 1, Retrieve post webpage to extract further information
        webpage = self._download_webpage(url, video_id, 'Downloading entry webpage')

        # The first line of the og:description doubles as the video title.
        title = self._og_search_description(webpage).splitlines()[0]
        upload_date = unified_strdate(self._html_search_regex(
            r'''(?x)<a.+?class="o-U-s\s[^"]+"\s+style="display:\s*none"\s*>
                    ([0-9]{4}-[0-9]{2}-[0-9]{2})</a>''',
            webpage, 'upload date', fatal=False, flags=re.VERBOSE))
        uploader = self._html_search_regex(
            r'rel="author".*?>(.*?)</a>', webpage, 'uploader', fatal=False)

        # Step 2, Simulate clicking the image box to launch video
        DOMAIN = 'https://plus.google.com/'
        # The href may be relative; normalize it to an absolute URL below.
        video_page = self._search_regex(
            r'<a href="((?:%s)?photos/.*?)"' % re.escape(DOMAIN),
            webpage, 'video page URL')
        if not video_page.startswith(DOMAIN):
            video_page = DOMAIN + video_page
        webpage = self._download_webpage(video_page, video_id, 'Downloading video page')

        def unicode_escape(s):
            # Decode \uXXXX escape sequences embedded in the scraped JS blob.
            decoder = codecs.getdecoder('unicode_escape')
            return re.sub(
                r'\\u[0-9a-fA-F]{4,}',
                lambda m: decoder(m.group(0))[0],
                s)

        # Extract video links all sizes
        formats = [{
            'url': unicode_escape(video_url),
            'ext': 'flv',
            'width': int(width),
            'height': int(height),
        } for width, height, video_url in re.findall(
            r'\d+,(\d+),(\d+),"(https?://redirector\.googlevideo\.com.*?)"', webpage)]
        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': title,
            'uploader': uploader,
            'upload_date': upload_date,
            'formats': formats,
        }
| unlicense |
OpenTreeOfLife/peyotl | peyotl/evaluate_tree.py | 2 | 10490 | #!/usr/bin/env python
from peyotl.ott import create_pruned_and_taxonomy_for_tip_ott_ids
from peyotl.phylo.compat import compare_bits_as_splits, SplitComparison
from peyotl.utility import any_early_exit, get_logger
_LOG = get_logger(__name__)
def evaluate_tree_rooting(nexson, ott, tree_proxy):
    """Score every candidate rooting of the phylogeny against the taxonomy.

    Returns None if the taxonomy contributes no information to the rooting
    decision (e.g. all of the tips are within one genus in the taxonomy).
    NOTE(review): `nexson` is accepted but never read here -- confirm whether
    it can be dropped from the signature's callers.
    TODO: need to coordinate with Jim Allman and see if we can
    do this in a non- O(nm) manner (where n and m are the # of non-trivial
    edges in the phylo and taxo tree); putting barrier notes on the phylo
    tree would work...
    """
    pruned_phylo, taxo_tree = create_pruned_and_taxonomy_for_tip_ott_ids(tree_proxy, ott)
    if taxo_tree is None:  # this can happen if no otus are mapped
        return None
    # The comparison is only informative when both trees have at least one
    # non-trivial grouping below the root.
    has_taxo_groupings = any_early_exit(taxo_tree.root.child_iter(), lambda node: not node.is_leaf)
    if not has_taxo_groupings:
        return None
    has_phylo_groupings = any_early_exit(pruned_phylo.root.child_iter(), lambda node: not node.is_leaf)
    if not has_phylo_groupings:
        return None
    # Assign one bit per tip id (shared between both trees) so that every
    # subtree/split can be represented as an integer bitmask.
    id2bit = pruned_phylo.add_bits4subtree_ids(None)
    taxo_tree.add_bits4subtree_ids(id2bit)
    assert taxo_tree.root.bits4subtree_ids == pruned_phylo.root.bits4subtree_ids
    taxo_nontriv_splits = taxo_tree.bits2internal_node
    taxon_mask = taxo_tree.root.bits4subtree_ids
    # _LOG.debug('taxo_nontriv_splits = {}'.format(taxo_nontriv_splits))
    # might want to copy this dict rather than modify in place..
    del taxo_nontriv_splits[taxon_mask]  # root bitmask is trivial
    _LOG.debug('taxon_mask = {} (which is {} bits)'.format(bin(taxon_mask)[2:], len(bin(taxon_mask)) - 2))
    num_ids = len(id2bit)
    _LOG.debug('id2bit has length = {}'.format(len(id2bit)))
    # for checking tips of the phylogeny, it is nice to know which leaf OTUs attach
    # at the base of the taxonomy (no other grouping)
    basal_taxo = set()
    basal_bits = 0
    for c in taxo_tree.root.child_iter():
        if c.is_leaf:
            basal_taxo.add(c._id)
            basal_bits |= id2bit[c._id]
    _LOG.debug('basal_bits = {}'.format(bin(basal_bits)[2:].zfill(num_ids)))
    _LOG.debug('# nontrivial taxo splits = {}'.format(len(taxo_nontriv_splits)))
    _EMPTY_SET = frozenset([])
    non_root_pp_preorder = [nd for nd in pruned_phylo.preorder_node_iter()][1:]
    curr_root_incompat_set = set()
    any_root_incompat_set = set()
    # Interning cache: many nodes share identical taxon-id sets, so they are
    # canonicalized through this dict to save memory and comparisons.
    _taxo_node_id_set_cache = {_EMPTY_SET: _EMPTY_SET}
    # Pass 1 (preorder): classify each phylo edge against every taxonomy
    # split, both in its current and in its inverted (re-rooted) orientation.
    for node in non_root_pp_preorder:
        edge = node.edge
        if node.is_leaf:
            edge._displays = None
            edge._inverted_displays = None
            b = id2bit[node._id]
            if node._id in basal_taxo:
                # Basal leaves cannot conflict with any taxonomy split.
                edge._not_inverted_incompat = _EMPTY_SET
                edge._inverted_incompat = _EMPTY_SET
                inv_mask = taxon_mask - b
                idisp = taxo_nontriv_splits.get(inv_mask)
                if idisp is not None:
                    edge._inverted_displays = idisp
            else:
                edge._not_inverted_incompat = _EMPTY_SET
                # TODO would be more efficient to jump to tip and walk back...
                b = id2bit[node._id]
                ii = set()
                for tb, tid in taxo_nontriv_splits.items():
                    if tb & b:
                        ii.add(tid)
                edge._inverted_incompat = _get_cached_set(ii, _taxo_node_id_set_cache)
                disp = taxo_nontriv_splits.get(b)
                if disp is not None:
                    edge._displays = disp
        else:
            # TODO this could be more efficient...
            b = node.bits4subtree_ids
            nii = set()
            ii = set()
            e = set()
            ie = set()
            displays = None
            inv_displays = None
            # TODO: this loop does not take advantage of the fact that
            # taxo_nontriv_splits are splits from a tree (hence compatible with each other)
            for tb, tid in taxo_nontriv_splits.items():
                sp_result = compare_bits_as_splits(b, tb, taxon_mask)
                if sp_result == SplitComparison.UNROOTED_INCOMPATIBLE:
                    any_root_incompat_set.add(tid)
                    nii.add(tid)
                    ii.add(tid)
                elif sp_result == SplitComparison.UNROOTED_COMPAT:
                    nii.add(tid)
                elif sp_result == SplitComparison.ROOTED_COMPAT:
                    ii.add(tid)
                elif sp_result == SplitComparison.UNROOTED_EQUIVALENT:
                    ie.add(tid)
                    inv_displays = tid
                elif sp_result == SplitComparison.ROOTED_EQUIVALENT:
                    e.add(tid)
                    displays = tid
            edge._not_inverted_incompat = _get_cached_set(nii, _taxo_node_id_set_cache)
            edge._inverted_incompat = _get_cached_set(ii, _taxo_node_id_set_cache)
            edge._equiv = _get_cached_set(e, _taxo_node_id_set_cache)
            edge._inverted_equiv = _get_cached_set(ie, _taxo_node_id_set_cache)
            edge._displays = displays
            edge._inverted_displays = inv_displays
            curr_root_incompat_set.update(nii)
            # create a set to be filled in in the loop below (for each internal node)
            node._inc_contrib_rootward = set()
            node._displays_contrib_rootward = set()
    pproot = pruned_phylo.root
    pproot._incompat_if_rooted_below = set()
    pproot._inc_contrib_rootward = set()
    pproot._displays_contrib_rootward = set()
    # Pass 2 (reversed preorder = children before parents): accumulate, per
    # node, the conflicts/displayed taxa contributed by its subtree.
    for node in reversed(non_root_pp_preorder):
        edge = node.edge
        if node.is_leaf:
            edge._inc_contrib_rootward = _EMPTY_SET
            node._displays_contrib_rootward = _EMPTY_SET
        else:
            par = node.parent
            iaobc = set(edge._not_inverted_incompat)
            iaobc.update(node._inc_contrib_rootward)
            edge._inc_contrib_rootward = _get_cached_set(iaobc, _taxo_node_id_set_cache)
            par._inc_contrib_rootward.update(edge._inc_contrib_rootward)
            par._displays_contrib_rootward.update(node._displays_contrib_rootward)
            if edge._displays is not None:
                par._displays_contrib_rootward.add(edge._displays)
    _LOG.debug('# root _inc_contrib_rootward = {}'.format(pruned_phylo.root._inc_contrib_rootward))
    _LOG.debug('# curr_root_incompat_set = {}'.format(curr_root_incompat_set))
    # Score the current root position first so it seeds the running optimum.
    pproot.rooting_here_incompat = _get_cached_set(pproot._inc_contrib_rootward, _taxo_node_id_set_cache)
    pproot.rooting_here_incompat_score = len(pproot.rooting_here_incompat)
    pproot.rooting_here_displays = _get_cached_set(pproot._displays_contrib_rootward, _taxo_node_id_set_cache)
    pproot.rooting_here_disp_score = len(pproot.rooting_here_displays)
    pproot.rooting_here_score = (pproot.rooting_here_disp_score, pproot.rooting_here_incompat_score)
    pproot._inc_contrib_tipward = _EMPTY_SET
    pproot._disp_contrib_tipward = _EMPTY_SET
    best_score = pproot.rooting_here_score
    best_rootings = [pproot]
    # now sweep up
    # Pass 3 (preorder): combine tipward and rootward contributions to score
    # every internal node and every edge as a candidate root position.
    for node in non_root_pp_preorder:
        edge = node.edge
        parent = node.parent
        sib_inc_union = set()
        sib_disp = set()
        for sib in node.sib_iter():
            sib_inc_union.update(sib.edge._inc_contrib_rootward)
            sib_disp.update(sib._displays_contrib_rootward)
            if sib.edge._displays is not None:
                sib_disp.add(sib.edge._displays)
        # if we are visiting an internal node, we have to figure out the cost of
        # rooting at the node too...
        if not node.is_leaf:
            icu = set()
            icu.update(edge._inverted_incompat)
            icu.update(sib_inc_union)
            icu.update(parent._inc_contrib_tipward)
            node._inc_contrib_tipward = _get_cached_set(icu, _taxo_node_id_set_cache)
            dci = set(sib_disp)
            if edge._inverted_displays is not None:
                # NOTE(review): the guard tests _inverted_displays but adds
                # _displays -- looks like a possible typo; confirm intent.
                dci.add(edge._displays)
            dci.update(parent._disp_contrib_tipward)
            node._disp_contrib_tipward = _get_cached_set(dci, _taxo_node_id_set_cache)
            rhi = set()
            rhi.update(icu)
            rhi.update(node._inc_contrib_rootward)
            node.rooting_here_incompat = _get_cached_set(rhi, _taxo_node_id_set_cache)
            rhd = set(node._displays_contrib_rootward)
            rhd.update(node._disp_contrib_tipward)
            node.rooting_here_displays = _get_cached_set(rhd, _taxo_node_id_set_cache)
            best_score, best_rootings = _check_for_opt_score(node, best_score, best_rootings)
        # figure out the # of conflicts if rooting on this edge...
        rhi = set()
        rhi.update(edge._inverted_incompat)
        rhi.update(sib_inc_union)
        edge.rooting_here_incompat = _get_cached_set(rhi, _taxo_node_id_set_cache)
        rhd = set(parent._disp_contrib_tipward)
        rhd.update(parent.rooting_here_displays)
        if edge._inverted_displays is not None:
            rhd.add(edge._inverted_displays)
        edge.rooting_here_displays = _get_cached_set(rhd, _taxo_node_id_set_cache)
        best_score, best_rootings = _check_for_opt_score(edge, best_score, best_rootings)
    _LOG.debug('best_score = {}'.format(best_score))
    _LOG.debug('best_rootings = {}'.format(best_rootings))
    _LOG.debug('current score = {}'.format(pproot.rooting_here_score))
    _LOG.debug('any_root_incompat_set (size={}) = {}'.format(len(any_root_incompat_set), any_root_incompat_set))
def _check_for_opt_score(entity, best, best_list):
incompat = len(entity.rooting_here_incompat)
ds = len(entity.rooting_here_displays)
entity.rooting_here_incompat_score = incompat
entity.rooting_here_disp_score = ds
entity.rooting_here_score = (entity.rooting_here_disp_score, entity.rooting_here_incompat_score)
high_disp, low_incompat = best
if ds > high_disp:
best = entity.rooting_here_score
best_list = [entity]
elif ds == high_disp:
if incompat < low_incompat:
best = entity.rooting_here_score
best_list = [entity]
elif incompat == low_incompat:
best_list.append(entity)
return best, best_list
def _get_cached_set(s, dict_frozensets):
fs = frozenset(s)
return dict_frozensets.setdefault(fs, fs)
| bsd-2-clause |
leifos/ifind | ifind/seeker/list_reader.py | 1 | 2413 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# seekiir Framework - Indexing
# ListReader class
#
import string
from os import linesep
from ifind.seeker.common_helpers import file_exists
class ListReader(list):
    '''
    A simple class which extends the Python list object.  Reads the entries of
    the given input file and stores them in this object, skipping comment and
    blank lines.
    '''
    def __init__(self, filename):
        '''
        Initialises the list from the given file.

        Opens the input file (if possible) and reads its contents into the
        list.  Raises an IOError if a filename was supplied but the file
        cannot be opened.  A filename of None yields an empty list.
        '''
        super(ListReader, self).__init__()
        self.__filename = filename
        if file_exists(self.__filename):
            self.__read_file()
        elif filename is not None:
            raise IOError("Could not open the specified list file, %s." % self.__filename)

    def __read_file(self):
        '''
        Reads the input file and appends every valid entry to the internal list.

        Comment lines (starting with '#') and lines that are empty once all
        whitespace is removed are skipped; every other line is filtered
        through _can_append_entry() before being appended.
        '''
        # 'with' guarantees the handle is closed even if reading fails
        # (previously the handle leaked when readlines() raised).
        with open(self.__filename, mode='r') as file_handle:
            lines = file_handle.readlines()
        for line in lines:
            # str.rstrip replaces the removed-in-Py3 string.rstrip() function.
            line = line.rstrip(linesep)  # Strip trailing newline character(s)
            # Line doesn't start with a hash, and when all whitespace is removed the string isn't empty.
            if not (line.startswith('#') or len(' '.join(line.split())) == 0):
                append_value = self._can_append_entry(line)
                if append_value:
                    self.append(append_value.strip())

    def save(self, filename):
        '''
        Saves the contents of the list object to a file, one item per line.

        The filename to save to is specified as *filename*.  If the file
        exists, the contents of the original file are overwritten.
        '''
        with open(filename, 'w') as file_object:
            for item in self:
                file_object.write(str(item) + linesep)  # One item per line (os.linesep to split lines natively)

    def _can_append_entry(self, line):
        '''
        A callback method which is called in __read_file() for every candidate
        line.  Returns either False if the line should not be appended to the
        list, or the value which should be appended.  Subclasses can override
        this to alter the value (e.g. converting to lowercase) before it is
        appended; the default implementation returns the line unchanged.
        '''
        return line
cougar-enigma/Enigma2PC | lib/python/Plugins/Extensions/DVDBurn/TitleList.py | 4 | 17772 | import DVDProject, TitleList, TitleCutter, TitleProperties, ProjectSettings, DVDToolbox, Process
from Screens.Screen import Screen
from Screens.ChoiceBox import ChoiceBox
from Screens.InputBox import InputBox
from Screens.MessageBox import MessageBox
from Screens.HelpMenu import HelpableScreen
from Screens.TaskView import JobView
from Components.Task import job_manager
from Components.ActionMap import HelpableActionMap, ActionMap
from Components.Sources.List import List
from Components.Sources.StaticText import StaticText
from Components.Sources.Progress import Progress
from Components.MultiContent import MultiContentEntryText
from Components.Label import MultiColorLabel
from enigma import gFont, RT_HALIGN_LEFT, RT_HALIGN_RIGHT
from Tools.Directories import resolveFilename, SCOPE_PLUGINS
class TitleList(Screen, HelpableScreen):
	"""Main screen of the DVDBurn plugin.

	Lists the titles of the current DVD project and offers actions to add,
	edit and remove titles, change collection settings, preview the DVD menu
	and start burn/ISO jobs via the Task framework.
	"""
	# Embedded skin XML describing the screen layout (colour buttons, the
	# title listbox with its templated multi-content rows, and the medium /
	# free-space indicators at the bottom).
	skin = """
<screen name="TitleList" position="center,center" size="560,470" title="DVD Tool" >
<ePixmap pixmap="skin_default/buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/green.png" position="140,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/yellow.png" position="280,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/blue.png" position="420,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;19" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;19" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget source="key_yellow" render="Label" position="280,0" zPosition="1" size="140,40" font="Regular;19" halign="center" valign="center" backgroundColor="#a08500" transparent="1" />
<widget source="key_blue" render="Label" position="420,0" zPosition="1" size="140,40" font="Regular;19" halign="center" valign="center" backgroundColor="#18188b" transparent="1" />
<widget source="title_label" render="Label" position="10,48" size="540,38" font="Regular;18" transparent="1" />
<widget source="error_label" render="Label" position="10,48" size="540,296" zPosition="3" font="Regular;20" transparent="1" />
<widget source="titles" render="Listbox" scrollbarMode="showOnDemand" position="10,86" size="546,296" zPosition="3" transparent="1" >
<convert type="TemplatedMultiContent">
{"template": [
MultiContentEntryText(pos = (0, 0), size = (360, 20), font = 0, flags = RT_HALIGN_LEFT, text = 1), # index 1 Title,
MultiContentEntryText(pos = (0, 20), size = (360, 17), font = 1, flags = RT_HALIGN_LEFT, text = 2), # index 2 description,
MultiContentEntryText(pos = (366, 6), size = (152, 20), font = 1, flags = RT_HALIGN_RIGHT, text = 3), # index 3 channel,
MultiContentEntryText(pos = (366, 20), size = (102, 17), font = 1, flags = RT_HALIGN_RIGHT, text = 4), # index 4 begin time,
MultiContentEntryText(pos = (470, 20), size = (48, 20), font = 1, flags = RT_HALIGN_RIGHT, text = 5), # index 5 duration,
],
"fonts": [gFont("Regular", 20), gFont("Regular", 14)],
"itemHeight": 37
}
</convert>
</widget>
<ePixmap pixmap="skin_default/div-h.png" position="0,390" zPosition="10" size="560,2" />
<ePixmap pixmap="skin_default/buttons/key_menu.png" position="10,394" size="35,25" alphatest="on" />
<widget source="hint" render="Label" position="50,396" size="540,22" font="Regular;18" halign="left" />
<widget name="medium_label" position="10,420" size="540,22" font="Regular;18" halign="left" foregroundColors="#FFFFFF,#FFFF00,#FF0000" />
<widget source="space_bar_single" render="Progress" position="10,446" size="270,24" borderWidth="1" zPosition="2" backgroundColor="#254f7497" />
<widget source="space_label_single" render="Label" position="10,449" size="270,22" zPosition="3" font="Regular;18" halign="center" transparent="1" foregroundColor="#000000" />
<widget source="space_bar_dual" render="Progress" position="10,446" size="540,24" borderWidth="1" backgroundColor="#254f7497" />
<widget source="space_label_dual" render="Label" position="10,449" size="540,22" zPosition="2" font="Regular;18" halign="center" transparent="1" foregroundColor="#000000" />
</screen>"""

	def __init__(self, session, project = None):
		"""Set up action maps, labels and the (initially empty) title list.

		A ``project`` may be handed in by the caller; otherwise a new project
		is created from the bundled template via newProject().
		"""
		Screen.__init__(self, session)
		HelpableScreen.__init__(self)
		self["titleactions"] = HelpableActionMap(self, "DVDTitleList",
			{
				"addTitle": (self.addTitle, _("Add a new title"), _("Add title")),
				"titleProperties": (self.titleProperties, _("Properties of current title"), _("Title properties")),
				"removeCurrentTitle": (self.removeCurrentTitle, _("Remove currently selected title"), _("Remove title")),
				"settings": (self.settings, _("Collection settings"), _("Settings")),
				"burnProject": (self.askBurnProject, _("Burn DVD"), _("Burn DVD")),
			})
		self["MovieSelectionActions"] = HelpableActionMap(self, "MovieSelectionActions",
			{
				"contextMenu": (self.showMenu, _("menu")),
			})
		self["actions"] = ActionMap(["OkCancelActions"],
			{
				"cancel": self.leave
			})
		self["key_red"] = StaticText()
		self["key_green"] = StaticText(_("Add title"))
		self["key_yellow"] = StaticText()
		self["key_blue"] = StaticText(_("Settings"))
		self["title_label"] = StaticText()
		self["error_label"] = StaticText()
		self["space_label_single"] = StaticText()
		self["space_label_dual"] = StaticText()
		self["hint"] = StaticText(_("Advanced Options"))
		self["medium_label"] = MultiColorLabel()
		self["space_bar_single"] = Progress()
		self["space_bar_dual"] = Progress()
		self["titles"] = List([])
		# Remembered project size in MB; used by updateSize() to pop up a
		# warning only when a capacity limit is crossed for the first time.
		self.previous_size = 0
		if project is not None:
			self.project = project
		else:
			self.newProject()
		self.onLayoutFinish.append(self.layoutFinished)

	def layoutFinished(self):
		"""Set the window title once the layout is up."""
		self.setTitle(_("DVD Titlelist"))

	def checkBackgroundJobs(self):
		"""Look for a pending DVDJob and remember it in self.backgroundJob (or None)."""
		for job in job_manager.getPendingJobs():
			print "type(job):", type(job)
			print "Process.DVDJob:", Process.DVDJob
			if type(job) == Process.DVDJob:
				self.backgroundJob = job
				return
		self.backgroundJob = None

	def showMenu(self):
		"""Build and open the context menu; entries depend on project state."""
		menu = []
		self.checkBackgroundJobs()
		if self.backgroundJob:
			# Offer to bring a running background burn job to the foreground.
			j = self.backgroundJob
			menu.append(("%s: %s (%d%%)" % (j.getStatustext(), j.name, int(100*j.progress/float(j.end))), self.showBackgroundJob))
		menu.append((_("DVD media toolbox"), self.toolbox))
		if self.project.settings.output.getValue() == "dvd":
			if len(self["titles"].list):
				menu.append((_("Burn DVD"), self.burnProject))
		elif self.project.settings.output.getValue() == "iso":
			menu.append((_("Create DVD-ISO"), self.burnProject))
		menu.append((_("Burn existing image to DVD"), self.selectImage))
		if len(self["titles"].list):
			menu.append((_("Preview menu"), self.previewMenu))
			menu.append((_("Edit chapters of current title"), self.editTitle))
			menu.append((_("Reset and renumerate title names"), self.resetTitles))
		menu.append((_("Exit"), self.leave))
		self.session.openWithCallback(self.menuCallback, ChoiceBox, title="", list=menu)

	def menuCallback(self, choice):
		"""Invoke the callable stored with the chosen menu entry."""
		if choice:
			choice[1]()

	def showBackgroundJob(self):
		"""Reattach the background job to a JobView screen."""
		job_manager.in_background = False
		self.session.openWithCallback(self.JobViewCB, JobView, self.backgroundJob)
		self.backgroundJob = None

	def titleProperties(self):
		"""Open the properties screen for the currently selected title."""
		if self.getCurrentTitle():
			self.session.openWithCallback(self.updateTitleList, TitleProperties.TitleProperties, self, self.project, self["titles"].getIndex())

	def selectImage(self):
		"""Open a file browser to pick an existing image to burn (see burnISO)."""
		self.session.openWithCallback(self.burnISO, ProjectSettings.FileBrowser, "image", self.project.settings)

	def newProject(self):
		"""Create a fresh DVDProject from the template and refresh the screen."""
		self.project = DVDProject.DVDProject()
		if self.loadTemplate():
			self.project.session = self.session
			self.settingsCB()

	def addTitle(self):
		"""Open a customised movie selection so the user can add a recording."""
		from Screens.MovieSelection import MovieSelection
		from Components.ActionMap import HelpableActionMap
		# Local subclass: re-labels the colour buttons, blocks the context
		# menu, and closes with the chosen movie (flagged with .edit to
		# indicate whether the cutlist editor should be opened afterwards).
		class DVDMovieSelection(MovieSelection):
			skin = """<screen name="DVDMovieSelection" position="center,center" size="560,445" title="Select a movie">
<ePixmap pixmap="skin_default/buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/green.png" position="140,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/yellow.png" position="280,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;19" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;19" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget source="key_yellow" render="Label" position="280,0" zPosition="1" size="140,40" font="Regular;19" halign="center" valign="center" backgroundColor="#a08500" transparent="1" />
<widget name="waitingtext" position="0,45" size="560,395" zPosition="4" font="Regular;22" halign="center" valign="center" />
<widget name="list" position="5,40" size="550,375" zPosition="2" scrollbarMode="showOnDemand" />
<widget name="DescriptionBorder" pixmap="skin_default/border_eventinfo.png" position="0,316" zPosition="1" size="560,103" transparent="1" alphatest="on" />
<widget source="Service" render="Label" position="5,318" zPosition="1" size="480,35" font="Regular;17" foregroundColor="#cccccc">
<convert type="MovieInfo">ShortDescription</convert>
</widget>
<widget source="Service" render="Label" position="495,318" zPosition="1" size="60,22" font="Regular;17" halign="right">
<convert type="ServiceTime">Duration</convert>
<convert type="ClockToText">AsLength</convert>
</widget>
<widget source="Service" render="Label" position="380,337" zPosition="2" size="175,22" font="Regular;17" halign="right">
<convert type="MovieInfo">RecordServiceName</convert>
</widget>
<widget source="Service" render="Label" position="5,357" zPosition="1" size="550,58" font="Regular;19">
<convert type="EventName">ExtendedDescription</convert>
</widget>
<widget name="freeDiskSpace" position="10,425" size="540,20" font="Regular;19" valign="center" halign="right" />
</screen>"""
			def __init__(self, session):
				MovieSelection.__init__(self, session)
				self["key_red"] = StaticText(_("Close"))
				self["key_green"] = StaticText(_("Add"))
				self["key_yellow"] = StaticText(_("Edit title"))
				self["ColorActions"] = HelpableActionMap(self, "ColorActions",
					{
						"red": (self.close, _("Close title selection")),
						# NOTE(review): help text below lacks the _() gettext
						# wrapper used everywhere else -- left unchanged here.
						"green": (self.insertWithoutEdit, ("insert without cutlist editor")),
						"yellow": (self.movieSelected, _("Add a new title"))
					})
			def updateTags(self):
				# Tag handling is not needed inside the DVD selection.
				pass
			def doContext(self):
				print "context menu forbidden inside DVDBurn to prevent calling multiple instances"
			def insertWithoutEdit(self):
				# Close with the selection, flagged to skip the cutlist editor.
				current = self.getCurrent()
				if current is not None:
					current.edit = False
					self.close(current)
			def movieSelected(self):
				# Close with the selection, flagged to open the cutlist editor.
				current = self.getCurrent()
				if current is not None:
					current.edit = True
					self.close(current)
		self.session.openWithCallback(self.selectedSource, DVDMovieSelection)

	def selectedSource(self, source = None):
		"""Add the selected recording to the project (only .ts files allowed)."""
		if source is None:
			return None
		if not source.getPath().endswith(".ts"):
			self.session.open(MessageBox,text = _("You can only burn Dreambox recordings!"), type = MessageBox.TYPE_ERROR)
			return None
		t = self.project.addService(source)
		try:
			editor = source.edit
		except AttributeError:
			# Selections without an .edit flag default to opening the editor.
			editor = True
		self.editTitle(t, editor)

	def removeCurrentTitle(self):
		"""Remove the currently selected title from the project."""
		title = self.getCurrentTitle()
		self.removeTitle(title)

	def removeTitle(self, title):
		"""Remove *title* from the project (no-op for None) and refresh the list."""
		if title is not None:
			self.project.titles.remove(title)
			self.updateTitleList()

	def toolbox(self):
		"""Open the DVD media toolbox screen."""
		self.session.open(DVDToolbox.DVDToolbox)

	def settings(self):
		"""Open the project settings screen."""
		self.session.openWithCallback(self.settingsCB, ProjectSettings.ProjectSettings, self.project)

	def settingsCB(self, update=True):
		"""Refresh the title list after the settings screen closed (if requested)."""
		if not update:
			return
		self.updateTitleList()

	def loadTemplate(self):
		"""Load the bundled project template; show the error label on failure.

		:return: True on success, False otherwise
		"""
		filename = resolveFilename(SCOPE_PLUGINS)+"Extensions/DVDBurn/DreamboxDVD.ddvdp.xml"
		if self.project.load(filename):
			self["error_label"].setText("")
			return True
		else:
			self["error_label"].setText(self.project.error)
			return False

	def askBurnProject(self):
		"""Ask for confirmation before burning (only when titles exist)."""
		if len(self["titles"].list):
			self.session.openWithCallback(self.burnProject,MessageBox,text = _("Do you want to burn this collection to DVD medium?"), type = MessageBox.TYPE_YESNO)

	def burnProject(self, answer=True):
		"""Queue the burn job (data DVD or video DVD depending on author mode)."""
		if not answer:
			return
		if self.project.settings.authormode.getValue() == "data_ts":
			job = Process.DVDdataJob(self.project)
			job_manager.AddJob(job)
			job_manager.in_background = False
			self.session.openWithCallback(self.JobViewCB, JobView, job)
		else:
			job = Process.DVDJob(self.project)
			job_manager.AddJob(job)
			job_manager.in_background = False
			self.session.openWithCallback(self.JobViewCB, JobView, job)

	def burnISO(self, path, scope, configRef):
		"""Queue a job burning an existing ISO image (callback of selectImage)."""
		if path:
			job = Process.DVDisoJob(self.project, path)
			job_manager.AddJob(job)
			job_manager.in_background = False
			self.session.openWithCallback(self.JobViewCB, JobView, job)

	def JobViewCB(self, in_background):
		"""Remember whether the job view was sent to the background."""
		job_manager.in_background = in_background

	def previewMenu(self):
		"""Queue a job that only renders a preview of the DVD menu."""
		job = Process.DVDJob(self.project, menupreview=True)
		job_manager.in_background = False
		job_manager.AddJob(job)

	def updateTitleList(self):
		"""Rebuild the listbox rows from the project titles and refresh labels."""
		list = [ ]
		for title in self.project.titles:
			# Row layout matches the skin template: title object, menu title,
			# subtitle, channel, formatted begin time, formatted duration.
			list.append((title, title.properties.menutitle.getValue(), title.properties.menusubtitle.getValue(), title.DVBchannel, title.formatDVDmenuText("$D.$M.$Y, $T", 0), title.formatDVDmenuText("$l", 0)))
		self["titles"].list = list
		self.updateSize()
		if len(list):
			self["key_red"].text = _("Remove title")
			self["key_yellow"].text = _("Title properties")
			self["title_label"].text = _("Table of content for collection") + " \"" + self.project.settings.name.getValue() + "\":"
		else:
			self["key_red"].text = ""
			self["key_yellow"].text = ""
			self["title_label"].text = _("Please add titles to the compilation.")

	def updateSize(self):
		"""Update the space bars and medium label for the current project size.

		Sizes are compared against the single and dual layer capacities (each
		reduced by a 100 MB safety margin); crossing a limit for the first
		time since the last update also pops up a message box.
		"""
		size = self.project.size/(1024*1024)  # project size in MB (integer division)
		MAX_DL = self.project.MAX_DL-100  # dual layer capacity minus safety margin
		MAX_SL = self.project.MAX_SL-100  # single layer capacity minus safety margin
		print "updateSize:", size, "MAX_DL:", MAX_DL, "MAX_SL:", MAX_SL
		if size > MAX_DL:
			# Does not even fit on a dual layer disc.
			percent = 100 * size / float(MAX_DL)
			self["space_label_dual"].text = "%d MB (%.2f%%)" % (size, percent)
			self["space_bar_dual"].value = int(percent)
			self["space_bar_single"].value = 100
			self["space_label_single"].text = ""
			self["medium_label"].setText(_("Exceeds dual layer medium!"))
			self["medium_label"].setForegroundColorNum(2)
			if self.previous_size < MAX_DL:
				self.session.open(MessageBox,text = _("Exceeds dual layer medium!"), type = MessageBox.TYPE_ERROR)
		elif size > MAX_SL:
			# Needs a dual layer disc; fill level shown relative to DL capacity.
			percent = 100 * size / float(MAX_DL)
			self["space_label_dual"].text = "%d MB (%.2f%%)" % (size, percent)
			self["space_bar_dual"].value = int(percent)
			self["space_bar_single"].value = 100
			self["space_label_single"].text = ""
			self["medium_label"].setText(_("Required medium type:") + " " + _("DUAL LAYER DVD") + ", %d MB " % (MAX_DL - size) + _("free"))
			self["medium_label"].setForegroundColorNum(1)
			if self.previous_size < MAX_SL:
				self.session.open(MessageBox, text = _("Your collection exceeds the size of a single layer medium, you will need a blank dual layer DVD!"), timeout = 10, type = MessageBox.TYPE_INFO)
		elif size < MAX_SL:
			# Fits on a single layer disc.
			# NOTE(review): size == MAX_SL falls through without updating anything.
			percent = 100 * size / float(MAX_SL)
			self["space_label_single"].text = "%d MB (%.2f%%)" % (size, percent)
			self["space_bar_single"].value = int(percent)
			self["space_bar_dual"].value = 0
			self["space_label_dual"].text = ""
			self["medium_label"].setText(_("Required medium type:") + " " + _("SINGLE LAYER DVD") + ", %d MB " % (MAX_SL - size) + _("free"))
			self["medium_label"].setForegroundColorNum(0)
		self.previous_size = size

	def getCurrentTitle(self):
		"""Return the title object of the selected row, or a falsy value if none."""
		t = self["titles"].getCurrent()
		return t and t[0]

	def editTitle(self, title = None, editor = True):
		"""Open the cutlist editor (or silent cutlist reader) for a title.

		:param title: title to edit; defaults to the current selection
		:param editor: True opens the interactive TitleCutter, False only
			reads the existing cutlist
		"""
		t = title or self.getCurrentTitle()
		if t is not None:
			self.current_edit_title = t
			if editor:
				self.session.openWithCallback(self.titleEditDone, TitleCutter.TitleCutter, t)
			else:
				self.session.openWithCallback(self.titleEditDone, TitleCutter.CutlistReader, t)

	def titleEditDone(self, cutlist):
		"""Apply the edited cutlist; offer data-DVD mode for H.264 streams."""
		t = self.current_edit_title
		t.titleEditDone(cutlist)
		if t.VideoType != 0:
			# Non-zero VideoType marks an H.264 stream, which a standard video
			# DVD cannot carry; ask whether to switch to a data DVD instead.
			self.session.openWithCallback(self.DVDformatCB,MessageBox,text = _("The DVD standard doesn't support H.264 (HDTV) video streams. Do you want to create a Dreambox format data DVD (which will not play in stand-alone DVD players) instead?"), type = MessageBox.TYPE_YESNO)
		else:
			self.updateTitleList()

	def resetTitles(self):
		"""Re-initialise every title's menu text with its 1-based position."""
		count = 0
		for title in self.project.titles:
			count += 1
			title.initDVDmenuText(count)
		self.updateTitleList()

	def DVDformatCB(self, answer):
		"""Callback of the H.264 question: switch to data mode or drop the title."""
		t = self.current_edit_title
		if answer == True:
			self.project.settings.authormode.setValue("data_ts")
			self.updateTitleList()
		else:
			self.removeTitle(t)

	def leave(self, close = False):
		"""Close the screen; ask for confirmation when titles would be lost."""
		if not len(self["titles"].list) or close:
			self.close()
		else:
			self.session.openWithCallback(self.exitCB, MessageBox,text = _("Your current collection will get lost!") + "\n" + _("Do you really want to exit?"), type = MessageBox.TYPE_YESNO)

	def exitCB(self, answer):
		"""Callback of the exit confirmation dialog."""
		print "exitCB", answer
		if answer is not None and answer:
			self.close()
arrabito/DIRAC | TransformationSystem/test/Test_replicationTransformation.py | 1 | 9922 | """Test the dirac-transformation-replication script and helper"""
import unittest
from mock import MagicMock as Mock, patch
from DIRAC import S_OK, S_ERROR
from DIRAC.TransformationSystem.Utilities.ReplicationTransformation import createDataTransformation
from DIRAC.TransformationSystem.Utilities.ReplicationCLIParameters import Params
__RCSID__ = "$Id$"
GET_VOMS = "DIRAC.TransformationSystem.Utilities.ReplicationCLIParameters.getVOMSVOForGroup"
GET_PROXY = "DIRAC.TransformationSystem.Utilities.ReplicationCLIParameters.getProxyInfo"
def getProxyMock(success=True):
  """Return a mock replacement for getProxyInfo.

  :param bool success: if True the mock reports a proxy in the 'clic_prod'
      group with the ProductionManagement property; otherwise it returns
      an S_ERROR structure
  :returns: Mock whose return value is the corresponding S_OK/S_ERROR
  """
  if success:
    return Mock(return_value=S_OK({'groupProperties': ['ProductionManagement'],
                                   'group': 'clic_prod',
                                  }))
  return Mock(return_value=S_ERROR("Failed"))
def opMock():
  """Return a mock replacement for the configuration Operations helper.

  getOptionsDict() yields {'trans': 'ProdID'} and getValue() yields 'ProdID'.

  :returns: Mock whose return value is the prepared operations mock
  """
  opmock = Mock()
  opmock.getOptionsDict.return_value = S_OK({'trans': 'ProdID'})
  opmock.getValue.return_value = 'ProdID'
  return Mock(return_value=opmock)
class TestMoving(unittest.TestCase):
  """Test the creation of moving transformations.

  Every test patches the same Transformation internals; the shared patching
  and the fixture constants (type, SEs, prodID) live in _createDataTrafo so
  each test only states what differs.
  """

  # Module hosting createDataTransformation (patch target for TransformationClient).
  MODULE_NAME = "DIRAC.TransformationSystem.Utilities.ReplicationTransformation"
  # The Transformation class whose internals are patched out.
  TRMODULE = "DIRAC.TransformationSystem.Client.Transformation.Transformation"

  def setUp(self):
    # TransformationClient mock: creating the input data query succeeds by default.
    self.tClientMock = Mock()
    self.tClientMock.createTransformationInputDataQuery.return_value = S_OK()
    self.tMock = Mock(return_value=self.tClientMock)

  def tearDown(self):
    pass

  def _createDataTrafo(self, setSE=None, addTrafo=None, **kwargs):
    """Call createDataTransformation with the Transformation internals patched.

    :param setSE: mock for ``Transformation.__setSE`` (default: always S_OK)
    :param addTrafo: mock for ``Transformation.addTransformation`` (default: S_OK)
    :param kwargs: forwarded to createDataTransformation (enable, extraname, ...)
    :returns: the S_OK/S_ERROR structure returned by createDataTransformation
    """
    setSE = Mock(return_value=S_OK()) if setSE is None else setSE
    addTrafo = Mock(return_value=S_OK()) if addTrafo is None else addTrafo
    with patch(self.TRMODULE + ".getTransformation", new=Mock(return_value=S_OK({}))), \
         patch(self.TRMODULE + ".addTransformation", new=addTrafo), \
         patch(self.TRMODULE + "._Transformation__setSE", new=setSE), \
         patch("%s.TransformationClient" % self.MODULE_NAME, new=self.tMock):
      return createDataTransformation('Moving', "Target-SRM", "Source-SRM", 'prodID', 12345, **kwargs)

  def test_createRepl_1(self):
    """Creating an enabled transformation succeeds."""
    ret = self._createDataTrafo(enable=True)
    self.assertTrue(ret['OK'], ret.get('Message', ""))

  def test_createRepl_Dry(self):
    """A dry run (enable=False, empty extraData) succeeds as well."""
    ret = self._createDataTrafo(enable=False, extraData={})
    self.assertTrue(ret['OK'], ret.get('Message', ""))

  def test_createRepl_2(self):
    """An extra name is accepted."""
    ret = self._createDataTrafo(extraname="extraName", enable=True)
    self.assertTrue(ret['OK'], ret.get('Message', ""))

  def test_createRepl_SEFail_1(self):
    """__setSE failing with (S_OK, S_ERROR) is reported as invalid TargetSE."""
    ret = self._createDataTrafo(setSE=Mock(side_effect=(S_OK(), S_ERROR())), enable=True)
    self.assertFalse(ret['OK'], str(ret))
    self.assertIn("TargetSE not valid", ret['Message'])

  def test_createRepl_SEFail_2(self):
    """__setSE failing with (S_ERROR, S_ERROR) is reported as invalid SourceSE."""
    ret = self._createDataTrafo(setSE=Mock(side_effect=(S_ERROR(), S_ERROR())), enable=True)
    self.assertFalse(ret['OK'], str(ret))
    self.assertIn("SourceSE not valid", ret['Message'])

  def test_createRepl_addTrafoFail_(self):
    """A failure to register the transformation is propagated."""
    ret = self._createDataTrafo(addTrafo=Mock(return_value=S_ERROR("Cannot add Trafo")), enable=True)
    self.assertFalse(ret['OK'], str(ret))
    self.assertIn("Cannot add Trafo", ret['Message'])

  def test_createRepl_createTrafoFail_(self):
    """A failure to create the input data query is propagated."""
    self.tClientMock.createTransformationInputDataQuery.return_value = S_ERROR("Failed to create IDQ")
    ret = self._createDataTrafo(enable=True)
    self.assertFalse(ret['OK'], str(ret))
    self.assertIn("Failed to create IDQ", ret['Message'])
class TestParams(unittest.TestCase):
  """Test the command line parameter handling for the moving creation script."""

  def setUp(self):
    # Script object mock: hands the positional arguments to Params.checkSettings.
    self.arguments = []
    self.sMock = Mock()
    self.sMock.getPositionalArgs.return_value = self.arguments
    self.params = Params()

  def tearDown(self):
    pass

  @patch(GET_PROXY, new=getProxyMock())
  @patch(GET_VOMS, new=Mock(return_value='clic'))
  def test_checkSettings(self):
    """Valid arguments: metaValues and targetSE are parsed, sourceSE stays empty."""
    self.arguments = ['12345', "TargetSE"]
    self.sMock.getPositionalArgs.return_value = self.arguments
    ret = self.params.checkSettings(self.sMock)
    self.assertTrue(ret['OK'], ret.get("Message", ''))
    self.assertEqual(self.params.metaValues, ['12345'])
    self.assertEqual(self.params.sourceSE, '')
    self.assertEqual(self.params.targetSE, ["TargetSE"])

  @patch(GET_PROXY, new=getProxyMock())
  @patch(GET_VOMS, new=Mock(return_value='clic'))
  def test_setMetadata(self):
    """A 'key:value, key: value' string is split into the extraData dict."""
    ret = self.params.setMetadata("Datatype:GEN, Energy: 124")
    self.assertTrue(ret['OK'], ret.get("Message", ''))
    self.assertEqual(self.params.extraData, {'Datatype': 'GEN',
                                             'Energy': '124'})

  @patch(GET_PROXY, new=getProxyMock())
  @patch(GET_VOMS, new=Mock(return_value='clic'))
  def test_checkSettings_FailArgumentSize(self):
    """The wrong number of positional arguments is rejected."""
    self.arguments = ['12345', "TargetSE", 'Foo']
    self.sMock.getPositionalArgs.return_value = self.arguments
    ret = self.params.checkSettings(self.sMock)
    self.assertFalse(ret['OK'], str(ret))
    self.assertTrue(any("ERROR: Wrong number of arguments" in msg for msg in self.params.errorMessages))

  @patch(GET_PROXY, new=getProxyMock(False))
  @patch(GET_VOMS, new=Mock(return_value='clic'))
  def test_FailProxy(self):
    """A failing proxy lookup is reported as 'No Proxy'."""
    self.arguments = ['12345', "TargetSE"]
    self.sMock.getPositionalArgs.return_value = self.arguments
    ret = self.params.checkSettings(self.sMock)
    self.assertFalse(ret['OK'], str(ret))
    self.assertTrue(any("ERROR: No Proxy" in msg for msg in self.params.errorMessages), str(self.params.errorMessages))

  @patch(GET_PROXY, new=getProxyMock(True))
  @patch(GET_VOMS, new=Mock(return_value=''))
  def test_FailProxy2(self):
    """A proxy whose group resolves to no VOMS VO is rejected ('ProxyGroup')."""
    self.arguments = ['12345', "TargetSE"]
    self.sMock.getPositionalArgs.return_value = self.arguments
    ret = self.params.checkSettings(self.sMock)
    self.assertFalse(ret['OK'], str(ret))
    self.assertTrue(any("ERROR: ProxyGroup" in msg for msg in self.params.errorMessages),
                    str(self.params.errorMessages))

  def test_setExtraName(self):
    """The extra name option is stored verbatim."""
    ret = self.params.setExtraname("extraName")
    self.assertTrue(ret['OK'], ret.get('Message', ""))
    self.assertEqual("extraName", self.params.extraname)
| gpl-3.0 |
rotofly/odoo | addons/project_timesheet/report/task_report.py | 336 | 4030 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime
from dateutil.relativedelta import relativedelta
from openerp.osv import fields,osv
from openerp import tools
class report_timesheet_task_user(osv.osv):
    """Reporting model backed by a database view (see init()): one row per
    user and month, comparing the task-work hours (from the view) with the
    attendance hours computed on the fly by get_hrs_timesheet()."""
    _name = "report.timesheet.task.user"
    _auto = False  # no table is created; init() builds the SQL view instead
    _order = "name"

    def get_hrs_timesheet(self, cr, uid, ids, name, args, context):
        """Functional-field getter: for each record, sum the daily attendance
        totals of the record's user over the month starting at the record's
        date (``name`` is a 'YYYY-MM-01' string).

        :return: dict mapping record id -> total attendance hours (float)
        """
        result = {}
        for record in self.browse(cr, uid, ids, context):
            # Last day of the record's month: first day + 1 month - 1 day.
            last_date = datetime.strptime(record.name, '%Y-%m-%d') + relativedelta(months=1) - relativedelta(days=1)
            obj = self.pool.get('hr_timesheet_sheet.sheet.day')
            sheet_ids = obj.search(cr, uid, [('sheet_id.user_id','=',record.user_id.id),('name','>=',record.name),('name','<=',last_date.strftime('%Y-%m-%d'))])
            data_days = obj.read(cr, uid, sheet_ids, ['name','sheet_id.user_id','total_attendance'])
            total = 0.0
            for day_attendance in data_days:
                total += day_attendance['total_attendance']
            result[record.id] = total
        return result

    _columns = {
        'name': fields.char('Date'),  # first day of the month ('YYYY-MM-01'), produced by the view
        'year': fields.char('Year', size=4, required=False, readonly=True),
        'month':fields.selection([('01','January'), ('02','February'), ('03','March'), ('04','April'), ('05','May'), ('06','June'),
            ('07','July'), ('08','August'), ('09','September'), ('10','October'), ('11','November'), ('12','December')],'Month', readonly=True),
        'user_id': fields.many2one('res.users', 'User',readonly=True),
        'timesheet_hrs': fields.function(get_hrs_timesheet, string="Timesheet Hours"),
        'task_hrs' : fields.float('Task Hours'),
    }

    def init(self, cr):
        """(Re)create the backing SQL view.

        The view cross-joins all users with every month in which any task work
        or timesheet day exists, sums the task-work hours per user/month, and
        derives a synthetic integer id from (user id, month)."""
        tools.drop_view_if_exists(cr, 'report_timesheet_task_user')
        cr.execute(""" create or replace view report_timesheet_task_user as (
select
((r.id*12)+to_number(months.m_id,'999999'))::integer as id,
months.name as name,
r.id as user_id,
to_char(to_date(months.name, 'YYYY/MM/DD'),'YYYY') as year,
to_char(to_date(months.name, 'YYYY/MM/DD'),'MM') as month,
(select sum(hours) from project_task_work where user_id = r.id and date between to_date(months.name, 'YYYY/MM/DD') and (to_date(months.name, 'YYYY/MM/DD') + interval '1 month' -
interval '1 day') ) as task_hrs
from res_users r,
(select to_char(p.date,'YYYY-MM-01') as name,
to_char(p.date,'YYYYMM') as m_id
from project_task_work p
union
select to_char(h.name,'YYYY-MM-01') as name,
to_char(h.name,'YYYYMM') as m_id
from hr_timesheet_sheet_sheet_day h) as months
group by
r.id,months.m_id,months.name,
to_char(to_date(months.name, 'YYYY/MM/DD'),'YYYY') ,
to_char(to_date(months.name, 'YYYY/MM/DD'),'MM')
) """)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
abaditsegay/arangodb | 3rdParty/V8-4.3.61/third_party/python_26/Lib/test/test_pwd.py | 58 | 3352 | import unittest
from test import test_support
import pwd
class PwdTest(unittest.TestCase):
    """Exercise the pwd module against the live password database.

    Note: this is Python 2 code (basestring, xrange); `self.assert_` is a
    deprecated alias and has been replaced by `assertTrue` (present since
    Python 2.6) without any change in behavior.
    """

    def test_values(self):
        """Every getpwall() entry must be a 7-tuple whose indexed fields
        mirror the named attributes with the documented types, and must be
        retrievable again via getpwnam()/getpwuid()."""
        entries = pwd.getpwall()
        entriesbyname = {}
        entriesbyuid = {}
        for e in entries:
            self.assertEqual(len(e), 7)
            # Index <-> attribute equivalence and type checks:
            # names/paths are strings, uid/gid are ints.
            self.assertEqual(e[0], e.pw_name)
            self.assertTrue(isinstance(e.pw_name, basestring))
            self.assertEqual(e[1], e.pw_passwd)
            self.assertTrue(isinstance(e.pw_passwd, basestring))
            self.assertEqual(e[2], e.pw_uid)
            self.assertTrue(isinstance(e.pw_uid, int))
            self.assertEqual(e[3], e.pw_gid)
            self.assertTrue(isinstance(e.pw_gid, int))
            self.assertEqual(e[4], e.pw_gecos)
            self.assertTrue(isinstance(e.pw_gecos, basestring))
            self.assertEqual(e[5], e.pw_dir)
            self.assertTrue(isinstance(e.pw_dir, basestring))
            self.assertEqual(e[6], e.pw_shell)
            self.assertTrue(isinstance(e.pw_shell, basestring))
            # The following won't work, because of duplicate entries
            # for one uid
            #   self.assertEqual(pwd.getpwuid(e.pw_uid), e)
            # instead collect all entries per name/uid and check afterwards.
            entriesbyname.setdefault(e.pw_name, []).append(e)
            entriesbyuid.setdefault(e.pw_uid, []).append(e)
        if len(entries) > 1000:  # Huge passwd file (NIS?) -- skip the rest
            return
        # Check whether the entry returned by getpwnam()/getpwuid()
        # for each name/uid is among those from getpwall().
        for e in entries:
            if not e[0] or e[0] == '+':
                continue  # skip NIS entries etc.
            self.assertTrue(pwd.getpwnam(e.pw_name) in entriesbyname[e.pw_name])
            self.assertTrue(pwd.getpwuid(e.pw_uid) in entriesbyuid[e.pw_uid])

    def test_errors(self):
        """Bad argument counts raise TypeError; unknown names/uids raise
        KeyError."""
        self.assertRaises(TypeError, pwd.getpwuid)
        self.assertRaises(TypeError, pwd.getpwnam)
        self.assertRaises(TypeError, pwd.getpwall, 42)
        # try to get some errors
        bynames = {}
        byuids = {}
        for (n, p, u, g, gecos, d, s) in pwd.getpwall():
            bynames[n] = u
            byuids[u] = n
        allnames = bynames.keys()
        namei = 0
        fakename = allnames[namei]
        while fakename in bynames:
            # Perturb the candidate one character at a time until it no
            # longer collides with a real account name.
            chars = list(fakename)
            for i in xrange(len(chars)):
                if chars[i] == 'z':
                    chars[i] = 'A'
                    break
                elif chars[i] == 'Z':
                    continue
                else:
                    chars[i] = chr(ord(chars[i]) + 1)
                    break
            else:
                # Every character was 'Z': fall back to the next real name.
                namei = namei + 1
                try:
                    fakename = allnames[namei]
                except IndexError:
                    # should never happen... if so, just forget it
                    break
            # NOTE(review): this join also runs after the else-branch above,
            # clobbering the freshly fetched fakename with the unmodified
            # all-'Z' string -- looks like a latent upstream quirk; kept
            # as-is to preserve behavior. TODO confirm against CPython.
            fakename = ''.join(chars)
        self.assertRaises(KeyError, pwd.getpwnam, fakename)
        # Choose a non-existent uid.
        fakeuid = 4127
        while fakeuid in byuids:
            fakeuid = (fakeuid * 3) % 0x10000
        self.assertRaises(KeyError, pwd.getpwuid, fakeuid)
def test_main():
    """Entry point used by the regression-test driver: run the PwdTest case."""
    test_support.run_unittest(PwdTest)


if __name__ == "__main__":
    test_main()
| apache-2.0 |
pedrohml/smartbot | setup.py | 1 | 1729 | import os
from setuptools import setup, find_packages
def read(*paths):
    """Return the full text of the file located by joining *paths*."""
    target = os.path.join(*paths)
    with open(target, 'r') as handle:
        return handle.read()
def requirements():
    """Read requirements.txt and return the list of requirement specifiers.

    Blank lines and '#' comment lines are skipped: the original version
    appended every stripped line, so an empty line or a comment in
    requirements.txt ended up as an invalid entry in ``install_requires``.

    :return: list of non-empty requirement strings, in file order
    """
    requirements_list = []
    with open('requirements.txt') as requirements:
        for install in requirements:
            spec = install.strip()
            # Skip blanks and comments -- they are not valid specifiers.
            if spec and not spec.startswith('#'):
                requirements_list.append(spec)
    return requirements_list
# Package metadata for the smartbot distribution; install_requires is
# sourced from requirements.txt via the requirements() helper above.
setup(name='smartbot',
      version="1.0",
      description='The most smart bot in telegram and slack',
      keywords='python telegram slack bot smart api',
      url='http://github.com/pedrohml/smartbot',
      author='Pedro Lira',
      author_email="pedrohml@gmail.com",
      license='MIT',
      install_requires=requirements(),
      # Ship every package except the test tree.
      packages=find_packages(exclude=['tests*']),
      scripts=['smartbot_full.py'],
      # Default and pt-BR configuration files installed under 'config'.
      data_files=[('config', ['config/smartbot_full.cfg', 'config/smartbot_full_ptBR.cfg'])],
      zip_safe=False,
      include_package_data=True,
      classifiers=[
          'Development Status :: 5 - Production/Stable',
          'Intended Audience :: Developers',
          'Operating System :: OS Independent',
          'Topic :: Software Development :: Libraries :: Python Modules',
          'Topic :: Communications :: Chat',
          'Topic :: Internet',
          'Programming Language :: Python',
          'Programming Language :: Python :: 2',
          'Programming Language :: Python :: 2.6',
          'Programming Language :: Python :: 2.7',
          # 'Programming Language :: Python :: 3',
          # 'Programming Language :: Python :: 3.2',
          # 'Programming Language :: Python :: 3.3',
          # 'Programming Language :: Python :: 3.4',
      ]
      )
| mit |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.