repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
Azure/azure-sdk-for-python | sdk/cosmos/azure-mgmt-documentdb/azure/mgmt/documentdb/models/database_account_connection_string.py | 5 | 1285 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class DatabaseAccountConnectionString(Model):
    """Connection string for the DocumentDB account.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar connection_string: Value of the connection string
    :vartype connection_string: str
    :ivar description: Description of the connection string
    :vartype description: str
    """

    # Both fields are server-populated; client-supplied values are ignored.
    _validation = {
        'connection_string': {'readonly': True},
        'description': {'readonly': True},
    }

    # Maps Python attribute names to their wire (JSON) names and msrest types.
    _attribute_map = {
        'connection_string': {'key': 'connectionString', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
    }

    def __init__(self):
        # Run msrest Model initialization so base-class bookkeeping is set up
        # (the original generated code skipped this call).
        super(DatabaseAccountConnectionString, self).__init__()
        self.connection_string = None
        self.description = None
| mit |
erlichmen/jugoflogs | models/log.py | 1 | 2297 | import base64
import os
from google.appengine.ext import ndb
import urllib
import hashlib
from datetime import datetime
class Log(ndb.Expando):
    """A single logged message stored in the App Engine datastore.

    Entities live in the namespace of the application they belong to (see
    `application_name`); the Expando base class lets `create` attach extra
    ad-hoc properties via **kwargs.
    """

    # Raw "From" header value, e.g. 'Full Name <user@example.com>'.
    from_user = ndb.StringProperty(required=True)
    fullname = ndb.StringProperty(indexed=False)
    email = ndb.StringProperty()
    username = ndb.StringProperty()
    facebookId = ndb.StringProperty()
    created_at = ndb.DateTimeProperty(required=True)
    body = ndb.StringProperty(required=True, indexed=False)
    # Random per-entity token; see create_salt().
    salt = ndb.StringProperty(required=True)
    # Google Cloud Storage object paths ('/<bucket>/...' style -- see
    # actual_links/file_names below).
    links = ndb.StringProperty(repeated=True)
    message_id = ndb.StringProperty()
    parent_id = ndb.StringProperty()

    @property
    def application_name(self):
        # The entity's datastore namespace doubles as the application name.
        return self.key.namespace()

    @classmethod
    def create_salt(cls):
        # 18 random bytes -> 24-char URL-safe base64 string (no '=' padding).
        return base64.urlsafe_b64encode(os.urandom(18))

    @classmethod
    def create(cls, app_name, sender, body=None, created_at=None, message_id=None, parent_id=None, **kwargs):
        """Builds (but does not put) a Log entity in the app's namespace.

        NOTE(review): body.decode() assumes `body` is a non-None byte string;
        a None body (the declared default) would raise AttributeError --
        confirm callers always pass one.
        """
        return cls(
            namespace=app_name,
            from_user=sender,
            salt=cls.create_salt(),
            created_at=created_at or datetime.utcnow(),
            message_id=message_id,
            parent_id=parent_id,
            body=body.decode(),
            **kwargs)

    @property
    def from_user_params(self):
        # (display name, email address) pair parsed from the raw From value.
        import email.utils
        return email.utils.parseaddr(self.from_user)

    @property
    def from_user_display_name(self):
        return self.from_user_params[0]

    @property
    def from_user_email(self):
        return self.from_user_params[1]

    def gravatar_url(self, size=None):
        """Returns the Gravatar avatar URL for the sender's email address.

        Gravatar hashes the lowercased email with MD5. `email` is already
        lowercased on the first line, so the second .lower() below is
        redundant but harmless. Python 2 code: hashlib.md5 accepts str and
        urllib.urlencode exists only on Python 2.
        """
        email = self.from_user_email.lower()
        default = "http://www.example.com/default.jpg"
        size = size or 32
        gravatar_url = "http://www.gravatar.com/avatar/" + hashlib.md5(email.lower()).hexdigest() + "?"
        gravatar_url += urllib.urlencode({'d': default, 's': str(size)})
        return gravatar_url

    @property
    def actual_links(self):
        # Yields publicly addressable Cloud Storage URLs for each stored link.
        for link in self.links:
            yield 'https://storage.googleapis.com' + link

    @property
    def file_names(self):
        # Drops the first path components (presumably bucket/prefix -- TODO
        # confirm against the writer of `links`) and yields the object name.
        for link in self.links:
            parts = link.split('/')
            yield "/".join(parts[4:])

    @property
    def names_links(self):
        # Pairs each file name with its full downloadable URL.
        return zip(self.file_names, self.actual_links)
gnu-sandhi/gnuradio | docs/sphinx/gnuradio_sphinx.py | 8 | 7224 | """
Customizations of sphinx for gnuradio use.
"""
from sphinx.ext.autodoc import py_ext_sig_re
from sphinx.ext.autodoc import ClassDocumenter, FunctionDocumenter, members_option
from sphinx.ext.autodoc import bool_option, members_set_option, identity
from sphinx.ext.autodoc import ALL
# A dictionary of the number of lines to delete from the beginning of docstrings
lines_to_delete = {}
def setup(sp):
    """Sphinx extension entry point: registers gnuradio customizations.

    Args:
        sp: The Sphinx application object.
    """
    # Fix line-breaks in signature.
    sp.connect('autodoc-process-signature', fix_signature)
    sp.connect('autodoc-process-docstring', remove_lines)
    # Add node to autodocument signal-processing blocks.
    sp.add_autodocumenter(OldBlockDocumenter)
    sp.add_autodocumenter(BlockDocumenter)
    sp.add_autodocumenter(PyBlockDocumenter)
def remove_lines(app, what, name, obj, options, lines):
    """autodoc-process-docstring hook: strips leading docstring lines.

    The number of lines to drop for `name` was recorded earlier by
    fix_signature (the SWIG signature lines Sphinx already displays).
    """
    drop_count = lines_to_delete.get(name, 0)
    # Reset the counter so a second pass over the same name removes nothing.
    lines_to_delete[name] = 0
    lines[:] = lines[drop_count:]
def fix_signature(app, what, name, obj, options, signature, return_annotation):
    """
    SWIG produces signature at the top of docstrings of the form
    'blah(int arg1, float arg2) -> return_type'
    and if the string is long it breaks it over multiple lines.
    Sphinx gets confused if it is broken over multiple lines.
    fix_signature and remove_lines get around this problem.

    Returns:
        A (signature, return_annotation) tuple for Sphinx to use, or None
        to leave the signature untouched.
    """
    if return_annotation is not None:
        return
    if hasattr(obj, '__doc__'):
        docs = obj.__doc__
    else:
        docs = None
    if not docs:
        return None
    doclines = docs.split('\n')
    # Re-join any signature SWIG wrapped over several lines (in place).
    del_lines = remove_linebreaks_in_signature(doclines)
    # match first line of docstring against signature RE
    match = py_ext_sig_re.match(doclines[0])
    if not match:
        return None
    exmod, path, base, args, retann = match.groups()
    # ok, now jump over remaining empty lines and set the remaining
    # lines as the new doclines
    i = 1
    while i < len(doclines) and not doclines[i].strip():
        i += 1
    # Record how many leading lines remove_lines() must strip later.
    lines_to_delete[name] = i - 1 + del_lines
    # format args
    signature = "({0})".format(args)
    return signature, retann
def remove_linebreaks_in_signature(lines):
    """Collapses a multi-line SWIG signature in `lines` onto one line.

    Mutates `lines` in place when the joined first line matches the
    autodoc signature regexp.

    Returns:
        Number of original lines merged into the first line (0 when no
        change was made).
    """
    alllines = '\n'.join(lines)
    alllines = alllines.lstrip()
    bits = alllines.split('->')
    if len(bits) == 1:
        # No '->' at all, so there is no SWIG signature to repair.
        return 0
    after = '->'.join(bits[1:])
    after_lines = after.split('\n')
    ending = None
    remainder = []
    # First non-empty line after '->' is the return type; the rest is body.
    # NOTE(review): assumes at least one non-empty line follows '->';
    # otherwise `ending` stays None and .strip() below would raise -- confirm
    # SWIG output guarantees this.
    for line in after_lines:
        if line and ending is None:
            ending = line
        elif ending is not None:
            remainder.append(line)
    first_line = ' '.join([a.strip() for a in bits[0].split('\n') if a.strip()]) + ' -> ' + ending.strip()
    match = py_ext_sig_re.match(first_line)
    # If it is a signature, make the change to lines.
    if match:
        new_lines = [first_line] + remainder
        lines[:] = new_lines
        return len(bits[0].split('\n'))
    else:
        return 0
# These methods are not displayed in the documentation of blocks to
# avoid redundancy: they come from the common gr block base classes and
# are identical for every block (see exclude_members in the Documenters).
common_block_members = [
    'check_topology',
    'detail',
    'history',
    'input_signature',
    'name',
    'nitems_read',
    'nitems_written',
    'nthreads',
    'output_multiple',
    'output_signature',
    'relative_rate',
    'set_detail',
    'set_nthreads',
    'start',
    'stop',
    'thisown',
    'to_basic_block',
    'unique_id',
    'make',
    'alias',
    'is_set_max_noutput_items',
    'max_noutput_items',
    'max_output_buffer',
    'message_ports_in',
    'message_ports_out',
    'min_output_buffer',
    'pc_input_buffers_full',
    'pc_input_buffers_full_var',
    'pc_noutput_items',
    'pc_noutput_items_var',
    'pc_nproduced',
    'pc_nproduced_var',
    'pc_output_buffers_full',
    'pc_output_buffers_full_var',
    'pc_work_time',
    'pc_work_time_var',
    'processor_affinity',
    'set_block_alias',
    'set_max_noutput_items',
    'unset_max_noutput_items',
    'set_max_output_buffer',
    'set_min_output_buffer',
    'set_processor_affinity',
    'symbol_name',
    'unset_processor_affinity', ]
class OldBlockDocumenter(FunctionDocumenter):
    """
    Specialized Documenter subclass for gnuradio blocks.

    It merges together the documentation for the generator function (e.g. gr.head)
    with the wrapped sptr (e.g. gr.gr_head_sptr) to keep the documentation
    tidier.
    """
    objtype = 'oldblock'
    directivetype = 'function'
    # Don't want to use this for generic functions, so give low priority.
    priority = -10

    def __init__(self, *args, **kwargs):
        super(OldBlockDocumenter, self).__init__(*args, **kwargs)
        # Get class name
        bits = self.name.split('.')
        if len(bits) != 3 or bits[0] != 'gnuradio':
            raise ValueError("expected name to be of form gnuradio.x.y but it is {0}".format(self.name))
        # Old-style naming convention: gnuradio.<mod>.<mod>_<block>_sptr.
        sptr_name = 'gnuradio.{0}.{0}_{1}_sptr'.format(bits[1], bits[2])
        # Create a Class Documenter to create documentation for the classes members.
        self.classdoccer = ClassDocumenter(self.directive, sptr_name, indent=self.content_indent)
        self.classdoccer.doc_as_attr = False
        self.classdoccer.real_modname = self.classdoccer.get_real_modname()
        self.classdoccer.options.members = ALL
        # Hide members shared by every block (see common_block_members).
        self.classdoccer.options.exclude_members = common_block_members
        self.classdoccer.parse_name()
        self.classdoccer.import_object()

    def document_members(self, *args, **kwargs):
        # Delegate member documentation to the wrapped sptr ClassDocumenter.
        return self.classdoccer.document_members(*args, **kwargs)
class BlockDocumenter(FunctionDocumenter):
    """
    Specialized Documenter subclass for new style gnuradio blocks.

    It merges together the documentation for the generator function (e.g. wavelet.squash_ff)
    with the wrapped sptr (e.g. wavelet.squash_ff_sptr) to keep the documentation
    tidier.
    """
    objtype = 'block'
    directivetype = 'function'
    # Don't want to use this for generic functions, so give low priority.
    priority = -10

    def __init__(self, *args, **kwargs):
        super(BlockDocumenter, self).__init__(*args, **kwargs)
        # Get class name
        # New-style naming: the sptr is simply '<name>_sptr'.
        sptr_name = self.name + '_sptr'
        # Create a Class Documenter to create documentation for the classes members.
        self.classdoccer = ClassDocumenter(self.directive, sptr_name, indent=self.content_indent)
        self.classdoccer.doc_as_attr = False
        self.classdoccer.real_modname = self.classdoccer.get_real_modname()
        self.classdoccer.options.members = ALL
        # Hide members shared by every block (see common_block_members).
        self.classdoccer.options.exclude_members = common_block_members
        self.classdoccer.parse_name()
        self.classdoccer.import_object()

    def document_members(self, *args, **kwargs):
        # Delegate member documentation to the wrapped sptr ClassDocumenter.
        return self.classdoccer.document_members(*args, **kwargs)
class PyBlockDocumenter(ClassDocumenter):
    """
    Specialized Documenter subclass for hierarchical python gnuradio blocks.
    """
    objtype = 'pyblock'
    directivetype = 'class'

    def __init__(self, *args, **kwargs):
        super(PyBlockDocumenter, self).__init__(*args, **kwargs)
        # Document all members except those shared by every block.
        self.options.members = ALL
        self.options.exclude_members = common_block_members
| gpl-3.0 |
abstract-open-solutions/account-invoicing | account_invoice_template/__openerp__.py | 30 | 2055 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2011 Agile Business Group sagl (<http://www.agilebg.com>)
# Copyright (C) 2011 Domsense srl (<http://www.domsense.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Odoo/OpenERP addon manifest dictionary (read by the addons loader).
{
    'name': "Account Invoice Template",
    'version': '0.1',
    'category': 'Generic Modules/Accounting',
    # Fixed description typo: "allows lo load" -> "allows to load".
    'description': """
Templates for Invoices
User can configure invoice templates, useful for recurring invoices.
The amount of each template line can be computed (through python code)
or kept as user input. If user input, when using the template, user has to fill
the amount of every input lines.
The invoice form allows to load, through a wizard, the template to use and the
amounts to fill.
Contributors
------------
Lorenzo Battistini <lorenzo.battistini@agilebg.com>
Leonardo Pistone <leonardo.pistone@camptocamp.com>
Franco Tampieri <franco@tampieri.info>
""",
    'author': "Agile Business Group,Odoo Community Association (OCA)",
    'website': 'http://www.agilebg.com',
    'license': 'AGPL-3',
    "depends": ['account_move_template'],
    "data": [
        'invoice_template.xml',
        'wizard/select_template.xml',
        'security/ir.model.access.csv',
    ],
    "active": False,
    "installable": False
}
| agpl-3.0 |
Avsecz/concise | tests/preprocessing/test_sequence.py | 2 | 4100 | import pytest
from concise.preprocessing import encodeDNA
from concise.preprocessing.sequence import (pad_sequences, DNA, tokenize,
token2one_hot, encodeSequence)
import numpy as np
def test_tokenize():
    """tokenize maps vocab entries to indices; neutral tokens map to -1 and
    anything outside vocab+neutral_vocab must raise."""
    assert tokenize("ACGTTA", DNA, neutral_vocab="N") == [0, 1, 2, 3, 3, 0]
    assert tokenize("ACGTGATGA", ["ACG", "TGA"], neutral_vocab="NNN") == [0, 1, 1]
    assert tokenize("ACGTGATGA", ["ACG"], neutral_vocab="TGA") == [0, -1, -1]
    # "TGA" is neither in vocab nor neutral here -> error expected.
    with pytest.raises(Exception):
        tokenize("ACGTGATGA", ["ACG"], neutral_vocab="NNN")
def test_token2one_hot():
    """token2one_hot: -1 (neutral token) encodes as an all-zeros row."""
    assert np.array_equal(token2one_hot([0, 1, -1], 2), np.array([[1, 0],
                                                                  [0, 1],
                                                                  [0, 0]]))
def test_encodeSequence():
    """encodeSequence: one-hot output is (batch, seq_len, vocab_size);
    token encoding yields integer indices with 0 for the neutral token."""
    assert encodeSequence(["ACG", "TGT"], DNA, "N").shape == (2, 3, 4)
    et = encodeSequence(["ACG", "TGT"], DNA, "N", encode_type="token")
    assert et.shape == (2, 3)
    assert np.array_equal(et, np.array([[1, 2, 3],
                                        [4, 3, 4]]))
    # TODO - should I increase the index by one?
    # Shorter sequence gets padded; pad and 'N' both tokenize to 0.
    et = encodeSequence(["ACG", "TGTN"], DNA, "N", encode_type="token")
    assert np.array_equal(et, np.array([[1, 2, 3, 0],
                                        [4, 3, 4, 0]]))
def test_encodeDNA():
    """encodeDNA: rejects a bare string (must be a list); one-hot encodes
    ACGT with 'N' as an all-zero row; maxlen pads or trims."""
    seq = "ACGTTTATNT"
    assert len(seq) == 10
    # A plain string (not wrapped in a list) must raise.
    with pytest.raises(ValueError):
        encodeDNA(seq)
    assert encodeDNA([seq]).shape == (1, 10, 4)
    assert encodeDNA([seq], maxlen=20).shape == (1, 20, 4)
    assert encodeDNA([seq], maxlen=5).shape == (1, 5, 4)
    assert np.all(encodeDNA([seq])[0, 0] == np.array([1, 0, 0, 0]))
    assert np.all(encodeDNA([seq])[0, 1] == np.array([0, 1, 0, 0]))
    assert np.all(encodeDNA([seq])[0, 2] == np.array([0, 0, 1, 0]))
    assert np.all(encodeDNA([seq])[0, 3] == np.array([0, 0, 0, 1]))
    assert np.all(encodeDNA([seq])[0, 4] == np.array([0, 0, 0, 1]))
    assert np.all(encodeDNA([seq])[0, -1] == np.array([0, 0, 0, 1]))
    # 'N' (second-to-last character) encodes to the all-zero row.
    assert np.all(encodeDNA([seq])[0, -2] == np.array([0, 0, 0, 0]))
def test_pad_sequences():
    """pad_sequences: pads strings and lists to maxlen with `value`,
    honoring the align= side."""
    sequence_vec = ["ACGTTTATNT"]
    # Fixed: length was compared with `is` (identity), which only passed via
    # CPython's small-int cache and warns on Python 3.8+; `==` is intended.
    assert len(pad_sequences(sequence_vec, value="N",
                             maxlen=20, align="end")[0]) == 20
    # works with lists
    assert pad_sequences([[1, 2, 3], [2, 2, 3, 4], [31, 3], [4, 2]], value=[0],
                         maxlen=5) == [[0, 0, 1, 2, 3],
                                       [0, 2, 2, 3, 4],
                                       [0, 0, 0, 31, 3],
                                       [0, 0, 0, 4, 2]]
    # align="end" keeps the trailing elements when trimming.
    assert pad_sequences([[1, 2, 3], [2, 2, 3, 4], [31, 3], [4, 2]], value=[0],
                         maxlen=2, align="end") == [[2, 3],
                                                    [3, 4],
                                                    [31, 3],
                                                    [4, 2]]
    # align="start" keeps the leading elements when trimming.
    assert pad_sequences([[1, 2, 3], [2, 2, 3, 4], [31, 3], [4, 2]], value=[0],
                         maxlen=2, align="start") == [[1, 2],
                                                      [2, 2],
                                                      [31, 3],
                                                      [4, 2]]
    # expect error
    with pytest.raises(ValueError):
        pad_sequences(sequence_vec, value="NNN", align="end")
    sequence_vec = ["ACGTTTATC"]
    assert len(pad_sequences(sequence_vec, value="NNN", align="end")[0]) == 9
    with pytest.raises(ValueError):
        pad_sequences(sequence_vec, value="NNN", maxlen=10, align="end")
    assert len(pad_sequences(sequence_vec, value="NNN", maxlen=12, align="end")[0]) == 12
def test_pad_sequences_strings():
    """String-sequence alignment cases for pad_sequences.

    Renamed from test_pad_sequences: the duplicate definition shadowed the
    earlier test of the same name, so pytest never collected that one.
    """
    sequence_vec = ['CTTACTCAGA', 'TCTTTA']
    assert pad_sequences(sequence_vec, 10, align="start", value="N") == ['CTTACTCAGA', 'TCTTTANNNN']
    assert pad_sequences(sequence_vec, 10, align="end", value="N") == ['CTTACTCAGA', 'NNNNTCTTTA']
    assert pad_sequences(sequence_vec, 4, align="center", value="N") == ['ACTC', 'CTTT']
| mit |
Chase235/cloudcourse | core/rules_impl.py | 6 | 31487 | #!/usr/bin/python2.4
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable-msg=W0231
"""This module contains implementations for various rules."""
# Supress pylint invalid import order
# pylint: disable-msg=C6203
# Supress pylint invalid use of super on old style class
# pylint: disable-msg=E1002
import datetime
import logging
import time
import appenginepatcher
from django.utils import translation
from google.appengine.ext import db
from core import errors
from core import models
from core import notifications
from core import request_cache
from core import rules
from core import service_factory
from core import utils
_ = translation.ugettext
STATUS = utils.RegistrationStatus
class MaxNumberRegisteredBy(rules.RuleRegisterResource):
  """Abstract class to limit the number of people who can be registered.

  Subclasses decide the granularity of the counter key (whole activity,
  per access point, ...). Counters are maintained through the inherited
  _Incr/_Decr/_Get helpers (defined on the base class, not visible here --
  presumably memcache-backed, per the comments below).

  Attributes:
    max_people: Maximum number of people who can registered.
  """

  # Key to keep track of how many students are currently enrolled.
  _KEY_PREFIX = '_'

  def __init__(self, max_people, *args, **kargs):
    super(MaxNumberRegisteredBy, self).__init__(*args, **kargs)
    self.max_people = max_people

  def _NormalizeKey(self, key):
    """Returns a non-empty string form of the counter key."""
    if not key:
      key = self._KEY_PREFIX
    # we normalize the key to a string
    return str(key)

  def _Evaluate(self, unused_initial_state, target_state, key=None):
    """Proxy around RuleRegister.Evaluate to deal with memcache state/keys.

    Args:
      unused_initial_state: See RuleRegister.Evaluate
      target_state: See RuleRegister.Evaluate
      key: The resource key to be used for maintaining the state in mem cache
        as far as incrementing/decrementing is concerned.

    Returns:
      Returns value that RuleRegister.Evaluate() should return.
    """
    key = self._NormalizeKey(key)
    resource_remaining = None
    if target_state == utils.RegistrationStatus.ENROLLED:
      # Optimistically claim a slot, then decide based on the new count.
      num_students = self._Incr(key)
      if num_students is None:
        # Cannot look up the value, we waitlist.
        logging.error('Could not increment key, val, namespace [%s,%s,%s]', key,
                      self._Get(key, 0), self.namespace)
        if self.online:
          value = utils.RegistrationStatus.WAITLISTED
          resource_remaining = 0  # Conservative estimate.
        else:  # in offline mode we fail and retry again.
          assert False, 'Could not access memcache'
      elif num_students <= self.max_people:
        value = utils.RegistrationStatus.ENROLLED
        resource_remaining = self.max_people - num_students
      else:
        value = utils.RegistrationStatus.WAITLISTED
        resource_remaining = self.max_people - num_students
        if self.offline:
          self._Decr(key)
        # In online mode we don't decrement the counter because we want this
        # rule to stay in waiting state for all further requests.
        # Reasoning is that we don't want another user get ENROLLED after one
        # got WAITING for fairness reasons (since the WAITING one may get denied
        # afterwards with back-end processing)
    else:
      # This rule only tries to limit number of enrollments. Anything else is OK
      value = target_state
      resources_used = self._Get(key)
      if resources_used is None:
        resources_used = self.max_people
        # Can t lookup resources used, we take a conservative approach
      resource_remaining = self.max_people - resources_used
    # We build the limiting resource key
    contextualized_key = self.key + key
    return {'status': value, 'rule_tags': [contextualized_key],
            'resource_remaining': resource_remaining}

  def _ProcessOnlineOutcome(self, eval_state, final_state, key):
    """Reconciles the online counter once the final state is known."""
    if eval_state == utils.RegistrationStatus.UNREGISTERED:
      if final_state == utils.RegistrationStatus.UNREGISTERED:
        # Need to decrement the counter. This _ProcessOutcome for UNREGISTERED
        # is only being called by the offline process on the online rules.
        # This is the only case where the offline process impacts the online
        # context. Note that the online process never notifies offline context.
        self._Decr(key)
    else:
      assert eval_state in [utils.RegistrationStatus.WAITLISTED,
                            utils.RegistrationStatus.ENROLLED]
      if final_state is None:
        # We incremented and dont need to.
        self._Decr(key)

  def _ProcessOfflineOutcome(self, eval_state, final_state, key):
    """Reconciles the offline counter once the final state is known."""
    if eval_state == utils.RegistrationStatus.UNREGISTERED:
      if final_state == utils.RegistrationStatus.UNREGISTERED:
        # we get notified but we never processed this request since this rule
        # does not act on unregister actions.
        self._Decr(key)
    elif eval_state == utils.RegistrationStatus.ENROLLED:
      if final_state != utils.RegistrationStatus.ENROLLED:
        # We incremented and dont need to.
        self._Decr(key)
    else:
      assert eval_state == utils.RegistrationStatus.WAITLISTED
      if final_state == utils.RegistrationStatus.ENROLLED:
        # We did not increment and need to.
        self._Incr(key)

  def _ProcessOutcome(self, eval_state, final_state, key=None):
    # Dispatches to the offline/online reconciliation for the given key.
    key = self._NormalizeKey(key)
    if self.offline:
      self._ProcessOfflineOutcome(eval_state, final_state, key)
    else:
      self._ProcessOnlineOutcome(eval_state, final_state, key)

  def ProcessOutcome(self, eval_state, final_state):
    # Default: a single activity-wide counter (key=None).
    self._ProcessOutcome(eval_state, final_state)
class MaxNumberRegisteredByActivity(MaxNumberRegisteredBy):
  """Caps how many people may be enrolled in one activity as a whole.

  Attributes:
    max_people: Int. Maximum number of people who can register for the activity.
  """

  def __init__(self, max_people, *args, **kargs):
    super(MaxNumberRegisteredByActivity, self).__init__(max_people, *args,
                                                        **kargs)

  def Evaluate(self, initial_state, target_state):
    # The whole activity shares a single counter, hence key=None.
    return MaxNumberRegisteredBy._Evaluate(self, initial_state, target_state,
                                           None)

  # Suppress pylint unused argument for overriden method
  # pylint: disable-msg=W0613
  @classmethod
  def TagsToReprocessOnChange(cls, rule_config, program_or_activity=None):
    """Overrides parent method."""
    return [rule_config.key + cls._KEY_PREFIX]

  def _BuildContext(self):
    """Overrides parent method: counts registrations needing accounting."""
    query = models.UserRegistration.all()
    query.filter('activity = ', self.eval_context.activity)
    _UpdateQueryMode(query, self.offline)
    enrolled_count = sum(
        1 for registration in query
        if _RegistrationNeedsAccounting(registration, self.offline))
    return {self._KEY_PREFIX: enrolled_count}

  @classmethod
  def GetDescription(cls):
    return _('Limited slots for activity.')
class MaxNumberRegisteredByAccessPoint(MaxNumberRegisteredBy):
  """Limits the number of people who can be registered for an access point.

  Due to datastore limitations, an access point cannot accept more than 1000
  people at this time.

  Attributes:
    max_people: Maximum number of people for that access point.
    access_point_key: List of AccessPoint keys to be used for this rule.
  """

  def __init__(self, max_people, access_point_keys, *args, **kargs):
    super(MaxNumberRegisteredByAccessPoint, self).__init__(max_people, *args,
                                                           **kargs)
    self.access_point_keys = access_point_keys

  def _BuildContext(self):
    """Overrides parent method."""
    return self._BuildContextFromAccessPoints(self.access_point_keys)

  @classmethod
  def CanProcessMultipleSchedules(cls):
    # Counters are tracked per schedule, so each schedule is evaluated alone.
    return False

  def _BuildContextFromAccessPoints(self, access_point_keys):
    """Builds a context from a list of access point keys.

    Args:
      access_point_keys: List of access point keys.

    Returns:
      A dictionary of key/values representing the context.
    """
    query = models.UserRegistration.all()
    query.filter('activity = ', self.eval_context.activity)
    _UpdateQueryMode(query, self.offline)
    keys = {}
    for reg in query:
      if _RegistrationNeedsAccounting(reg, self.offline):
        # schedule_list and access_point_list are parallel lists on the
        # registration; count per schedule for the relevant access points.
        for schedule_key, ap_key in zip(reg.schedule_list,
                                        reg.access_point_list):
          if ap_key in access_point_keys:
            sched_key = str(schedule_key)
            # This user registration is relevant to this rule
            keys[sched_key] = keys.get(sched_key, 0) + 1
    return keys

  def Evaluate(self, initial_state, target_state):
    """Overrides parent method."""
    return self._EvaluateAccessPoints(initial_state, target_state,
                                      self.access_point_keys)

  def _EvaluateAccessPoints(self, initial_state, target_state,
                            access_point_keys):
    """Evaluates the rule based on the given list of access point keys."""
    # TODO(user): append .value once we have the right evalcontext
    # We take the first schedule/access point because this rule can not process
    # multiple schedules. As such, its context is populated with only one entry
    # at the rules level.
    access_point_key = self.eval_context.access_point_list[0].key()
    schedule_key = self.eval_context.schedule_list[0].key()
    if access_point_key in access_point_keys:
      # This rule applies to this schedule
      return self._Evaluate(initial_state, target_state,
                            schedule_key)
    else:
      # Not one of our access points: let the transition through unchanged.
      return {'status': target_state, 'resource_remaining': None,
              'rule_tags': []}

  def _ProcessOutcomeAccessPoints(self, eval_state, final_state,
                                  access_point_keys):
    # Settle the per-schedule counter for every schedule this rule covers.
    for schedule, access_point in zip(self.eval_context.schedule_list,
                                      self.eval_context.access_point_list):
      if access_point.key() in access_point_keys:
        self._ProcessOutcome(eval_state, final_state, schedule.key())

  def ProcessOutcome(self, eval_state, final_state):
    return self._ProcessOutcomeAccessPoints(eval_state, final_state,
                                            self.access_point_keys)

  @classmethod
  def GetDescription(cls):
    return _('Limited slots for attending location')
class MaxNumberRegisteredByAccessPointTag(MaxNumberRegisteredByAccessPoint):
  """Caps registrations across all access points carrying given tags.

  Every access point can have a tag associated with it (e.g. both
  'Lincoln Center' and 'War Room' tagged NYC); this rule can then enforce
  a cap such as "no more than 20 people registered under NYC".

  Attributes:
    max_people: Maximum number of people for that access point tag.
    access_point_tags: A string list of tags to be used for this rule.
  """

  def __init__(self, max_people, access_point_tags, *args, **kargs):
    # Resolve the tags to the activity's matching access-point keys, then
    # delegate to the per-access-point implementation.
    activity_ap_keys = kargs['eval_context'].activity.GetAccessPoints()
    tagged_keys = _GetRelevantAccessPointKeys(activity_ap_keys,
                                              access_point_tags)
    super(MaxNumberRegisteredByAccessPointTag, self).__init__(
        max_people, tagged_keys, *args, **kargs)

  @classmethod
  def GetDescription(cls):
    return _('Limited slots for attending location type')
def _GetRelevantAccessPointKeys(access_point_keys, access_point_tags):
  """Returns a list of access point keys relevant to given tags.

  Args:
    access_point_keys: A list of AccessPoint keys.
    access_point_tags: A string list of access point tags.

  Returns:
    The subset of access_point_keys which corresponding access points have ALL
    the given access_point_tags.
  """
  # Keys of every access point carrying the requested tags, intersected
  # with the caller-supplied candidate keys.
  tagged_keys = set(
      point.key() for point in _GetAccessPointsWithTags(access_point_tags))
  return tagged_keys.intersection(access_point_keys)
class TimeFrameRegistrationByActivity(rules.RuleRegister):
  """Restricts enrollment for an activity to a fixed time window.

  Nobody can register after the window closes. Someone registering before
  the window opens is waitlisted and automatically enrolled once the window
  arrives, as long as the other rules are satisfied.

  Attributes:
    start_time: Datetime at which people can start registering.
    end_time: Datetime after which people cannot register.
  """

  def __init__(self, start_time, end_time, *args, **kargs):
    super(TimeFrameRegistrationByActivity, self).__init__(*args, **kargs)
    # Epoch seconds converted to naive local datetimes.
    self.start_time = datetime.datetime.fromtimestamp(start_time)
    self.end_time = datetime.datetime.fromtimestamp(end_time)

  def Evaluate(self, initial_state, target_state):
    if target_state != utils.RegistrationStatus.ENROLLED:
      # Only enrollment attempts are constrained by the time window.
      return {'status': target_state}
    outcome = _CanRegisterTimeWindows(initial_state,
                                      self.eval_context.queue_time,
                                      self.start_time, self.end_time)
    return {'status': outcome}

  @classmethod
  def GetDescription(cls):
    return _('Registration window.')
class TimeFrameRegistrationByAccessPointTag(rules.RuleRegister):
  """Limits the time frame for registration based on access point tag.

  This rule limits the time frame in which people can register for a
  particular activity / access point tag. People cannot register after the time
  frame has elapsed.
  If someone registers before the time frame, that person will be placed on the
  waiting list. Once the time frame arrives the person will be automatically
  enrolled as long as other rules are satisfied.

  Attributes:
    start_time: Datetime at which people can start registering.
    end_time: Datetime after which people cannot register any more.
    access_point_tags: List of access point tags for this rule.
  """

  def __init__(self, start_time, end_time, access_point_tags=None,
               *args, **kargs):
    super(TimeFrameRegistrationByAccessPointTag, self).__init__(*args, **kargs)
    # Epoch seconds converted to naive local datetimes.
    self.start_time = datetime.datetime.fromtimestamp(start_time)
    self.end_time = datetime.datetime.fromtimestamp(end_time)
    self.access_point_tags = access_point_tags

  def Evaluate(self, initial_state, target_state):
    """Overrides parent method."""
    if target_state == utils.RegistrationStatus.ENROLLED:
      ap_keys = [ap.key() for ap in self.eval_context.access_point_list]
      aps = _GetRelevantAccessPointKeys(ap_keys, self.access_point_tags)
      if aps:
        # this rule applies
        return {'status': _CanRegisterTimeWindows(initial_state,
                                                  self.eval_context.queue_time,
                                                  self.start_time,
                                                  self.end_time)}
    # Non-enrollment transitions, or schedules on untagged access points,
    # pass through unchanged.
    return {'status': target_state}

  @classmethod
  def GetDescription(cls):
    return _('Registration window for attending location.')
class TimeCancelByActivity(rules.RuleRegister):
  """Blocks late unregistration from an activity.

  Past a configured deadline before the activity start, enrolled users can
  no longer unregister.

  Attributes:
    time_to_activity: Time in seconds until activity starts.
  """

  def __init__(self, time_to_activity, *args, **kargs):
    super(TimeCancelByActivity, self).__init__(*args, **kargs)
    self.time_to_activity = time_to_activity

  def Evaluate(self, initial_state, target_state):
    """Overrides parent method."""
    is_cancellation = (
        target_state == utils.RegistrationStatus.UNREGISTERED
        and initial_state == utils.RegistrationStatus.ENROLLED)
    if not is_cancellation:
      # The rule only constrains enrolled users trying to unregister.
      return {'status': target_state}
    # It is OK to use local time with mktime as long as all datetimes are in
    # the same timezone.
    start_time = self.eval_context.activity.start_time
    deadline = time.mktime(start_time.timetuple()) - self.time_to_activity
    request_time = time.mktime(self.eval_context.queue_time.timetuple())
    if request_time < deadline:
      return {'status': utils.RegistrationStatus.UNREGISTERED}
    # Too late to cancel: keep the user enrolled.
    return {'status': initial_state}

  @classmethod
  def GetDescription(cls):
    return _('Unregister deadline.')
class ManagerApproval(rules.RuleRegister):
"""Enforces students to require manager approval before attending a course."""
  def __init__(self, *args, **kargs):
    super(ManagerApproval, self).__init__(*args, **kargs)
    # Flag toggled during evaluation when an approval workflow must be
    # kicked off later (see Evaluate/_CheckAndInitiateApprovalProcess).
    self._check_and_create_approval = False
  def _GetRuleTag(self):
    """Unique tag for (rule, activity, student); also the approval key name."""
    return '%s_%s_%s' % (self.key, self.eval_context.activity.key(),
                         self.eval_context.user.appengine_user)
  def _GetUserManager(self):
    """Gets the manager users.User object of the registering user."""
    if not appenginepatcher.on_production_server:
      # In dev mode user is her own manager. Change it to another user to
      # test. Else registration will go preapproved. No workflow.
      return self.eval_context.user.appengine_user
    # Memoize the user-info service lookup on the instance.
    if not hasattr(self, '_manager'):
      student_email = self.eval_context.user.appengine_user.email()
      user_service = service_factory.GetUserInfoService()
      manager_info = user_service.GetManagerInfo(student_email)
      if manager_info is None:
        # Unknown user, or no manager on record.
        self._manager = None
      else:
        self._manager = utils.GetAppEngineUser(manager_info.primary_email)
    return self._manager
  def _IsPreApproved(self):
    """Returns True when the registration creator is the student's manager.

    A registration created by the manager (batch enrollment) needs no
    separate approval round-trip.
    """
    try:
      # Check if manager is trying to enroll the user through batch enrollment.
      return self._GetUserManager() == self.eval_context.creator.appengine_user
    # Suppress pylint catch Exception
    # pylint: disable-msg=W0703
    except errors.ServiceCriticalError, exception:
      logging.error('[%s] %s', type(exception), exception)
      assert self.online # We dont fail online, just assume, not pre approved.
      return False
  def _IsPreDeclined(self):
    """Returns True when no manager can be resolved for the student."""
    try:
      return self._GetUserManager() is None # Dont know person or her manager.
    # Suppress pylint catch Exception
    # pylint: disable-msg=W0703
    except errors.ServiceCriticalError, exception:
      if not self.online:
        # We dont fail online, just assume, not disapproved.
        raise exception
      return False
  def _GetApprovalKey(self):
    """Key to be used on the approval object."""
    # Deterministic key: one ManagerApproval entity per (rule, activity, user).
    return db.Key.from_path(models.ManagerApproval.kind(),
                            self._GetRuleTag())
def _GetUsableApproval(self):
approval_key = self._GetApprovalKey()
approval = request_cache.GetEntityFromKey(approval_key)
if approval is not None:
time_diff = abs(approval.queue_time - self.eval_context.queue_time)
allow_delta = datetime.timedelta(seconds=1)
if not approval.approved and time_diff >= allow_delta:
# Allows user to re-ask approval if denied previously.
approval = None # Dont use the approval object.
return approval
def _CheckAndInitiateApprovalProcess(self):
"""Initiates approval process if necessary."""
approval = self._GetUsableApproval()
if approval is None:
# Send email to manager to approve user request.
dummy_registration = models.UserRegistration(
eval_context=self.eval_context,
status=utils.RegistrationStatus.WAITLISTED,
confirmed=utils.RegistrationConfirm.PROCESSED,
active=utils.RegistrationActive.ACTIVE)
notifications.SendMail(
dummy_registration,
notifications.NotificationType.MANAGER_APPROVAL_REQUEST,
to=self._GetUserManager().email(),
cc=self.eval_context.user.appengine_user.email(),
extra_context={'approval_key': str(self._GetApprovalKey())})
# Write an approval entity to datastore.
approval_entity = models.ManagerApproval(
key_name=self._GetRuleTag(),
candidate=self.eval_context.user.appengine_user,
manager=self._GetUserManager(),
activity=self.eval_context.activity.key(),
program=self.eval_context.program.key(),
nominator=self.eval_context.creator.appengine_user,
approved=False,
manager_decision=False,
queue_time=self.eval_context.queue_time,
)
approval_entity.put()
def Evaluate(self, unused_initial_state, target_state):
"""Overrides parent method."""
if rules.IsPredictionMode():
return {'status': target_state, 'rule_tags': [self.key]}
return_status = target_state # By default accept transition.
rule_tag = None
if target_state == utils.RegistrationStatus.ENROLLED:
if self._IsPreDeclined():
return_status = None # Non google.com account or no manager.
elif not self._IsPreApproved():
rule_tag = self._GetRuleTag()
approval = self._GetUsableApproval()
if approval is None: # No usable approval, workflow to be initiated.
return_status = utils.RegistrationStatus.WAITLISTED
self._check_and_create_approval = True
elif not approval.manager_decision: # Manager did not decide.
return_status = utils.RegistrationStatus.WAITLISTED
elif not approval.approved: # Manager decided and declined.
return_status = None
rule_tags = [self.key]
if rule_tag is not None:
rule_tags.append(rule_tag)
return {'status': return_status, 'rule_tags': rule_tags}
def ProcessOutcome(self, eval_state, final_state):
"""Process the result of rule evaluation to manage rule state."""
if self.online: return # Nothing to do during online mode.
if (final_state == utils.RegistrationStatus.WAITLISTED and
self._check_and_create_approval):
assert eval_state == utils.RegistrationStatus.WAITLISTED
# Initiate the manager approval workflow.
self._CheckAndInitiateApprovalProcess()
# Suppress pylint unused argument for overriden method
# pylint: disable-msg=W0613
@classmethod
def TagsToReprocessOnChange(cls, rule_config, program_or_activity=None):
"""Overrides parent method."""
return [rule_config.key]
@classmethod
def GetDescription(cls):
return _('Needs manager approval.')
class TimeCancelByAccessPointTag(rules.RuleRegister):
  """Enforces a time limit for late registration cancels by access point tag.

  This rule enforces a time limit after which users will not be able to
  unregister from a particular access point tag.

  Attributes:
    time_to_activity: Time in seconds until activity starts.
    access_point_tags: List of access point tags for this rule.

  Example:
    TimeCancelByAccessPointTag(3600) will allow users to unregister until up
    to 1 hour before the activity starts.
  """

  def __init__(self, time_to_activity, access_point_tags=None,
               *args, **kargs):
    super(TimeCancelByAccessPointTag, self).__init__(*args, **kargs)
    self.time_to_activity = time_to_activity
    self.access_point_tags = access_point_tags

  def Evaluate(self, initial_state, target_state):
    """Overrides parent method."""
    is_cancel_attempt = (
        target_state == utils.RegistrationStatus.UNREGISTERED
        and initial_state == utils.RegistrationStatus.ENROLLED)
    if not is_cancel_attempt:
      # Only enrolled -> unregistered transitions are time-limited.
      return {'status': target_state}
    attended_keys = [ap.key() for ap in self.eval_context.access_point_list]
    tagged_keys = _GetRelevantAccessPointKeys(attended_keys,
                                              self.access_point_tags)
    if not tagged_keys:
      # No attended access point carries the configured tags; rule is moot.
      return {'status': target_state}
    activity_epoch = time.mktime(
        self.eval_context.activity.start_time.timetuple())
    cancel_deadline = activity_epoch - self.time_to_activity
    request_epoch = time.mktime(self.eval_context.queue_time.timetuple())
    if request_epoch < cancel_deadline:
      # Still before the deadline: the cancel goes through.
      return {'status': utils.RegistrationStatus.UNREGISTERED}
    # Past the deadline: keep the user enrolled.
    return {'status': initial_state}

  @classmethod
  def GetDescription(cls):
    return _('Unregister deadline for attending location type.')
class EmployeeTypeRestriction(rules.RuleRegister):
  """Restricts enrollment based on employee type.

  Attributes:
    employee_types: List of utils.EmployeeType.XXX choices.
  """

  def __init__(self, employee_types,
               *args, **kargs):
    super(EmployeeTypeRestriction, self).__init__(*args, **kargs)
    self.employee_types = employee_types

  # Supress pylint unused argument, overriding parent method
  # pylint: disable-msg=W0613
  def Evaluate(self, initial_state, target_state):
    """Overrides parent method.

    Only the transition into ENROLLED is checked; all other target states
    pass through unchanged. NOTE(review): STATUS is a module-level alias
    defined outside this view — presumably utils.RegistrationStatus; confirm.
    """
    if target_state == STATUS.ENROLLED:
      # Retrieve employee type from user service
      exception = None
      person = None
      email = self.eval_context.user.email
      try:
        user_service = service_factory.GetUserInfoService()
        person = user_service.GetUserInfoMulti([email]).get(email)
      # Suppress pylint catch Exception
      # pylint: disable-msg=W0703
      except errors.ServiceCriticalError, exception:
        logging.error('[%s] %s', type(exception), exception)
      # In dev we just let the user to be enrolled. (No user service in dev).
      if not appenginepatcher.on_production_server:
        return {'status': STATUS.ENROLLED}
      if exception is not None:  # Prod user info service problems.
        if self.online:  # Production online case, we waitlist.
          return {'status': STATUS.WAITLISTED}
        # Production offline case, we raise exception.
        logging.info('User[%s] lookup failed', email)
        raise exception
      # Prod mode, no exception and didn't find user using user hr service.
      if person is None:
        logging.info('Can not lookup user [%s]',
                     self.eval_context.user)
        return {'status': None}  # Not allowed if cannot lookup user.
      logging.info('Person type for %s is %s, allowing only %s',
                   email, person.employee_type, self.employee_types)
      if person.employee_type in self.employee_types:
        return {'status': STATUS.ENROLLED}
      # Not allowed.
      return {'status': None}
    # this rule does not apply
    return {'status': target_state}

  @classmethod
  def GetDescription(cls):
    return _('Restricted by employee types.')
def _UpdateQueryMode(query, offline):
  """Updates a query filters based on offline mode.

  In offline mode only processed, enrolled registrations are relevant, so
  the corresponding equality filters are added in place.

  Args:
    query: The query to be updated.
    offline: A boolean to indicate offline mode.

  Returns:
    The same query object, with offline filters applied when offline is True.
  """
  if offline:
    query.filter('status =', utils.RegistrationStatus.ENROLLED)
    query.filter('confirmed =', utils.RegistrationConfirm.PROCESSED)
  return query
def _RegistrationNeedsAccounting(reg, offline):
  """Returns True if the given registration needs to be accounted.

  In-memory twin of the datastore filtering done by _UpdateQueryMode, usable
  on its own for cases that are awkward to express as datastore filters.

  Args:
    reg: User registration
    offline: A Boolean to indicate offline mode

  Returns:
    True if the given user registration needs to be taken into account when
    building a context.
  """
  if offline:
    # Offline only counts fully processed enrollments.
    return (reg.status == utils.RegistrationStatus.ENROLLED and
            reg.confirmed == utils.RegistrationConfirm.PROCESSED)
  # Online: active registrations always count.
  if reg.active == utils.RegistrationActive.ACTIVE:
    return True
  # Registrations in a transition/temporary state count too.
  if (reg.status == utils.RegistrationStatus.ENROLLED and
      reg.confirmed == utils.RegistrationConfirm.NOT_READY):
    return True
  # Unregistrations not yet processed offline: such rows were marked
  # inactive by unregisterOnline but still hold resources until the offline
  # process deletes the whole register-unregister entity group.
  return (reg.status == utils.RegistrationStatus.UNREGISTERED and
          reg.confirmed == utils.RegistrationConfirm.READY)
def _CanRegisterTimeWindows(initial_state, queue_time, start_time, end_time):
"""Checks if user is allowed to register based on given time window.
If a user tries to register before the time window opens, the user is placed
on the waiting list.
Args:
initial_state: Initial state of user when registering.
queue_time: Datetime of the user request.
start_time: Datetime date of registration window.
end_time: Datetime end of registration window.
Returns:
A rules.RuleResultRegister.STATUS_XXX value or initial_state if outside
time window.
"""
if queue_time > start_time and queue_time < end_time:
value = utils.RegistrationStatus.ENROLLED
else:
format = '%Y-%m-%d %I:%M%p'
logging.debug('Can not register in time window [%s - %s] for queue time %s',
start_time.strftime(format), end_time.strftime(format),
queue_time.strftime(format))
value = initial_state
return value
def _GetAccessPointsWithTags(access_point_tags):
  """Returns access points which contain every tag from the input.

  Args:
    access_point_tags:
      A list of strings representing access point tags, with a maximum of 30
      entries.

  Returns:
    An iterator of AccessPoint such that every AccessPoint contains all the
    tags specified in access_point_tags.
  """
  ap_query = models.AccessPoint.all()
  # Datastore's 'IN <list>' matches entities having ANY member of the list,
  # so an AND across tags is built by stacking one single-element IN filter
  # per required tag.
  for required_tag in access_point_tags:
    ap_query.filter('tags in ', [required_tag])
  return ap_query
class LockPastActivity(rules.RuleRegister):
  """Locks registrations for activities in the past."""

  def __init__(self, *args, **kargs):
    super(LockPastActivity, self).__init__(*args, **kargs)

  def Evaluate(self, initial_state, target_state):
    """Overrides parent method."""
    if not self.online:
      # Offline processing is never blocked by this rule.
      return {'status': target_state}
    activity_started = (self.eval_context.queue_time >
                        self.eval_context.activity.start_time)
    if activity_started:
      # Activity already began: freeze the registration in its current state.
      return {'status': initial_state}
    return {'status': target_state}

  @classmethod
  def GetDescription(cls):
    return _('Registrations locked for past activities.')
| apache-2.0 |
eeshangarg/zulip | zerver/tests/test_realm_emoji.py | 3 | 12300 | from unittest import mock
from zerver.lib.actions import check_add_realm_emoji, do_create_realm, do_create_user
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.test_helpers import get_test_image_file
from zerver.models import Realm, RealmEmoji, UserProfile, get_realm
class RealmEmojiTest(ZulipTestCase):
    """End-to-end tests for the /json/realm/emoji endpoints (list, upload,
    delete) and for RealmEmoji model behavior.

    List assertions expect one more entry than this test creates because the
    "zulip" test realm already contains a "green_tick" custom emoji (see
    test_realm_emoji_repr, which fetches it without creating it).
    """

    def create_test_emoji(self, name: str, author: UserProfile) -> RealmEmoji:
        """Upload a test image as a realm emoji owned by *author*."""
        with get_test_image_file("img.png") as img_file:
            realm_emoji = check_add_realm_emoji(
                realm=author.realm, name=name, author=author, image_file=img_file
            )
            if realm_emoji is None:
                raise Exception("Error creating test emoji.")  # nocoverage
        return realm_emoji

    def create_test_emoji_with_no_author(self, name: str, realm: Realm) -> RealmEmoji:
        """Create a RealmEmoji row directly (no upload), with no author set."""
        realm_emoji = RealmEmoji.objects.create(realm=realm, name=name)
        return realm_emoji

    def test_list(self) -> None:
        emoji_author = self.example_user("iago")
        self.login_user(emoji_author)
        self.create_test_emoji("my_emoji", emoji_author)
        result = self.client_get("/json/realm/emoji")
        self.assert_json_success(result)
        self.assertEqual(200, result.status_code)
        self.assert_length(result.json()["emoji"], 2)

    def test_list_no_author(self) -> None:
        self.login("iago")
        realm = get_realm("zulip")
        realm_emoji = self.create_test_emoji_with_no_author("my_emoji", realm)
        result = self.client_get("/json/realm/emoji")
        self.assert_json_success(result)
        content = result.json()
        self.assert_length(content["emoji"], 2)
        test_emoji = content["emoji"][str(realm_emoji.id)]
        self.assertIsNone(test_emoji["author_id"])

    def test_list_admins_only(self) -> None:
        # Test that realm emoji list is public and realm emojis
        # having no author are also there in the list.
        self.login("othello")
        realm = get_realm("zulip")
        realm.add_emoji_by_admins_only = True
        realm.save()
        realm_emoji = self.create_test_emoji_with_no_author("my_emoji", realm)
        result = self.client_get("/json/realm/emoji")
        self.assert_json_success(result)
        content = result.json()
        self.assert_length(content["emoji"], 2)
        test_emoji = content["emoji"][str(realm_emoji.id)]
        self.assertIsNone(test_emoji["author_id"])

    def test_upload(self) -> None:
        user = self.example_user("iago")
        email = user.email
        self.login_user(user)
        with get_test_image_file("img.png") as fp1:
            emoji_data = {"f1": fp1}
            result = self.client_post("/json/realm/emoji/my_emoji", info=emoji_data)
        self.assert_json_success(result)
        self.assertEqual(200, result.status_code)
        realm_emoji = RealmEmoji.objects.get(name="my_emoji")
        self.assertEqual(realm_emoji.author.email, email)

        result = self.client_get("/json/realm/emoji")
        content = result.json()
        self.assert_json_success(result)
        self.assert_length(content["emoji"], 2)
        test_emoji = content["emoji"][str(realm_emoji.id)]
        self.assertIn("author_id", test_emoji)
        author = UserProfile.objects.get(id=test_emoji["author_id"])
        self.assertEqual(author.email, email)

    def test_realm_emoji_repr(self) -> None:
        realm_emoji = RealmEmoji.objects.get(name="green_tick")
        file_name = str(realm_emoji.id) + ".png"
        self.assertEqual(
            str(realm_emoji),
            f"<RealmEmoji(zulip): {realm_emoji.id} green_tick False {file_name}>",
        )

    def test_upload_exception(self) -> None:
        self.login("iago")
        with get_test_image_file("img.png") as fp1:
            emoji_data = {"f1": fp1}
            result = self.client_post("/json/realm/emoji/my_em*oji", info=emoji_data)
        self.assert_json_error(result, "Invalid characters in emoji name")

    def test_upload_uppercase_exception(self) -> None:
        self.login("iago")
        with get_test_image_file("img.png") as fp1:
            emoji_data = {"f1": fp1}
            result = self.client_post("/json/realm/emoji/my_EMoji", info=emoji_data)
        self.assert_json_error(result, "Invalid characters in emoji name")

    def test_missing_name_exception(self) -> None:
        self.login("iago")
        with get_test_image_file("img.png") as fp1:
            emoji_data = {"f1": fp1}
            result = self.client_post("/json/realm/emoji/%20", info=emoji_data)
        self.assert_json_error(result, "Emoji name is missing")

    def test_upload_admins_only(self) -> None:
        self.login("othello")
        realm = get_realm("zulip")
        realm.add_emoji_by_admins_only = True
        realm.save()
        with get_test_image_file("img.png") as fp1:
            emoji_data = {"f1": fp1}
            result = self.client_post("/json/realm/emoji/my_emoji", info=emoji_data)
        self.assert_json_error(result, "Must be an organization administrator")

    def test_upload_anyone(self) -> None:
        self.login("othello")
        realm = get_realm("zulip")
        realm.add_emoji_by_admins_only = False
        realm.save()
        with get_test_image_file("img.png") as fp1:
            emoji_data = {"f1": fp1}
            result = self.client_post("/json/realm/emoji/my_emoji", info=emoji_data)
        self.assert_json_success(result)

    def test_emoji_upload_by_guest_user(self) -> None:
        self.login("polonius")
        with get_test_image_file("img.png") as fp1:
            emoji_data = {"f1": fp1}
            result = self.client_post("/json/realm/emoji/my_emoji", info=emoji_data)
        self.assert_json_error(result, "Not allowed for guest users")

    def test_delete(self) -> None:
        emoji_author = self.example_user("iago")
        self.login_user(emoji_author)
        realm_emoji = self.create_test_emoji("my_emoji", emoji_author)
        result = self.client_delete("/json/realm/emoji/my_emoji")
        self.assert_json_success(result)

        result = self.client_get("/json/realm/emoji")
        emojis = result.json()["emoji"]
        self.assert_json_success(result)
        # We only mark an emoji as deactivated instead of
        # removing it from the database.
        self.assert_length(emojis, 2)
        test_emoji = emojis[str(realm_emoji.id)]
        self.assertEqual(test_emoji["deactivated"], True)

    def test_delete_no_author(self) -> None:
        self.login("iago")
        realm = get_realm("zulip")
        self.create_test_emoji_with_no_author("my_emoji", realm)
        result = self.client_delete("/json/realm/emoji/my_emoji")
        self.assert_json_success(result)

    def test_delete_admins_only(self) -> None:
        emoji_author = self.example_user("othello")
        self.login_user(emoji_author)
        realm = get_realm("zulip")
        realm.add_emoji_by_admins_only = True
        realm.save()
        self.create_test_emoji_with_no_author("my_emoji", realm)
        result = self.client_delete("/json/realm/emoji/my_emoji")
        self.assert_json_error(result, "Must be an organization administrator")

    def test_delete_admin_or_author(self) -> None:
        # If any user in a realm can upload the emoji then the user who
        # uploaded it as well as the admin should be able to delete it.
        emoji_author = self.example_user("othello")
        realm = get_realm("zulip")
        realm.add_emoji_by_admins_only = False
        realm.save()

        self.create_test_emoji("my_emoji_1", emoji_author)
        self.login_user(emoji_author)
        result = self.client_delete("/json/realm/emoji/my_emoji_1")
        self.assert_json_success(result)
        self.logout()

        self.create_test_emoji("my_emoji_2", emoji_author)
        self.login("iago")
        result = self.client_delete("/json/realm/emoji/my_emoji_2")
        self.assert_json_success(result)
        self.logout()

        self.create_test_emoji("my_emoji_3", emoji_author)
        self.login("cordelia")
        result = self.client_delete("/json/realm/emoji/my_emoji_3")
        self.assert_json_error(result, "Must be an organization administrator or emoji author")

    def test_delete_exception(self) -> None:
        self.login("iago")
        result = self.client_delete("/json/realm/emoji/invalid_emoji")
        self.assert_json_error(result, "Emoji 'invalid_emoji' does not exist")

    def test_multiple_upload(self) -> None:
        self.login("iago")
        with get_test_image_file("img.png") as fp1, get_test_image_file("img.png") as fp2:
            result = self.client_post("/json/realm/emoji/my_emoji", {"f1": fp1, "f2": fp2})
        self.assert_json_error(result, "You must upload exactly one file.")

    def test_emoji_upload_file_size_error(self) -> None:
        self.login("iago")
        with get_test_image_file("img.png") as fp:
            with self.settings(MAX_EMOJI_FILE_SIZE_MIB=0):
                result = self.client_post("/json/realm/emoji/my_emoji", {"file": fp})
        self.assert_json_error(result, "Uploaded file is larger than the allowed limit of 0 MiB")

    def test_upload_already_existed_emoji(self) -> None:
        self.login("iago")
        with get_test_image_file("img.png") as fp1:
            emoji_data = {"f1": fp1}
            result = self.client_post("/json/realm/emoji/green_tick", info=emoji_data)
        self.assert_json_error(result, "A custom emoji with this name already exists.")

    def test_reupload(self) -> None:
        # An user should be able to reupload an emoji with same name.
        self.login("iago")
        with get_test_image_file("img.png") as fp1:
            emoji_data = {"f1": fp1}
            result = self.client_post("/json/realm/emoji/my_emoji", info=emoji_data)
        self.assert_json_success(result)

        result = self.client_delete("/json/realm/emoji/my_emoji")
        self.assert_json_success(result)

        with get_test_image_file("img.png") as fp1:
            emoji_data = {"f1": fp1}
            result = self.client_post("/json/realm/emoji/my_emoji", info=emoji_data)
        self.assert_json_success(result)

        result = self.client_get("/json/realm/emoji")
        emojis = result.json()["emoji"]
        self.assert_json_success(result)
        # Both the deactivated and the re-uploaded emoji rows remain.
        self.assert_length(emojis, 3)

    def test_failed_file_upload(self) -> None:
        self.login("iago")
        with mock.patch("zerver.lib.upload.write_local_file", side_effect=Exception()):
            with get_test_image_file("img.png") as fp1:
                emoji_data = {"f1": fp1}
                result = self.client_post("/json/realm/emoji/my_emoji", info=emoji_data)
        self.assert_json_error(result, "Image file upload failed.")

    def test_check_admin_realm_emoji(self) -> None:
        # Test that an user A is able to remove a realm emoji uploaded by him
        # and having same name as a deactivated realm emoji uploaded by some
        # other user B.
        emoji_author_1 = self.example_user("cordelia")
        self.create_test_emoji("test_emoji", emoji_author_1)
        self.login_user(emoji_author_1)
        result = self.client_delete("/json/realm/emoji/test_emoji")
        self.assert_json_success(result)

        emoji_author_2 = self.example_user("othello")
        self.create_test_emoji("test_emoji", emoji_author_2)
        self.login_user(emoji_author_2)
        result = self.client_delete("/json/realm/emoji/test_emoji")
        self.assert_json_success(result)

    def test_check_admin_different_realm_emoji(self) -> None:
        # Test that two different realm emojis in two different realms but
        # having same name can be administered independently.
        realm_1 = do_create_realm("test_realm", "test_realm")
        emoji_author_1 = do_create_user(
            "abc@example.com", password="abc", realm=realm_1, full_name="abc", acting_user=None
        )
        self.create_test_emoji("test_emoji", emoji_author_1)

        emoji_author_2 = self.example_user("othello")
        self.create_test_emoji("test_emoji", emoji_author_2)
        self.login_user(emoji_author_2)
        result = self.client_delete("/json/realm/emoji/test_emoji")
        self.assert_json_success(result)
| apache-2.0 |
APerson241/EnterpriseyBot | botreq-status/botreq-status.py | 1 | 7540 | import datetime
import itertools
import mwparserfromhell
import pywikibot
import re
import sys
# Pages the bot reads/writes.
BOTREQ = "Wikipedia:Bot requests"
BOTOP_CAT = "Wikipedia bot operators"
REPORT_PAGE = "User:EnterpriseyBot/BOTREQ status"

# Wikitable header emitted before the generated rows.
TABLE_HEADER = """<noinclude>{{botnav}}This is a table of current [[WP:BOTREQ|]] discussions, updated automatically by {{user|EnterpriseyBot}}.</noinclude>
{| border="1" class="sortable wikitable plainlinks"
! # !! Title !! Replies !! Last editor !! Date/Time !! Last botop editor !! Date/Time
"""
SUMMARY = "Bot updating BOTREQ status table ({} requests)"

# Wikitext parsing patterns: user links, signature timestamps, full
# signatures, and level-2 section headers.
USER = re.compile(r"\[\[User.*?:(.*?)(?:\||(?:\]\]))")
TIMESTAMP = re.compile(r"\d{2}:\d{2}, \d{1,2} [A-Za-z]* \d{4}")
SIGNATURE = re.compile(r"\[\[User.*?\]\].*?\(UTC\)")
SECTION_HEADER = re.compile(r"^==\s*(.+?)\s*==$", flags=re.M)

# Input format of signature timestamps / output format for the report table.
SIGNATURE_TIME_FORMAT = "%H:%M, %d %B %Y"
TIME_FORMAT_STRING = "%Y-%m-%d, %H:%M"
class Request:
    """Mutable record for one BOTREQ section; fields are assigned ad hoc in
    section_to_request (row_number, title, replies, last_editor, ...)."""
    pass
def print_log(what_to_print):
    """Print *what_to_print* prefixed with the current UTC time in
    bracketed ISO-like form, e.g. "[2020-01-01T12:00:00Z] message"."""
    timestamp = datetime.datetime.utcnow().strftime("[%Y-%m-%dT%H:%M:%SZ] ")
    print("{}{}".format(timestamp, what_to_print))
def make_table_row(r):
    """Render one Request as a wikitable row (Python 2: uses `unicode`).

    NOTE: mutates r in place — r.last_edit_time / r.last_botop_time are
    replaced by formatted strings when they are datetimes.
    """
    # Zero replies is highlighted red in the table.
    replies = ('style="background: red;" | ' if r.replies == 0 else '') + str(r.replies)

    # Utility function for processing
    def take_inner(regex, text):
        """
        Given a regex with exactly one capturing group and some text,
        return the text after all occurrences of the regex have been
        replaced with the group.

        Example: take_inner("a(.)a", "aba") == "b"
        """
        return re.sub(regex, r"\1", text)

    # Row number
    row_number = r.row_number

    # We'll be putting r.title in a wikilink, so we can't have nested wikilinks
    title = take_inner(r"\[\[(?:.+?\|)?(.+?)\]\]", r.title)

    # Nested external links also won't work
    title = take_inner(r"\[http[^ ]+? (.+?)\]", title)

    # Escape some characters in the link target
    encodings = {"#": "%23", "<": "%3C", ">": "%3E", "[": "%5B", "]": "%5D", "|": "%7C", "{": "%7B", "}": "%7D"}
    target = re.sub("[{}]".format("".join(map(re.escape, encodings.keys()))), lambda match: encodings[match.group(0)], title)

    # Remove formatting in the link target
    target = take_inner(r"''([^']+)''", take_inner(r"'''([^']+)'''", target))

    # Edits older than 60 days are highlighted red.
    if type(r.last_edit_time) is datetime.datetime:
        old = (datetime.datetime.now() - r.last_edit_time).days > 60
        r.last_edit_time = ('style="background: red;" | ' if old else '') + r.last_edit_time.strftime(TIME_FORMAT_STRING)
    if type(r.last_botop_time) is datetime.datetime:
        r.last_botop_time = r.last_botop_time.strftime(TIME_FORMAT_STRING)
    elements = map(unicode, [row_number, target, title, replies, r.last_editor, r.last_edit_time, r.last_botop_editor, r.last_botop_time])
    return u"|-\n| {} || [[WP:Bot requests#{}|{}]] || {} || {} || {} || {} || {}".format(*elements)
# Per-run memoization of category lookups, keyed by username.
botop_cache = {}


def is_botop(wiki, username):
    """Return True if *username*'s user page is in the bot-operators
    category; results are cached for the lifetime of the run."""
    if username not in botop_cache:
        userpage = pywikibot.Page(wiki, "User:" + username)
        botop_cache[username] = any(
            category.title(with_ns=False) == BOTOP_CAT
            for category in userpage.categories())
    return botop_cache[username]
def main():
    """Fetch WP:BOTREQ, summarize every level-2 section, and render the
    status table to REPORT_PAGE (saving is currently commented out).

    NOTE(review): this file uses `unicode` elsewhere, so it targets
    Python 2, where map() returns a list; under Python 3 the
    len(list(requests)) call below would exhaust the iterator and leave
    the table empty — confirm before porting.
    """
    print_log("Starting botreq-status at " + datetime.datetime.utcnow().isoformat())
    wiki = pywikibot.Site("en", "wikipedia")
    #wiki.login() !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    botreq = pywikibot.Page(wiki, BOTREQ)
    page_content = botreq.text
    section_headers = list(SECTION_HEADER.finditer(page_content))

    # If it's not a level-2 header, the char before a match will be "="
    section_headers = list(filter(lambda h:page_content[h.start(0) - 1] != "=",
                                  section_headers))

    # Now, build our list of sections: (header, content) tuples, where each
    # section's content runs up to the start of the next level-2 header.
    sections = []
    for i, section_header_match in enumerate(section_headers):
        if i + 1 < len(section_headers):
            next_section_header = section_headers[i + 1]
            next_section_start = next_section_header.start(0)
        else:
            next_section_start = len(page_content) + 1
        this_section_end = next_section_start - 1
        this_section_start = section_header_match.end(0)
        section_content = page_content[this_section_start:this_section_end]
        section_content = section_content.strip()
        section_header = section_header_match.group(1).strip()

        # In the event of duplicates, use "=" to flag duplication
        # NOTE(review): 'sections' holds (header, content) tuples, so this
        # membership test on a bare header string never matches — the
        # duplicate flagging appears to be broken; verify intent.
        while section_header in sections:
            section_header = "=" + section_header
        sections.append((section_header, section_content))

    def section_to_request(enumerated_section_tuple):
        """Turn one (index, (header, wikitext)) pair into a Request record."""
        enum_number, section_tuple = enumerated_section_tuple
        section_header, section_wikitext = section_tuple
        section = mwparserfromhell.parse(section_wikitext)
        r = Request()
        r.row_number = enum_number + 1
        r.title = section_header
        # Every post ends with "(UTC)"; the first one is the request itself.
        r.replies = section.count(u"(UTC)") - 1
        signatures = []
        for index, each_node in enumerate(section.nodes):
            if type(each_node) == mwparserfromhell.nodes.text.Text and "(UTC)" in each_node:
                # Get the last timestamp-looking thing (trick from http://stackoverflow.com/a/2988680/1757964)
                # NOTE(review): if the node contains "(UTC)" but TIMESTAMP
                # never matches, timestamp_match is undefined (or stale from
                # a previous node) — latent NameError / wrong-value risk.
                for timestamp_match in TIMESTAMP.finditer(str(each_node)): pass
                try:
                    timestamp = datetime.datetime.strptime(timestamp_match.group(0), SIGNATURE_TIME_FORMAT)
                except ValueError:
                    timestamp = "{{unknown}}"

                # Use the last user talk page link before the timestamp
                for user_index in itertools.count(index - 1, -1):
                    user = USER.search(str(section.get(user_index)))
                    if user:
                        user = user.group(1)
                        break

                # Check for user renames/redirects
                user_page = pywikibot.Page(wiki, "User:" + user)
                if user_page.isRedirectPage():
                    redirect_text = user_page.get(get_redirect=True)
                    user_wikicode = mwparserfromhell.parse(redirect_text)
                    redirect_link = user_wikicode.filter_wikilinks()[0]
                    user = redirect_link.title.split(":")[1]
                signatures.append((user, timestamp))

        # Process usernames by removing anchors
        signatures = [(x.partition('#')[0], y) for x, y in signatures]

        # Default values for everything
        r.last_editor, r.last_edit_time = r.last_botop_editor, r.last_botop_time = "{{no result|None}}", "{{n/a}}"
        if signatures:
            r.last_editor, r.last_edit_time = signatures[-1]
            # Walk backwards to find the most recent bot-operator post.
            for user, timestamp in reversed(signatures):
                if is_botop(wiki, user):
                    r.last_botop_editor, r.last_botop_time = user, timestamp
                    break
        return r

    # Why enumerate? Because we need row numbers in the table
    requests = map(section_to_request, enumerate(sections))
    num_requests = len(list(requests))
    print_log("Parsed BOTREQ and made a list of {} requests.".format(num_requests))
    table_rows = map(make_table_row, requests)
    table = "\n".join(table_rows) + "\n|}"
    wikitext = TABLE_HEADER + table
    report_page = pywikibot.Page(wiki, REPORT_PAGE)
    report_page.text = wikitext
    print(wikitext)
    #report_page.save(summary=SUMMARY.format(num_requests))
    print_log("Saved {}.".format(REPORT_PAGE))
# Script entry point.
if __name__ == "__main__":
    main()
| mit |
Jgarcia-IAS/Fidelizacion_odoo | openerp/addons/account/report/account_print_overdue.py | 380 | 3907 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.report import report_sxw
from openerp.osv import osv
class Overdue(report_sxw.rml_parse):
    """RML report parser for the partner overdue-payments letter (OpenERP 7,
    Python 2). Precomputes per-partner due/paid/matured totals from
    receivable/payable move lines and exposes them to the report template."""

    def __init__(self, cr, uid, name, context):
        super(Overdue, self).__init__(cr, uid, name, context=context)
        ids = context.get('active_ids')
        partner_obj = self.pool['res.partner']
        docs = partner_obj.browse(cr, uid, ids, context)
        due = {}
        paid = {}
        mat = {}
        for partner in docs:
            # Amount due: debits on receivables plus negated credits on payables.
            due[partner.id] = reduce(lambda x, y: x + ((y['account_id']['type'] == 'receivable' and y['debit'] or 0) or (y['account_id']['type'] == 'payable' and y['credit'] * -1 or 0)), self._lines_get(partner), 0)
            # Amount paid: credits on receivables plus negated debits on payables.
            paid[partner.id] = reduce(lambda x, y: x + ((y['account_id']['type'] == 'receivable' and y['credit'] or 0) or (y['account_id']['type'] == 'payable' and y['debit'] * -1 or 0)), self._lines_get(partner), 0)
            # Matured balance: debit-credit sum of lines whose maturity date is
            # past (ISO date strings compare correctly lexicographically).
            mat[partner.id] = reduce(lambda x, y: x + (y['debit'] - y['credit']), filter(lambda x: x['date_maturity'] < time.strftime('%Y-%m-%d'), self._lines_get(partner)), 0)
        addresses = self.pool['res.partner']._address_display(cr, uid, ids, None, None)
        self.localcontext.update({
            'docs': docs,
            'time': time,
            'getLines': self._lines_get,
            'tel_get': self._tel_get,
            'message': self._message,
            'due': due,
            'paid': paid,
            'mat': mat,
            'addresses': addresses
        })
        self.context = context

    def _tel_get(self,partner):
        """Phone number of the partner's invoice address, falling back to the
        partner's own phone; False when unavailable."""
        if not partner:
            return False
        res_partner = self.pool['res.partner']
        addresses = res_partner.address_get(self.cr, self.uid, [partner.id], ['invoice'])
        adr_id = addresses and addresses['invoice'] or False
        if adr_id:
            adr=res_partner.read(self.cr, self.uid, [adr_id])[0]
            return adr['phone']
        else:
            return partner.phone or False
        # NOTE(review): unreachable — both branches above already return.
        return False

    def _lines_get(self, partner):
        """Unreconciled, posted receivable/payable move lines for *partner*."""
        moveline_obj = self.pool['account.move.line']
        movelines = moveline_obj.search(self.cr, self.uid,
                [('partner_id', '=', partner.id),
                    ('account_id.type', 'in', ['receivable', 'payable']),
                    ('state', '<>', 'draft'), ('reconcile_id', '=', False)])
        movelines = moveline_obj.browse(self.cr, self.uid, movelines)
        return movelines

    def _message(self, obj, company):
        """Company overdue message, translated to *obj*'s language and split
        into lines for the template."""
        company_pool = self.pool['res.company']
        message = company_pool.browse(self.cr, self.uid, company.id, {'lang':obj.lang}).overdue_msg
        return message.split('\n')
return message.split('\n')
class report_overdue(osv.AbstractModel):
    """Registers the QWeb overdue report, wrapping the legacy RML parser."""
    _name = 'report.account.report_overdue'
    _inherit = 'report.abstract_report'
    _template = 'account.report_overdue'
    # Legacy rml_parse subclass providing the rendering context.
    _wrapped_report_class = Overdue
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
dexterx17/nodoSocket | clients/Python-2.7.6/Lib/distutils/version.py | 259 | 11433 | #
# distutils/version.py
#
# Implements multiple version numbering conventions for the
# Python Module Distribution Utilities.
#
# $Id$
#
"""Provides classes to represent module version numbers (one class for
each style of version numbering). There are currently two such classes
implemented: StrictVersion and LooseVersion.
Every version number class implements the following interface:
* the 'parse' method takes a string and parses it to some internal
representation; if the string is an invalid version number,
'parse' raises a ValueError exception
* the class constructor takes an optional string argument which,
if supplied, is passed to 'parse'
* __str__ reconstructs the string that was passed to 'parse' (or
an equivalent string -- ie. one that will generate an equivalent
version number instance)
* __repr__ generates Python code to recreate the version number instance
* __cmp__ compares the current instance with either another instance
of the same class or a string (which will be parsed to an instance
of the same class, thus must follow the same rules)
"""
import string, re
from types import StringType
class Version:
    """Abstract base class for version numbering classes.

    Supplies only the constructor and reproducer shared by every concrete
    version class; concrete subclasses are expected to provide the rest of
    the interface:

      parse(string)  - convert a string representation to whatever internal
                       representation suits that numbering style
      __str__()      - rebuild a string equivalent to the one parsed
      __cmp__(other) - compare against another instance or a raw version
                       string (which must first be parsed)
    """

    def __init__ (self, vstring=None):
        # Parsing is optional: an instance may be created empty and
        # populated later with an explicit parse() call.
        if vstring:
            self.parse(vstring)

    def __repr__ (self):
        # Delegates to __str__, which the concrete subclass defines.
        return "%s ('%s')" % (self.__class__.__name__, str(self))
class StrictVersion (Version):

    """Version numbering for anal retentives and software idealists.
    Implements the standard interface for version number classes as
    described above.  A version number consists of two or three
    dot-separated numeric components, with an optional "pre-release" tag
    on the end.  The pre-release tag consists of the letter 'a' or 'b'
    followed by a number.  If the numeric components of two version
    numbers are equal, then one with a pre-release tag will always
    be deemed earlier (lesser) than one without.

    The following are valid version numbers (shown in the order that
    would be obtained by sorting according to the supplied cmp function):

        0.4     0.4.0   (these two are equivalent)
        0.4.1
        0.5a1
        0.5b3
        0.5
        0.9.6
        1.0
        1.0.4a3
        1.0.4b1
        1.0.4

    The following are examples of invalid version numbers:

        1
        2.7.2.2
        1.3.a4
        1.3pl1
        1.3c4

    The rationale for this version numbering system will be explained
    in the distutils documentation.
    """

    # Anchored pattern: MAJOR '.' MINOR ['.' PATCH] [('a'|'b') NUMBER].
    # re.VERBOSE makes the literal spaces in the pattern insignificant.
    version_re = re.compile(r'^(\d+) \. (\d+) (\. (\d+))? ([ab](\d+))?$',
                            re.VERBOSE)


    def parse (self, vstring):
        """Parse 'vstring' into self.version (a 3-tuple of ints) and
        self.prerelease (('a'|'b', int) or None).  Raises ValueError if
        'vstring' does not match the strict syntax described above."""
        match = self.version_re.match(vstring)
        if not match:
            raise ValueError, "invalid version number '%s'" % vstring

        (major, minor, patch, prerelease, prerelease_num) = \
            match.group(1, 2, 4, 5, 6)

        if patch:
            self.version = tuple(map(string.atoi, [major, minor, patch]))
        else:
            # A missing patch level is normalized to 0, which is what makes
            # "0.4" and "0.4.0" compare equal.
            self.version = tuple(map(string.atoi, [major, minor]) + [0])

        if prerelease:
            # Stored as ('a' or 'b', number) so plain tuple comparison
            # orders pre-releases correctly in __cmp__.
            self.prerelease = (prerelease[0], string.atoi(prerelease_num))
        else:
            self.prerelease = None


    def __str__ (self):

        if self.version[2] == 0:
            # Drop a zero patch level for brevity: "0.4", not "0.4.0".
            vstring = string.join(map(str, self.version[0:2]), '.')
        else:
            vstring = string.join(map(str, self.version), '.')

        if self.prerelease:
            vstring = vstring + self.prerelease[0] + str(self.prerelease[1])

        return vstring


    def __cmp__ (self, other):
        if isinstance(other, StringType):
            # Allow direct comparison against a raw version string.
            other = StrictVersion(other)

        compare = cmp(self.version, other.version)
        if (compare == 0):              # have to compare prerelease

            # case 1: neither has prerelease; they're equal
            # case 2: self has prerelease, other doesn't; other is greater
            # case 3: self doesn't have prerelease, other does: self is greater
            # case 4: both have prerelease: must compare them!

            if (not self.prerelease and not other.prerelease):
                return 0
            elif (self.prerelease and not other.prerelease):
                return -1
            elif (not self.prerelease and other.prerelease):
                return 1
            elif (self.prerelease and other.prerelease):
                return cmp(self.prerelease, other.prerelease)

        else:                           # numeric versions don't match --
            return compare              # prerelease stuff doesn't matter

# end class StrictVersion
# The rules according to Greg Stein:
# 1) a version number has 1 or more numbers separated by a period or by
# sequences of letters. If only periods, then these are compared
# left-to-right to determine an ordering.
# 2) sequences of letters are part of the tuple for comparison and are
# compared lexicographically
# 3) recognize the numeric components may have leading zeroes
#
# The LooseVersion class below implements these rules: a version number
# string is split up into a tuple of integer and string components, and
# comparison is a simple tuple comparison. This means that version
# numbers behave in a predictable and obvious way, but a way that might
# not necessarily be how people *want* version numbers to behave. There
# wouldn't be a problem if people could stick to purely numeric version
# numbers: just split on period and compare the numbers as tuples.
# However, people insist on putting letters into their version numbers;
# the most common purpose seems to be:
# - indicating a "pre-release" version
# ('alpha', 'beta', 'a', 'b', 'pre', 'p')
# - indicating a post-release patch ('p', 'pl', 'patch')
# but of course this can't cover all version number schemes, and there's
# no way to know what a programmer means without asking him.
#
# The problem is what to do with letters (and other non-numeric
# characters) in a version number. The current implementation does the
# obvious and predictable thing: keep them as strings and compare
# lexically within a tuple comparison. This has the desired effect if
# an appended letter sequence implies something "post-release":
# eg. "0.99" < "0.99pl14" < "1.0", and "5.001" < "5.001m" < "5.002".
#
# However, if letters in a version number imply a pre-release version,
# the "obvious" thing isn't correct. Eg. you would expect that
# "1.5.1" < "1.5.2a2" < "1.5.2", but under the tuple/lexical comparison
# implemented here, this just isn't so.
#
# Two possible solutions come to mind. The first is to tie the
# comparison algorithm to a particular set of semantic rules, as has
# been done in the StrictVersion class above. This works great as long
# as everyone can go along with bondage and discipline. Hopefully a
# (large) subset of Python module programmers will agree that the
# particular flavour of bondage and discipline provided by StrictVersion
# provides enough benefit to be worth using, and will submit their
# version numbering scheme to its domination. The free-thinking
# anarchists in the lot will never give in, though, and something needs
# to be done to accommodate them.
#
# Perhaps a "moderately strict" version class could be implemented that
# lets almost anything slide (syntactically), and makes some heuristic
# assumptions about non-digits in version number strings. This could
# sink into special-case-hell, though; if I was as talented and
# idiosyncratic as Larry Wall, I'd go ahead and implement a class that
# somehow knows that "1.2.1" < "1.2.2a2" < "1.2.2" < "1.2.2pl3", and is
# just as happy dealing with things like "2g6" and "1.13++". I don't
# think I'm smart enough to do it right though.
#
# In any case, I've coded the test suite for this module (see
# ../test/test_version.py) specifically to fail on things like comparing
# "1.2a2" and "1.2". That's not because the *code* is doing anything
# wrong, it's because the simple, obvious design doesn't match my
# complicated, hairy expectations for real-world version numbers. It
# would be a snap to fix the test suite to say, "Yep, LooseVersion does
# the Right Thing" (ie. the code matches the conception). But I'd rather
# have a conception that matches common notions about version numbers.
class LooseVersion (Version):

    """Version numbering for anarchists and software realists.
    Implements the standard interface for version number classes as
    described above.  A version number consists of a series of numbers,
    separated by either periods or strings of letters.  When comparing
    version numbers, the numeric components will be compared
    numerically, and the alphabetic components lexically.  The following
    are all valid version numbers, in no particular order:

        1.5.1
        1.5.2b2
        161
        3.10a
        8.02
        3.4j
        1996.07.12
        3.2.pl0
        3.1.1.6
        2g6
        11g
        0.960923
        2.2beta29
        1.13++
        5.5.kw
        2.0b1pl0

    In fact, there is no such thing as an invalid version number under
    this scheme; the rules for comparison are simple and predictable,
    but may not always give the results you want (for some definition
    of "want").
    """

    # Splits a version string into runs of digits, runs of lowercase
    # letters, and '.' separators; the separators are discarded in parse().
    component_re = re.compile(r'(\d+ | [a-z]+ | \.)', re.VERBOSE)

    def __init__ (self, vstring=None):
        if vstring:
            self.parse(vstring)


    def parse (self, vstring):
        # I've given up on thinking I can reconstruct the version string
        # from the parsed tuple -- so I just store the string here for
        # use by __str__
        self.vstring = vstring
        components = filter(lambda x: x and x != '.',
                            self.component_re.split(vstring))
        for i in range(len(components)):
            try:
                components[i] = int(components[i])
            except ValueError:
                # Non-numeric components stay as strings and compare
                # lexicographically within the version tuple.
                pass

        # self.version is a mixed list of ints and strings; comparison is a
        # plain sequence comparison on these lists.
        self.version = components


    def __str__ (self):
        return self.vstring


    def __repr__ (self):
        return "LooseVersion ('%s')" % str(self)


    def __cmp__ (self, other):
        if isinstance(other, StringType):
            # Allow direct comparison against a raw version string.
            other = LooseVersion(other)

        return cmp(self.version, other.version)

# end class LooseVersion
| mit |
UstadMobile/exelearning-ustadmobile-work | twisted/web/test/test_flatten.py | 33 | 19136 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for the flattening portion of L{twisted.web.template}, implemented in
L{twisted.web._flatten}.
"""
import sys
import traceback
from xml.etree.cElementTree import XML
from zope.interface import implements, implementer
from twisted.trial.unittest import TestCase
from twisted.test.testutils import XMLAssertionMixin
from twisted.internet.defer import passthru, succeed, gatherResults
from twisted.web.iweb import IRenderable
from twisted.web.error import UnfilledSlot, UnsupportedType, FlattenerError
from twisted.web.template import tags, Tag, Comment, CDATA, CharRef, slot
from twisted.web.template import Element, renderer, TagLoader, flattenString
from twisted.web.test._util import FlattenTestCase
class OrderedAttributes(object):
    """
    A deterministic stand-in for the L{Tag.attributes} dictionary: it does
    no sorting at all, simply yielding the attribute pairs in exactly the
    order they were supplied.

    @ivar attributes: The result of a L{dict}C{.items} call.
    @type attributes: L{list} of 2-L{tuples}
    """
    def __init__(self, attributes):
        self.attributes = attributes


    def iteritems(self):
        """
        Yield the stored (name, value) pairs in insertion order, mimicking
        L{dict}C{.iteritems}.

        @return: an iterator
        @rtype: list iterator
        """
        for pair in self.attributes:
            yield pair
class TestSerialization(FlattenTestCase, XMLAssertionMixin):
    """
    Tests for flattening various things.
    """
    def test_nestedTags(self):
        """
        Test that nested tags flatten correctly.
        """
        return self.assertFlattensTo(
            tags.html(tags.body('42'), hi='there'),
            '<html hi="there"><body>42</body></html>')


    def test_serializeString(self):
        """
        Test that strings will be flattened and escaped correctly.
        """
        return gatherResults([
            self.assertFlattensTo('one', 'one'),
            # Markup-significant bytes in text content must be emitted as
            # XML entities.
            self.assertFlattensTo('<abc&&>123', '&lt;abc&amp;&amp;&gt;123'),
        ])


    def test_serializeSelfClosingTags(self):
        """
        The serialized form of a self-closing tag is C{'<tagName />'}.
        """
        return self.assertFlattensTo(tags.img(), '<img />')


    def test_serializeAttribute(self):
        """
        The serialized form of attribute I{a} with value I{b} is C{'a="b"'}.
        """
        self.assertFlattensImmediately(tags.img(src='foo'),
                                       '<img src="foo" />')


    def test_serializedMultipleAttributes(self):
        """
        Multiple attributes are separated by a single space in their serialized
        form.
        """
        tag = tags.img()
        tag.attributes = OrderedAttributes([("src", "foo"), ("name", "bar")])
        self.assertFlattensImmediately(tag, '<img src="foo" name="bar" />')


    def checkAttributeSanitization(self, wrapData, wrapTag):
        """
        Common implementation of L{test_serializedAttributeWithSanitization}
        and L{test_serializedDeferredAttributeWithSanitization},
        L{test_serializedAttributeWithTransparentTag}.

        @param wrapData: A 1-argument callable that wraps around the
            attribute's value so other tests can customize it.
        @type wrapData: callable taking L{bytes} and returning something
            flattenable

        @param wrapTag: A 1-argument callable that wraps around the outer tag
            so other tests can customize it.
        @type wrapTag: callable taking L{Tag} and returning L{Tag}.
        """
        self.assertFlattensImmediately(
            wrapTag(tags.img(src=wrapData("<>&\""))),
            '<img src="&lt;&gt;&amp;&quot;" />')


    def test_serializedAttributeWithSanitization(self):
        """
        Attribute values containing C{"<"}, C{">"}, C{"&"}, or C{'"'} have
        C{"&lt;"}, C{"&gt;"}, C{"&amp;"}, or C{"&quot;"} substituted for those
        bytes in the serialized output.
        """
        self.checkAttributeSanitization(passthru, passthru)


    def test_serializedDeferredAttributeWithSanitization(self):
        """
        Like L{test_serializedAttributeWithSanitization}, but when the contents
        of the attribute are in a L{Deferred
        <twisted.internet.defer.Deferred>}.
        """
        self.checkAttributeSanitization(succeed, passthru)


    def test_serializedAttributeWithSlotWithSanitization(self):
        """
        Like L{test_serializedAttributeWithSanitization} but with a slot.
        """
        toss = []
        self.checkAttributeSanitization(
            lambda value: toss.append(value) or slot("stuff"),
            lambda tag: tag.fillSlots(stuff=toss.pop())
        )


    def test_serializedAttributeWithTransparentTag(self):
        """
        Attribute values which are supplied via the value of a C{t:transparent}
        tag have the same substitution rules applied to them as values supplied
        directly.
        """
        self.checkAttributeSanitization(tags.transparent, passthru)


    def test_serializedAttributeWithTransparentTagWithRenderer(self):
        """
        Like L{test_serializedAttributeWithTransparentTag}, but when the
        attribute is rendered by a renderer on an element.
        """
        class WithRenderer(Element):
            def __init__(self, value, loader):
                self.value = value
                super(WithRenderer, self).__init__(loader)
            @renderer
            def stuff(self, request, tag):
                return self.value
        toss = []
        self.checkAttributeSanitization(
            lambda value: toss.append(value) or
                          tags.transparent(render="stuff"),
            lambda tag: WithRenderer(toss.pop(), TagLoader(tag))
        )


    def test_serializedAttributeWithRenderable(self):
        """
        Like L{test_serializedAttributeWithTransparentTag}, but when the
        attribute is a provider of L{IRenderable} rather than a transparent
        tag.
        """
        @implementer(IRenderable)
        class Arbitrary(object):
            def __init__(self, value):
                self.value = value
            def render(self, request):
                return self.value
        self.checkAttributeSanitization(Arbitrary, passthru)


    def checkTagAttributeSerialization(self, wrapTag):
        """
        Common implementation of L{test_serializedAttributeWithTag} and
        L{test_serializedAttributeWithDeferredTag}.

        @param wrapTag: A 1-argument callable that wraps around the attribute's
            value so other tests can customize it.
        @type wrapTag: callable taking L{Tag} and returning something
            flattenable
        """
        innerTag = tags.a('<>&"')
        outerTag = tags.img(src=wrapTag(innerTag))
        outer = self.assertFlattensImmediately(
            outerTag,
            '<img src="&lt;a&gt;&amp;lt;&amp;gt;&amp;amp;&quot;&lt;/a&gt;" />')
        inner = self.assertFlattensImmediately(
            innerTag, '<a>&lt;&gt;&amp;"</a>')

        # Since the above quoting is somewhat tricky, validate it by making sure
        # that the main use-case for tag-within-attribute is supported here: if
        # we serialize a tag, it is quoted *such that it can be parsed out again
        # as a tag*.
        self.assertXMLEqual(XML(outer).attrib['src'], inner)


    def test_serializedAttributeWithTag(self):
        """
        L{Tag} objects which are serialized within the context of an attribute
        are serialized such that the text content of the attribute may be
        parsed to retrieve the tag.
        """
        self.checkTagAttributeSerialization(passthru)


    def test_serializedAttributeWithDeferredTag(self):
        """
        Like L{test_serializedAttributeWithTag}, but when the L{Tag} is in a
        L{Deferred <twisted.internet.defer.Deferred>}.
        """
        self.checkTagAttributeSerialization(succeed)


    def test_serializedAttributeWithTagWithAttribute(self):
        """
        Similar to L{test_serializedAttributeWithTag}, but for the additional
        complexity where the tag which is the attribute value itself has an
        attribute value which contains bytes which require substitution.
        """
        flattened = self.assertFlattensImmediately(
            tags.img(src=tags.a(href='<>&"')),
            '<img src="&lt;a href='
            '&quot;&amp;lt;&amp;gt;&amp;amp;&amp;quot;&quot;&gt;'
            '&lt;/a&gt;" />')

        # As in checkTagAttributeSerialization, belt-and-suspenders:
        self.assertXMLEqual(XML(flattened).attrib['src'],
                            '<a href="&lt;&gt;&amp;&quot;"></a>')


    def test_serializeComment(self):
        """
        Test that comments are correctly flattened and escaped.
        """
        # No trailing comma here: trial must be handed the Deferred itself,
        # not a 1-tuple containing it, or the result is never waited upon.
        return self.assertFlattensTo(Comment('foo bar'), '<!--foo bar-->')


    def test_commentEscaping(self):
        """
        The data in a L{Comment} is escaped and mangled in the flattened output
        so that the result is a legal SGML and XML comment.

        SGML comment syntax is complicated and hard to use. This rule is more
        restrictive, and more compatible:

        Comments start with <!-- and end with --> and never contain -- or >.

        Also by XML syntax, a comment may not end with '-'.

        @see: U{http://www.w3.org/TR/REC-xml/#sec-comments}
        """
        def verifyComment(c):
            self.assertTrue(
                c.startswith('<!--'),
                "%r does not start with the comment prefix" % (c,))
            self.assertTrue(
                c.endswith('-->'),
                "%r does not end with the comment suffix" % (c,))
            # If it is shorter than 7, then the prefix and suffix overlap
            # illegally.
            self.assertTrue(
                len(c) >= 7,
                "%r is too short to be a legal comment" % (c,))
            content = c[4:-3]
            self.assertNotIn('--', content)
            self.assertNotIn('>', content)
            if content:
                self.assertNotEqual(content[-1], '-')

        results = []
        for c in [
            '',
            'foo---bar',
            'foo---bar-',
            'foo>bar',
            'foo-->bar',
            '----------------',
        ]:
            d = flattenString(None, Comment(c))
            d.addCallback(verifyComment)
            results.append(d)
        return gatherResults(results)


    def test_serializeCDATA(self):
        """
        Test that CDATA is correctly flattened and escaped.
        """
        return gatherResults([
            self.assertFlattensTo(CDATA('foo bar'), '<![CDATA[foo bar]]>'),
            self.assertFlattensTo(
                CDATA('foo ]]> bar'),
                '<![CDATA[foo ]]]]><![CDATA[> bar]]>'),
        ])


    def test_serializeUnicode(self):
        """
        Test that unicode is encoded correctly in the appropriate places, and
        raises an error when it occurs in inappropriate place.
        """
        snowman = u'\N{SNOWMAN}'
        return gatherResults([
            self.assertFlattensTo(snowman, '\xe2\x98\x83'),
            self.assertFlattensTo(tags.p(snowman), '<p>\xe2\x98\x83</p>'),
            self.assertFlattensTo(Comment(snowman), '<!--\xe2\x98\x83-->'),
            self.assertFlattensTo(CDATA(snowman), '<![CDATA[\xe2\x98\x83]]>'),
            self.assertFlatteningRaises(
                Tag(snowman), UnicodeEncodeError),
            self.assertFlatteningRaises(
                Tag('p', attributes={snowman: ''}), UnicodeEncodeError),
        ])


    def test_serializeCharRef(self):
        """
        A character reference is flattened to a string using the I{&#NNNN;}
        syntax.
        """
        ref = CharRef(ord(u"\N{SNOWMAN}"))
        # 9731 is the decimal codepoint for SNOWMAN.
        return self.assertFlattensTo(ref, "&#9731;")


    def test_serializeDeferred(self):
        """
        Test that a deferred is substituted with the current value in the
        callback chain when flattened.
        """
        return self.assertFlattensTo(succeed('two'), 'two')


    def test_serializeSameDeferredTwice(self):
        """
        Test that the same deferred can be flattened twice.
        """
        d = succeed('three')
        return gatherResults([
            self.assertFlattensTo(d, 'three'),
            self.assertFlattensTo(d, 'three'),
        ])


    def test_serializeIRenderable(self):
        """
        Test that flattening respects all of the IRenderable interface.
        """
        class FakeElement(object):
            implements(IRenderable)
            def render(ign,ored):
                return tags.p(
                    'hello, ',
                    tags.transparent(render='test'), ' - ',
                    tags.transparent(render='test'))
            def lookupRenderMethod(ign, name):
                self.assertEqual(name, 'test')
                return lambda ign, node: node('world')

        return gatherResults([
            self.assertFlattensTo(FakeElement(), '<p>hello, world - world</p>'),
        ])


    def test_serializeSlots(self):
        """
        Test that flattening a slot will use the slot value from the tag.
        """
        t1 = tags.p(slot('test'))
        t2 = t1.clone()
        t2.fillSlots(test='hello, world')
        return gatherResults([
            self.assertFlatteningRaises(t1, UnfilledSlot),
            self.assertFlattensTo(t2, '<p>hello, world</p>'),
        ])


    def test_serializeDeferredSlots(self):
        """
        Test that a slot with a deferred as its value will be flattened using
        the value from the deferred.
        """
        t = tags.p(slot('test'))
        t.fillSlots(test=succeed(tags.em('four>')))
        return self.assertFlattensTo(t, '<p><em>four&gt;</em></p>')


    def test_unknownTypeRaises(self):
        """
        Test that flattening an unknown type of thing raises an exception.
        """
        return self.assertFlatteningRaises(None, UnsupportedType)
# Use the co_filename mechanism (instead of the __file__ mechanism) because
# it is the mechanism traceback formatting uses. The two do not necessarily
# agree with each other. This requires a code object compiled in this file.
# The easiest way to get a code object is with a new function. I'll use a
# lambda to avoid adding anything else to this namespace. The result will
# be a string which agrees with the one the traceback module will put into a
# traceback for frames associated with functions defined in this file.
HERE = (lambda: None).func_code.co_filename  # Python 2 spelling of __code__.co_filename
class FlattenerErrorTests(TestCase):
    """
    Tests for L{FlattenerError}.
    """
    def test_string(self):
        """
        If a L{FlattenerError} is created with a string root, up to around 40
        bytes from that string are included in the string representation of the
        exception.
        """
        # Short roots are shown whole.
        self.assertEqual(
            str(FlattenerError(RuntimeError("reason"), ['abc123xyz'], [])),
            "Exception while flattening:\n"
            " 'abc123xyz'\n"
            "RuntimeError: reason\n")
        # Long roots are elided in the middle with '<...>'.
        self.assertEqual(
            str(FlattenerError(
                RuntimeError("reason"), ['0123456789' * 10], [])),
            "Exception while flattening:\n"
            " '01234567890123456789<...>01234567890123456789'\n"
            "RuntimeError: reason\n")


    def test_unicode(self):
        """
        If a L{FlattenerError} is created with a unicode root, up to around 40
        characters from that string are included in the string representation
        of the exception.
        """
        self.assertEqual(
            str(FlattenerError(
                RuntimeError("reason"), [u'abc\N{SNOWMAN}xyz'], [])),
            "Exception while flattening:\n"
            " u'abc\\u2603xyz'\n"  # Codepoint for SNOWMAN
            "RuntimeError: reason\n")
        # As for byte strings, long unicode roots are elided in the middle.
        self.assertEqual(
            str(FlattenerError(
                RuntimeError("reason"), [u'01234567\N{SNOWMAN}9' * 10],
                [])),
            "Exception while flattening:\n"
            " u'01234567\\u2603901234567\\u26039<...>01234567\\u2603901234567"
            "\\u26039'\n"
            "RuntimeError: reason\n")


    def test_renderable(self):
        """
        If a L{FlattenerError} is created with an L{IRenderable} provider root,
        the repr of that object is included in the string representation of the
        exception.
        """
        class Renderable(object):
            implements(IRenderable)
            def __repr__(self):
                return "renderable repr"

        self.assertEqual(
            str(FlattenerError(
                RuntimeError("reason"), [Renderable()], [])),
            "Exception while flattening:\n"
            " renderable repr\n"
            "RuntimeError: reason\n")


    def test_tag(self):
        """
        If a L{FlattenerError} is created with a L{Tag} instance with source
        location information, the source location is included in the string
        representation of the exception.
        """
        tag = Tag(
            'div', filename='/foo/filename.xhtml', lineNumber=17, columnNumber=12)

        self.assertEqual(
            str(FlattenerError(RuntimeError("reason"), [tag], [])),
            "Exception while flattening:\n"
            "  File \"/foo/filename.xhtml\", line 17, column 12, in \"div\"\n"
            "RuntimeError: reason\n")


    def test_tagWithoutLocation(self):
        """
        If a L{FlattenerError} is created with a L{Tag} instance without source
        location information, only the tagName is included in the string
        representation of the exception.
        """
        self.assertEqual(
            str(FlattenerError(RuntimeError("reason"), [Tag('span')], [])),
            "Exception while flattening:\n"
            " Tag <span>\n"
            "RuntimeError: reason\n")


    def test_traceback(self):
        """
        If a L{FlattenerError} is created with traceback frames, they are
        included in the string representation of the exception.
        """
        # Try to be realistic in creating the data passed in for the traceback
        # frames.
        def f():
            g()
        def g():
            raise RuntimeError("reason")

        try:
            f()
        except RuntimeError, exc:
            # Get the traceback, minus the info for *this* frame
            tbinfo = traceback.extract_tb(sys.exc_info()[2])[1:]
        else:
            self.fail("f() must raise RuntimeError")

        # HERE (co_filename of this module) must match the paths the
        # traceback module itself reports for f and g.
        self.assertEqual(
            str(FlattenerError(exc, [], tbinfo)),
            "Exception while flattening:\n"
            "  File \"%s\", line %d, in f\n"
            "    g()\n"
            "  File \"%s\", line %d, in g\n"
            "    raise RuntimeError(\"reason\")\n"
            "RuntimeError: reason\n" % (
                HERE, f.func_code.co_firstlineno + 1,
                HERE, g.func_code.co_firstlineno + 1))
mluke93/osf.io | tests/test_serializers.py | 8 | 13133 | # -*- coding: utf-8 -*-
from nose.tools import * # noqa (PEP8 asserts)
from tests.factories import (
ProjectFactory,
UserFactory,
RegistrationFactory,
NodeFactory,
NodeLogFactory,
CollectionFactory,
)
from tests.base import OsfTestCase
from framework.auth import Auth
from framework import utils as framework_utils
from website.project.views.node import _get_summary, _view_project, _serialize_node_search, _get_children
from website.views import _render_node
from website.profile import utils
from website.util import permissions
class TestNodeSerializers(OsfTestCase):

    # Regression test for #489
    # https://github.com/CenterForOpenScience/openscienceframework.org/issues/489
    def test_get_summary_private_node_should_include_id_and_primary_boolean_reg_and_fork(self):
        user = UserFactory()
        # user cannot see this node
        node = ProjectFactory(is_public=False)
        result = _get_summary(
            node, auth=Auth(user),
            primary=True,
            link_id=None
        )

        # serialized result should have id and primary
        assert_equal(result['summary']['id'], node._primary_key)
        # NOTE(review): the second argument to assert_true is nose's msg
        # parameter, not an expected value.
        assert_true(result['summary']['primary'], True)
        assert_equal(result['summary']['is_registration'], node.is_registration)
        assert_equal(result['summary']['is_fork'], node.is_fork)

    # https://github.com/CenterForOpenScience/openscienceframework.org/issues/668
    def test_get_summary_for_registration_uses_correct_date_format(self):
        reg = RegistrationFactory()
        res = _get_summary(reg, auth=Auth(reg.creator))
        assert_equal(res['summary']['registered_date'],
                     reg.registered_date.strftime('%Y-%m-%d %H:%M UTC'))

    # https://github.com/CenterForOpenScience/openscienceframework.org/issues/858
    def test_get_summary_private_registration_should_include_is_registration(self):
        user = UserFactory()
        # non-contributor cannot see private registration of public project
        node = ProjectFactory(is_public=True)
        reg = RegistrationFactory(project=node, user=node.creator)
        res = _get_summary(reg, auth=Auth(user))

        # serialized result should have is_registration
        assert_true(res['summary']['is_registration'])

    def test_render_node(self):
        # Serialized fields should mirror the node's own attributes.
        node = ProjectFactory()
        res = _render_node(node)
        assert_equal(res['title'], node.title)
        assert_equal(res['id'], node._primary_key)
        assert_equal(res['url'], node.url)
        assert_equal(res['api_url'], node.api_url)
        assert_equal(res['primary'], node.primary)
        assert_equal(res['date_modified'], framework_utils.iso8601format(node.date_modified))
        assert_equal(res['category'], 'project')
        assert_false(res['is_registration'])
        assert_false(res['is_retracted'])

    def test_render_node_returns_permissions(self):
        # The serialized 'permissions' value reflects the highest permission
        # held by the viewing user.
        node = ProjectFactory()
        admin = UserFactory()
        node.add_contributor(admin, auth=Auth(node.creator),
                             permissions=permissions.expand_permissions(permissions.ADMIN))
        writer = UserFactory()
        node.add_contributor(writer, auth=Auth(node.creator),
                             permissions=permissions.expand_permissions(permissions.WRITE))
        node.save()

        res_admin = _render_node(node, Auth(admin))
        assert_equal(res_admin['permissions'], 'admin')
        res_writer = _render_node(node, Auth(writer))
        assert_equal(res_writer['permissions'], 'write')

    # https://openscience.atlassian.net/browse/OSF-4618
    def test_get_children_only_returns_child_nodes_with_admin_permissions(self):
        user = UserFactory()
        admin_project = ProjectFactory()
        admin_project.add_contributor(user, auth=Auth(admin_project.creator),
                                      permissions=permissions.expand_permissions(permissions.ADMIN))
        admin_project.save()

        admin_component = NodeFactory(parent=admin_project)
        admin_component.add_contributor(user, auth=Auth(admin_component.creator),
                                        permissions=permissions.expand_permissions(permissions.ADMIN))
        admin_component.save()

        read_and_write = NodeFactory(parent=admin_project)
        read_and_write.add_contributor(user, auth=Auth(read_and_write.creator),
                                       permissions=permissions.expand_permissions(permissions.WRITE))
        read_and_write.save()
        read_only = NodeFactory(parent=admin_project)
        read_only.add_contributor(user, auth=Auth(read_only.creator),
                                  permissions=permissions.expand_permissions(permissions.READ))
        read_only.save()

        non_contributor = NodeFactory(parent=admin_project)
        components = _get_children(admin_project, Auth(user))
        # Only admin_component qualifies; write/read/non-contributor do not.
        assert_equal(len(components), 1)

    def test_get_summary_private_fork_should_include_is_fork(self):
        user = UserFactory()
        # non-contributor cannot see private fork of public project
        node = ProjectFactory(is_public=True)
        consolidated_auth = Auth(user=node.creator)
        fork = node.fork_node(consolidated_auth)

        res = _get_summary(
            fork, auth=Auth(user),
            primary=True,
            link_id=None
        )
        # serialized result should have is_fork
        assert_true(res['summary']['is_fork'])

    def test_get_summary_private_fork_private_project_should_include_is_fork(self):
        # contributor on a private project
        user = UserFactory()
        node = ProjectFactory(is_public=False)
        node.add_contributor(user)

        # contributor cannot see private fork of this project
        consolidated_auth = Auth(user=node.creator)
        fork = node.fork_node(consolidated_auth)

        res = _get_summary(
            fork, auth=Auth(user),
            primary=True,
            link_id=None
        )
        # serialized result should have is_fork
        assert_false(res['summary']['can_view'])
        assert_true(res['summary']['is_fork'])

    def test_serialize_node_search_returns_only_visible_contributors(self):
        node = NodeFactory()
        non_visible_contributor = UserFactory()
        node.add_contributor(non_visible_contributor, visible=False)
        serialized_node = _serialize_node_search(node)

        # Only the single visible contributor should be reflected.
        assert_equal(serialized_node['firstAuthor'], node.visible_contributors[0].family_name)
        assert_equal(len(node.visible_contributors), 1)
        assert_false(serialized_node['etal'])
class TestViewProject(OsfTestCase):

    def setUp(self):
        super(TestViewProject, self).setUp()
        self.user = UserFactory()
        self.node = ProjectFactory(creator=self.user)

    # related to https://github.com/CenterForOpenScience/openscienceframework.org/issues/1109
    def test_view_project_pointer_count_excludes_folders(self):
        pointer_project = ProjectFactory(is_public=True)  # project that points to another project
        pointed_project = self.node  # project that other project points to
        pointer_project.add_pointer(pointed_project, Auth(pointer_project.creator), save=True)

        # Project is in a organizer collection
        folder = CollectionFactory(creator=pointed_project.creator)
        folder.add_pointer(pointed_project, Auth(pointed_project.creator), save=True)

        result = _view_project(pointed_project, Auth(pointed_project.creator))
        # pointer_project is included in count, but not folder
        assert_equal(result['node']['points'], 1)

    def test_view_project_pending_registration_for_admin_contributor_does_contain_cancel_link(self):
        """Admins get a populated, tokenized disapproval link."""
        pending_reg = RegistrationFactory(project=self.node, archive=True)
        assert_true(pending_reg.is_pending_registration)
        result = _view_project(pending_reg, Auth(self.user))

        assert_not_equal(result['node']['disapproval_link'], '')
        assert_in('/?token=', result['node']['disapproval_link'])
        pending_reg.remove()

    def test_view_project_pending_registration_for_write_contributor_does_not_contain_cancel_link(self):
        """Write-only contributors get an empty disapproval link."""
        write_user = UserFactory()
        self.node.add_contributor(write_user, permissions=permissions.WRITE,
                                  auth=Auth(self.user), save=True)
        pending_reg = RegistrationFactory(project=self.node, archive=True)
        assert_true(pending_reg.is_pending_registration)
        result = _view_project(pending_reg, Auth(write_user))

        assert_equal(result['node']['disapproval_link'], '')
        pending_reg.remove()
class TestNodeLogSerializers(OsfTestCase):

    def test_serialize_node_for_logs(self):
        """Node.serialize() output mirrors the node's own attributes."""
        node = NodeFactory()
        serialized = node.serialize()

        expectations = [
            ('id', node._primary_key),
            ('category', node.category_display),
            ('node_type', node.project_or_component),
            ('url', node.url),
            ('title', node.title),
            ('api_url', node.api_url),
            ('is_public', node.is_public),
            ('is_registration', node.is_registration),
        ]
        for key, expected in expectations:
            assert_equal(serialized[key], expected)
class TestAddContributorJson(OsfTestCase):
    """Exercises utils.add_contributor_json for users with and without
    employment/education history."""

    def setUp(self):
        super(TestAddContributorJson, self).setUp()
        self.user = UserFactory()
        self.profile = self.user.profile_url
        self.user_id = self.user._primary_key
        self.fullname = self.user.fullname
        self.username = self.user.username
        # One fake employment entry and one fake education entry used by
        # the "with_job" / "with_edu" variants below.
        self.jobs = [{
            'institution': 'School of Lover Boys',
            'department': 'Fancy Patter',
            'title': 'Lover Boy',
            'start': None,
            'end': None,
        }]
        self.schools = [{
            'degree': 'Vibing',
            'institution': 'Queens University',
            'department': '',
            'location': '',
            'start': None,
            'end': None,
        }]

    def _assert_contributor_info(self, user_info, employment, education):
        # Shared assertions for every add_contributor_json payload; only
        # the employment/education fields vary between the tests.
        assert_equal(user_info['fullname'], self.fullname)
        assert_equal(user_info['email'], self.username)
        assert_equal(user_info['id'], self.user_id)
        assert_equal(user_info['employment'], employment)
        assert_equal(user_info['education'], education)
        assert_equal(user_info['n_projects_in_common'], 0)
        assert_equal(user_info['registered'], True)
        assert_equal(user_info['active'], True)
        assert_in('secure.gravatar.com', user_info['gravatar_url'])
        assert_equal(user_info['profile_url'], self.profile)

    def test_add_contributor_json(self):
        # User with no employment or education info listed
        user_info = utils.add_contributor_json(self.user)
        self._assert_contributor_info(user_info, None, None)

    def test_add_contributor_json_with_edu(self):
        # Test user with only education information
        self.user.schools = self.schools
        user_info = utils.add_contributor_json(self.user)
        self._assert_contributor_info(
            user_info, None, self.user.schools[0]['institution'])

    def test_add_contributor_json_with_job(self):
        # Test user with only employment information
        self.user.jobs = self.jobs
        user_info = utils.add_contributor_json(self.user)
        self._assert_contributor_info(
            user_info, self.user.jobs[0]['institution'], None)

    def test_add_contributor_json_with_job_and_edu(self):
        # User with both employment and education information
        self.user.jobs = self.jobs
        self.user.schools = self.schools
        user_info = utils.add_contributor_json(self.user)
        self._assert_contributor_info(
            user_info,
            self.user.jobs[0]['institution'],
            self.user.schools[0]['institution'])
| apache-2.0 |
carmine/open-kilda | services/traffexam/kilda/traffexam/service.py | 2 | 7291 | # Copyright 2017 Telstra Open Source
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import errno
import json
import time
import threading
import subprocess
import pyroute2
from kilda.traffexam import context as context_module
from kilda.traffexam import exc
from kilda.traffexam import model
from kilda.traffexam import system
class Abstract(system.NSIPDBMixin, context_module.ContextConsumer):
    """Thread-safe pool of managed network entities.

    Subclasses implement ``_create``/``_delete``/``key`` to manage one
    concrete kind of resource (VLAN iface, IP address, iperf endpoint).
    """

    def __init__(self, context):
        super().__init__(context)
        self._pool = {}  # key -> managed entity
        self._lock = threading.Lock()  # guards _pool mutation

    def create(self, subject):
        """Allocate *subject* via _create() and register it in the pool.

        Raises exc.ServiceCreateError (chained) if the subclass hook fails.
        """
        with self._lock:
            try:
                item = self._create(subject)
            except Exception as e:
                raise exc.ServiceCreateError(self, subject) from e
            self._pool[self.key(item)] = item
        return item

    def list(self):
        """Return a snapshot (tuple) of all pooled entities."""
        return tuple(self._pool.values())

    def lookup(self, key):
        """Return the pooled entity for *key* or raise ServiceLookupError."""
        try:
            item = self._pool[key]
        except KeyError:
            raise exc.ServiceLookupError(self, key) from None
        return item

    def delete(self, key, ignore_missing=False):
        """Remove the entity for *key* and tear it down via _delete().

        On teardown failure the entity is put back into the pool so a
        retry remains possible, and ServiceDeleteError is raised.
        """
        with self._lock:
            try:
                subject = self._pool.pop(key)
            except KeyError:
                if not ignore_missing:
                    raise exc.ServiceLookupError(self, key) from None
                return
            try:
                self._delete(subject)
            except Exception as e:
                # Restore pool entry so the failure is not silently final.
                self._pool[key] = subject
                raise exc.ServiceDeleteError(self, key, subject) from e

    def _create(self, subject):
        # Subclass hook: allocate the concrete resource.
        raise NotImplementedError

    def _delete(self, subject):
        # Subclass hook: release the concrete resource.
        raise NotImplementedError

    def key(self, subject):
        # Subclass hook: pool key for *subject*.
        raise NotImplementedError

    def get_gw_iface(self):
        """Namespace-side name of the shared veth pair (the gateway)."""
        return self.context.shared_registry.fetch(system.VEthPair).ns
class VLANService(Abstract):
    """Manages 802.1Q VLAN sub-interfaces on top of the gateway veth."""

    def key(self, subject):
        # VLANs are pooled by their tag number.
        return subject.tag

    def _create(self, subject):
        """Create the vlan iface, bring it up and attach it to *subject*."""
        tag = self.key(subject)
        ifname = self.make_iface_name(tag)
        ip = self.get_ipdb()
        with ip.create(
                kind='vlan', ifname=ifname, vlan_id=tag,
                link=self.get_gw_iface()) as iface:
            iface.up()
        # Re-read the (read-only) iface record to get the kernel index.
        iface = ip.interfaces[ifname].ro
        subject.set_iface(model.NetworkIface(
                ifname, index=iface.index, vlan_tag=tag))
        return subject

    def _delete(self, subject):
        """Remove the vlan iface belonging to *subject*."""
        tag = self.key(subject)
        ifname = self.make_iface_name(tag)
        with self.get_ipdb().interfaces[ifname] as iface:
            iface.remove()

    @staticmethod
    def make_iface_name(tag):
        """Deterministic interface name for a VLAN tag, e.g. 'vlan.100'."""
        return 'vlan.{}'.format(tag)
class IpAddressService(Abstract):
    """Assigns/releases IP addresses on network interfaces."""

    def key(self, subject):
        # Addresses are pooled by their unique idnr.
        return subject.idnr

    def _create(self, subject):
        """Add subject.address/prefix on its iface (gateway by default)."""
        if subject.iface is None:
            subject.iface = model.NetworkIface(self.get_gw_iface())
        name = subject.iface.get_ipdb_key()
        with self.get_ipdb().interfaces[name] as iface:
            iface.add_ip(subject.address, mask=subject.prefix)
        return subject

    def _delete(self, subject):
        """Remove subject.address/prefix from its iface."""
        name = subject.iface.get_ipdb_key()
        with self.get_ipdb().interfaces[name] as iface:
            iface.del_ip(subject.address, mask=subject.prefix)
class EndpointService(Abstract):
    """Spawns and tracks iperf3 producer/consumer processes."""

    def key(self, subject):
        return subject.idnr

    def get_report(self, key):
        """Return (parsed_json_report, stderr_text) for endpoint *key*.

        Returns None while the iperf3 process is still running.
        """
        entity = self.lookup(key)
        proc = entity.proc
        if proc.poll() is None:
            return None

        out = []
        for path in (
                self.make_report_file_name(entity),
                self.make_error_file_name(entity)):
            with open(str(path), 'rt') as stream:
                out.append(stream.read())

        report, error = out
        report = json.loads(report)
        return report, error

    def _create(self, subject):
        """Dispatch on the endpoint kind; raise ValueError otherwise."""
        if isinstance(subject, model.ConsumerEndpoint):
            self._create_consumer(subject)
        elif isinstance(subject, model.ProducerEndpoint):
            self._create_producer(subject)
        else:
            raise ValueError('Unsupported payload {!r}'.format(subject))
        return subject

    def _delete(self, subject):
        """Stop the iperf3 process and clean up per-endpoint files."""
        # Best-effort removal of the report/error files.
        for file in (
                self.make_report_file_name(subject),
                self.make_error_file_name(subject)):
            try:
                file.unlink()
            except FileNotFoundError:
                pass

        try:
            # Ask politely a few times, then force-kill.
            for _ in range(3):
                if subject.proc.poll() is not None:
                    break
                subject.proc.terminate()
                time.sleep(1)
            else:
                subject.proc.kill()
        except OSError as e:
            # ESRCH: process already gone - nothing left to stop.
            if e.errno != errno.ESRCH:
                raise
        subject.proc.wait()

        if isinstance(subject, model.ConsumerEndpoint):
            subject.bind_address.free_port(subject.bind_port)

    def _create_consumer(self, subject):
        """Start an iperf3 server (one-off) on a freshly allocated port."""
        subject.bind_port = subject.bind_address.alloc_port()

        cmd = self.make_cmd_common_part(subject)
        cmd += [
            '--server',
            '--one-off',
            '--port={}'.format(subject.bind_port)]
        self.run_iperf(subject, cmd)

    def _create_producer(self, subject):
        """Start an iperf3 UDP client towards subject.remote_address."""
        cmd = self.make_cmd_common_part(subject)
        cmd += [
            '--client={}'.format(subject.remote_address.address),
            '--port={}'.format(subject.remote_address.port),
            '--bandwidth={}'.format(subject.bandwidth * 1024),
            '--time={}'.format(subject.time),
            '--interval=1',
            '--udp']
        self.run_iperf(subject, cmd)

    def make_cmd_common_part(self, subject):
        """Command prefix shared by producer and consumer endpoints
        (runs iperf3 inside the exam network namespace)."""
        cmd = [
            'ip', 'netns', 'exec', self.context.make_network_namespace_name(),
            'iperf3', '--json', '--interval=1']
        if subject.bind_address is not None:
            cmd.append('--bind={}'.format(subject.bind_address.address))
        return cmd

    def run_iperf(self, subject, cmd):
        """Spawn iperf3 redirecting stdout/stderr into per-endpoint files."""
        report = open(str(self.make_report_file_name(subject)), 'wb')
        err = open(str(self.make_error_file_name(subject)), 'wb')
        try:
            proc = subprocess.Popen(cmd, stdout=report, stderr=err)
        finally:
            # FIX: the child process owns its own duplicates of these
            # descriptors after Popen; the original code never closed the
            # parent's copies, leaking two fds per spawned endpoint.
            report.close()
            err.close()
        subject.set_proc(proc)
        self.context.children.add(proc)

    def make_report_file_name(self, subject):
        """Path of the file receiving iperf3 JSON output."""
        return self.context.path('{}.json'.format(subject.idnr))

    def make_error_file_name(self, subject):
        """Path of the file receiving iperf3 stderr."""
        return self.context.path('{}.err'.format(subject.idnr))
class Adapter(object):
    """Facade bundling the three services for a single exam context."""

    def __init__(self, context):
        self.address = IpAddressService(context)
        self.vlan = VLANService(context)
        self.endpoint = EndpointService(context)
| apache-2.0 |
EnviroCentre/patterns-toolbox | version.py | 1 | 1466 | # -*- coding: utf-8 -*-
from subprocess import check_output, CalledProcessError
from codecs import open
from os import path
# Version tags are expected to look like 'v1.2.3'.
TAG_PREFIX = 'v'


def update():
    """Compute the project version from ``git describe`` and cache it.

    Returns a dict with keys:
        tag  -- bare version number (tag without the 'v' prefix)
        dist -- commits since the tag ('0' / 0 when exactly on a tag)
        str  -- version string: '<tag>' or '<tag>+<dist>'

    On success the string is written to the VERSION file next to this
    module; when git is unavailable (not installed, or not a repo) the
    previously written VERSION file is used as the fallback source.
    """
    here = path.abspath(path.dirname(__file__))

    try:
        git_args = ['git',
                    'describe',
                    '--tags',
                    '--always']
        # `git describe` output: '<tag>[-<dist>-g<hash>]'
        v_elements = check_output(git_args, universal_newlines=True).split('-')
        tag = v_elements[0]
        if not tag.startswith(TAG_PREFIX):
            raise ValueError("Tag `{}` must start with `{}`.".format(tag, TAG_PREFIX))
        version = tag[len(TAG_PREFIX):].strip()

        if len(v_elements) == 1:
            # Working tree is exactly on a tag: no distance component.
            v_str = version
            dist = 0
        else:
            dist = v_elements[1].strip()
            v_str = '+'.join([version, dist])

        with open(path.join(here, 'VERSION'), mode='w', encoding='utf-8') as v_file:
            v_file.write(v_str)

        return {'tag': version,
                'dist': dist,
                'str': v_str}
    except (CalledProcessError, OSError):
        # FIX: also catch OSError so a missing `git` binary falls back to
        # the cached file instead of crashing (original caught only
        # CalledProcessError).
        # If we're not in a git repo, get the version from the VERSION file.
        with open(path.join(here, 'VERSION'), encoding='utf-8') as v_file:
            v_str = v_file.read().strip()
        # FIX: the write path above joins version and distance with '+',
        # but the original fallback split on '-' and therefore always lost
        # the distance on round-trip. Split on '+' to match the writer.
        v_elements = v_str.split('+')
        tag = v_elements[0]
        if len(v_elements) == 1:
            dist = 0
        else:
            dist = v_elements[1]
        return {'tag': tag,
                'dist': dist,
                'str': v_str}
erkrishna9/odoo | addons/google_calendar/google_calendar.py | 12 | 47478 | # -*- coding: utf-8 -*-
import operator
import simplejson
import urllib2
import openerp
from openerp import tools
from openerp import SUPERUSER_ID
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT
from openerp.tools.translate import _
from openerp.http import request
from datetime import datetime, timedelta
from dateutil import parser
import pytz
from openerp.osv import fields, osv
import logging
_logger = logging.getLogger(__name__)
def status_response(status, substr=False):
    """Interpret an HTTP status code.

    With ``substr=True`` return the leading digit (the status class,
    e.g. 2 for 2xx); otherwise return True iff the code is a 2xx success.
    """
    leading_digit = int(str(status)[0])
    if substr:
        return leading_digit
    return leading_digit == 2
class Meta(type):
    """Metaclass turning a class body into a per-instance record (struct).

    Class-level attributes become instance attributes assigned in a
    generated ``__init__``, avoiding the shared-mutable-state side effect
    of plain ('static'-like) class attributes.
    """
    def __new__(typ, name, parents, attrs):
        # Split the class body: callables stay methods, the rest become
        # per-instance default values.
        methods = dict((k, v) for k, v in attrs.iteritems()
                       if callable(v))
        attrs = dict((k, v) for k, v in attrs.iteritems()
                     if not callable(v))

        def init(self, **kw):
            # Copy every declared default onto the instance, then apply
            # keyword overrides; only declared names are accepted.
            for k, v in attrs.iteritems():
                setattr(self, k, v)
            for k, v in kw.iteritems():
                assert k in attrs
                setattr(self, k, v)

        methods['__init__'] = init
        # Allow dict-style access: obj['field'] == obj.field
        methods['__getitem__'] = getattr
        return type.__new__(typ, name, parents, methods)
class Struct(object):
    # Base class for simple record types; see Meta for the generated API.
    __metaclass__ = Meta
class OpenerpEvent(Struct):
    # Snapshot of one calendar event as known on the OpenERP side.
    event = False  # calendar.event browse record
    found = False  # True when the event exists in OpenERP
    event_id = False
    isRecurrence = False  # event is the recurrence definition itself
    isInstance = False  # event is a single occurrence of a recurrence
    update = False  # last modification timestamp (string)
    status = False  # active flag (False once deleted/archived)
    attendee_id = False
    synchro = False  # last synchronization timestamp
class GmailEvent(Struct):
    # Snapshot of one calendar event as known on the Google side.
    event = False  # raw event dict returned by the Google API
    found = False  # True when the event exists in Google Calendar
    isRecurrence = False  # event carries the recurrence rule
    isInstance = False  # event is an instance of a recurring event
    update = False  # 'updated' timestamp reported by Google
    status = False  # True unless cancelled on the Google side
class SyncEvent(object):
    """Pairs the OpenERP view (OE) and the Google view (GG) of one event
    and decides which synchronization operation (OP) to apply."""

    def __init__(self):
        self.OE = OpenerpEvent()
        self.GG = GmailEvent()
        self.OP = None  # computed by compute_OP()

    def __getitem__(self, key):
        # Allows self['OE'] / self['GG'] indirection via a side name.
        return getattr(self, key)

    def compute_OP(self, modeFull=True):
        """Set self.OP to the SyncOperation matching the OE/GG state.

        modeFull=False means an incremental (since last sync) pass, which
        changes how OpenERP-only deletions are treated.
        """
        #If event are already in Gmail and in OpenERP
        if self.OE.found and self.GG.found:
            #If the event has been deleted from one side, we delete on other side !
            if self.OE.status != self.GG.status:
                self.OP = Delete((self.OE.status and "OE") or (self.GG.status and "GG"),
                                 'The event has been deleted from one side, we delete on other side !')
            #If event is not deleted !
            elif self.OE.status and self.GG.status:
                # Compare timestamps with sub-second part stripped.
                if self.OE.update.split('.')[0] != self.GG.update.split('.')[0]:
                    if self.OE.update < self.GG.update:
                        tmpSrc = 'GG'
                    elif self.OE.update > self.GG.update:
                        tmpSrc = 'OE'
                    assert tmpSrc in ['GG', 'OE']

                    #if self.OP.action == None:
                    if self[tmpSrc].isRecurrence:
                        if self[tmpSrc].status:
                            self.OP = Update(tmpSrc, 'Only need to update, because i\'m active')
                        else:
                            self.OP = Exclude(tmpSrc, 'Need to Exclude (Me = First event from recurrence) from recurrence')
                    elif self[tmpSrc].isInstance:
                        self.OP = Update(tmpSrc, 'Only need to update, because already an exclu')
                    else:
                        self.OP = Update(tmpSrc, 'Simply Update... I\'m a single event')
                else:
                    if not self.OE.synchro or self.OE.synchro.split('.')[0] < self.OE.update.split('.')[0]:
                        self.OP = Update('OE', 'Event already updated by another user, but not synchro with my google calendar')
                    else:
                        self.OP = NothingToDo("", 'Not update needed')
            else:
                self.OP = NothingToDo("", "Both are already deleted")

        # New in openERP... Create on create_events of synchronize function
        elif self.OE.found and not self.GG.found:
            if self.OE.status:
                self.OP = Delete('OE', 'Update or delete from GOOGLE')
            else:
                if not modeFull:
                    self.OP = Delete('GG', 'Deleted from OpenERP, need to delete it from Gmail if already created')
                else:
                    self.OP = NothingToDo("", "Already Deleted in gmail and unlinked in OpenERP")
        elif self.GG.found and not self.OE.found:
            tmpSrc = 'GG'
            if not self.GG.status and not self.GG.isInstance:
                # don't need to make something... because event has been created and deleted before the synchronization
                self.OP = NothingToDo("", 'Nothing to do... Create and Delete directly')
            else:
                if self.GG.isInstance:
                    if self[tmpSrc].status:
                        self.OP = Exclude(tmpSrc, 'Need to create the new exclu')
                    else:
                        self.OP = Exclude(tmpSrc, 'Need to copy and Exclude')
                else:
                    self.OP = Create(tmpSrc, 'New EVENT CREATE from GMAIL')

    def __str__(self):
        return self.__repr__()

    def __repr__(self):
        # Debug dump of both sides and of the decided operation.
        myPrint = "\n\n---- A SYNC EVENT ---"
        myPrint += "\n ID OE: %s " % (self.OE.event and self.OE.event.id)
        myPrint += "\n ID GG: %s " % (self.GG.event and self.GG.event.get('id', False))
        myPrint += "\n Name OE: %s " % (self.OE.event and self.OE.event.name.encode('utf8'))
        myPrint += "\n Name GG: %s " % (self.GG.event and self.GG.event.get('summary', '').encode('utf8'))
        myPrint += "\n Found OE:%5s vs GG: %5s" % (self.OE.found, self.GG.found)
        myPrint += "\n Recurrence OE:%5s vs GG: %5s" % (self.OE.isRecurrence, self.GG.isRecurrence)
        myPrint += "\n Instance OE:%5s vs GG: %5s" % (self.OE.isInstance, self.GG.isInstance)
        myPrint += "\n Synchro OE: %10s " % (self.OE.synchro)
        myPrint += "\n Update OE: %10s " % (self.OE.update)
        myPrint += "\n Update GG: %10s " % (self.GG.update)
        myPrint += "\n Status OE:%5s vs GG: %5s" % (self.OE.status, self.GG.status)
        if (self.OP is None):
            myPrint += "\n Action %s" % "---!!!---NONE---!!!---"
        else:
            myPrint += "\n Action %s" % type(self.OP).__name__
            myPrint += "\n Source %s" % (self.OP.src)
            myPrint += "\n comment %s" % (self.OP.info)
        return myPrint
class SyncOperation(object):
    """Base class for a synchronization action.

    src  -- side holding the reference data ('OE' or 'GG')
    info -- human readable reason for the decision
    Any extra keyword arguments become attributes as-is.
    """

    def __init__(self, src, info, **kw):
        self.src = src
        self.info = info
        for attr_name, attr_value in kw.items():
            setattr(self, attr_name, attr_value)

    def __str__(self):
        return 'in__STR__'
class Create(SyncOperation):
    # Create the event on the target side.
    pass


class Update(SyncOperation):
    # Propagate changes from the `src` side to the other one.
    pass


class Delete(SyncOperation):
    # Remove the event from the side named by `src`.
    pass


class NothingToDo(SyncOperation):
    # Both sides already agree; no action required.
    pass


class Exclude(SyncOperation):
    # Detach one occurrence from its recurrence (exclusion).
    pass
class google_calendar(osv.AbstractModel):
    # Abstract model implementing two-way synchronization between OpenERP
    # calendar events and the current user's primary Google Calendar.
    STR_SERVICE = 'calendar'
    _name = 'google.%s' % STR_SERVICE
    def generate_data(self, cr, uid, event, isCreating=False, context=None):
        """Build the Google API event payload (dict) for *event*.

        All-day events use date-only boundaries (Google treats the end
        date as exclusive, hence the extra day when creating); timed
        events use full UTC datetimes.
        """
        if event.allday:
            start_date = fields.datetime.context_timestamp(cr, uid, datetime.strptime(event.start, tools.DEFAULT_SERVER_DATETIME_FORMAT), context=context).isoformat('T').split('T')[0]
            final_date = fields.datetime.context_timestamp(cr, uid, datetime.strptime(event.start, tools.DEFAULT_SERVER_DATETIME_FORMAT) + timedelta(hours=event.duration) + timedelta(days=isCreating and 1 or 0), context=context).isoformat('T').split('T')[0]
            type = 'date'
            vstype = 'dateTime'
        else:
            start_date = fields.datetime.context_timestamp(cr, uid, datetime.strptime(event.start, tools.DEFAULT_SERVER_DATETIME_FORMAT), context=context).isoformat('T')
            final_date = fields.datetime.context_timestamp(cr, uid, datetime.strptime(event.stop, tools.DEFAULT_SERVER_DATETIME_FORMAT), context=context).isoformat('T')
            type = 'dateTime'
            vstype = 'date'
        attendee_list = []
        for attendee in event.attendee_ids:
            attendee_list.append({
                'email': attendee.email or 'NoEmail@mail.com',
                'displayName': attendee.partner_id.name,
                'responseStatus': attendee.state or 'needsAction',
            })

        data = {
            "summary": event.name or '',
            "description": event.description or '',
            "start": {
                # Setting the unused variant to None clears any stale
                # value on the Google side when switching all-day/timed.
                type: start_date,
                vstype: None,
                'timeZone': 'UTC'
            },
            "end": {
                type: final_date,
                vstype: None,
                'timeZone': 'UTC'
            },
            "attendees": attendee_list,
            "location": event.location or '',
            "visibility": event['class'] or 'public',
        }
        if event.recurrency and event.rrule:
            data["recurrence"] = ["RRULE:" + event.rrule]

        if not event.active:
            data["state"] = "cancelled"

        # Attendee synchronization can be disabled by configuration.
        if not self.get_need_synchro_attendee(cr, uid, context=context):
            data.pop("attendees")
        return data
    def create_an_event(self, cr, uid, event, context=None):
        """POST *event* to the user's primary Google calendar.

        Returns the (status, content) pair from google.service; content
        carries only the requested fields (id, updated).
        """
        gs_pool = self.pool['google.service']

        data = self.generate_data(cr, uid, event, isCreating=True, context=context)

        url = "/calendar/v3/calendars/%s/events?fields=%s&access_token=%s" % ('primary', urllib2.quote('id,updated'), self.get_token(cr, uid, context))
        headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
        data_json = simplejson.dumps(data)

        return gs_pool._do_request(cr, uid, url, data_json, headers, type='POST', context=context)
    def delete_an_event(self, cr, uid, event_id, context=None):
        """DELETE the Google event *event_id* from the primary calendar.

        Returns the (status, content) pair from google.service.
        """
        gs_pool = self.pool['google.service']

        params = {
            'access_token': self.get_token(cr, uid, context)
        }
        headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}

        url = "/calendar/v3/calendars/%s/events/%s" % ('primary', event_id)

        return gs_pool._do_request(cr, uid, url, params, headers, type='DELETE', context=context)
    def get_calendar_primary_id(self, cr, uid, context=None):
        """Return the id of the user's primary Google calendar, or False.

        On a 401 the stored token is wiped (in a separate cursor, since
        the raised config warning aborts the current transaction) and a
        user-facing warning is raised.
        """
        params = {
            'fields': 'id',
            'access_token': self.get_token(cr, uid, context)
        }
        headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}

        url = "/calendar/v3/calendars/primary"

        try:
            st, content = self.pool['google.service']._do_request(cr, uid, url, params, headers, type='GET', context=context)
        except Exception, e:
            if (e.code == 401):  # Token invalid / Acces unauthorized
                error_msg = "Your token is invalid or has been revoked !"

                registry = openerp.modules.registry.RegistryManager.get(request.session.db)
                with registry.cursor() as cur:
                    self.pool['res.users'].write(cur, uid, [uid], {'google_calendar_token': False, 'google_calendar_token_validity': False}, context=context)

                raise self.pool.get('res.config.settings').get_config_warning(cr, _(error_msg), context=context)
            raise

        return status_response(st) and content['id'] or False
    def get_event_synchro_dict(self, cr, uid, lastSync=False, token=False, nextPageToken=False, context=None):
        """Fetch events from Google as a dict keyed by Google event id.

        With *lastSync* set, performs an incremental query (updatedMin +
        showDeleted); otherwise a full window starting at get_minTime().
        Recurses through nextPageToken to collect every result page.
        """
        if not token:
            token = self.get_token(cr, uid, context)

        params = {
            'fields': 'items,nextPageToken',
            'access_token': token,
            'maxResults': 1000,
            #'timeMin': self.get_minTime(cr, uid, context=context).strftime("%Y-%m-%dT%H:%M:%S.%fz"),
        }

        if lastSync:
            params['updatedMin'] = lastSync.strftime("%Y-%m-%dT%H:%M:%S.%fz")
            params['showDeleted'] = True
        else:
            params['timeMin'] = self.get_minTime(cr, uid, context=context).strftime("%Y-%m-%dT%H:%M:%S.%fz")

        headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}

        url = "/calendar/v3/calendars/%s/events" % 'primary'
        if nextPageToken:
            params['pageToken'] = nextPageToken

        status, content = self.pool['google.service']._do_request(cr, uid, url, params, headers, type='GET', context=context)

        google_events_dict = {}
        for google_event in content['items']:
            google_events_dict[google_event['id']] = google_event

        if content.get('nextPageToken'):
            # Collect the remaining pages recursively.
            google_events_dict.update(
                self.get_event_synchro_dict(cr, uid, lastSync=lastSync, token=token, nextPageToken=content['nextPageToken'], context=context)
            )

        return google_events_dict
def get_one_event_synchro(self, cr, uid, google_id, context=None):
token = self.get_token(cr, uid, context)
params = {
'access_token': token,
'maxResults': 1000,
'showDeleted': True,
}
headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
url = "/calendar/v3/calendars/%s/events/%s" % ('primary', google_id)
try:
status, content = self.pool['google.service']._do_request(cr, uid, url, params, headers, type='GET', context=context)
except:
_logger.info("Calendar Synchro - In except of get_one_event_synchro")
pass
return status_response(status) and content or False
    def update_to_google(self, cr, uid, oe_event, google_event, context):
        """PATCH the OpenERP event values onto its Google counterpart,
        then record the new 'updated' timestamp on the OpenERP event
        (and on the current attendee, when given in context)."""
        calendar_event = self.pool['calendar.event']

        url = "/calendar/v3/calendars/%s/events/%s?fields=%s&access_token=%s" % ('primary', google_event['id'], 'id,updated', self.get_token(cr, uid, context))
        headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
        data = self.generate_data(cr, uid, oe_event, context)
        # Preserve Google's revision counter to avoid a 400 on PATCH.
        data['sequence'] = google_event.get('sequence', 0)
        data_json = simplejson.dumps(data)

        status, content = self.pool['google.service']._do_request(cr, uid, url, data_json, headers, type='PATCH', context=context)

        update_date = datetime.strptime(content['updated'], "%Y-%m-%dT%H:%M:%S.%fz")
        calendar_event.write(cr, uid, [oe_event.id], {'oe_update_date': update_date})

        if context['curr_attendee']:
            self.pool['calendar.attendee'].write(cr, uid, [context['curr_attendee']], {'oe_synchro_date': update_date}, context)
    def update_an_event(self, cr, uid, event, context=None):
        """Fetch the Google copy of *event* (GET, not an actual update).

        NOTE(review): despite its name this method only performs a GET of
        the remote event and returns the response body.
        """
        data = self.generate_data(cr, uid, event, context=context)

        url = "/calendar/v3/calendars/%s/events/%s" % ('primary', event.google_internal_event_id)
        headers = {}
        data['access_token'] = self.get_token(cr, uid, context)

        status, response = self.pool['google.service']._do_request(cr, uid, url, data, headers, type='GET', context=context)
        #TO_CHECK : , if http fail, no event, do DELETE ?
        return response
    def update_recurrent_event_exclu(self, cr, uid, instance_id, event_ori_google_id, event_new, context=None):
        """PUT *event_new* as an exception (detached instance) of the
        recurring Google event *event_ori_google_id*.

        Returns the (status, content) pair from google.service.
        """
        gs_pool = self.pool['google.service']

        data = self.generate_data(cr, uid, event_new, context=context)

        # Link the instance back to its recurrence on the Google side.
        data['recurringEventId'] = event_ori_google_id
        data['originalStartTime'] = event_new.recurrent_id_date

        url = "/calendar/v3/calendars/%s/events/%s?access_token=%s" % ('primary', instance_id, self.get_token(cr, uid, context))
        headers = {'Content-type': 'application/json'}

        data['sequence'] = self.get_sequence(cr, uid, instance_id, context)

        data_json = simplejson.dumps(data)
        return gs_pool._do_request(cr, uid, url, data_json, headers, type='PUT', context=context)
    def update_from_google(self, cr, uid, event, single_event_dict, type, context):
        """Apply a Google event payload (*single_event_dict*) to OpenERP.

        *type* selects the persistence mode: "write" updates *event*,
        "copy" updates it while flagging recurrence, "create" creates a
        new calendar.event. Returns the ORM write/create result.
        """
        if context is None:
            # NOTE(review): a list cannot serve the context['curr_attendee']
            # lookup below - callers apparently always pass a dict; confirm.
            context = []

        calendar_event = self.pool['calendar.event']
        res_partner_obj = self.pool['res.partner']
        calendar_attendee_obj = self.pool['calendar.attendee']
        user_obj = self.pool['res.users']
        myPartnerID = user_obj.browse(cr, uid, uid, context).partner_id.id
        attendee_record = []
        partner_record = [(4, myPartnerID)]
        result = {}

        if single_event_dict.get('attendees', False):
            for google_attendee in single_event_dict['attendees']:
                if type == "write":
                    # Match existing attendees by email and refresh state.
                    for oe_attendee in event['attendee_ids']:
                        if oe_attendee.email == google_attendee['email']:
                            calendar_attendee_obj.write(cr, uid, [oe_attendee.id], {'state': google_attendee['responseStatus']}, context=context)
                            google_attendee['found'] = True
                            continue

                if google_attendee.get('found', False):
                    continue
                if self.get_need_synchro_attendee(cr, uid, context=context):
                    # Find or create the partner behind this attendee.
                    attendee_id = res_partner_obj.search(cr, uid, [('email', '=', google_attendee['email'])], context=context)
                    if not attendee_id:
                        data = {
                            'email': google_attendee['email'],
                            'customer': False,
                            'name': google_attendee.get("displayName", False) or google_attendee['email']
                        }
                        attendee_id = [res_partner_obj.create(cr, uid, data, context=context)]
                    attendee = res_partner_obj.read(cr, uid, attendee_id[0], ['email'], context=context)
                    partner_record.append((4, attendee.get('id')))
                    attendee['partner_id'] = attendee.pop('id')
                    attendee['state'] = google_attendee['responseStatus']
                    attendee_record.append((0, 0, attendee))
        UTC = pytz.timezone('UTC')
        if single_event_dict.get('start') and single_event_dict.get('end'):  # If not cancelled
            if single_event_dict['start'].get('dateTime', False) and single_event_dict['end'].get('dateTime', False):
                # Timed event: normalize to UTC and drop the offset suffix.
                date = parser.parse(single_event_dict['start']['dateTime'])
                stop = parser.parse(single_event_dict['end']['dateTime'])
                date = str(date.astimezone(UTC))[:-6]
                stop = str(stop.astimezone(UTC))[:-6]
                allday = False
            else:
                # All-day event: Google's end date is exclusive, OpenERP's
                # is inclusive, hence the -1 day adjustment.
                date = (single_event_dict['start']['date'])
                stop = (single_event_dict['end']['date'])
                d_end = datetime.strptime(stop, DEFAULT_SERVER_DATE_FORMAT)
                allday = True
                d_end = d_end + timedelta(days=-1)
                stop = d_end.strftime(DEFAULT_SERVER_DATE_FORMAT)

            update_date = datetime.strptime(single_event_dict['updated'], "%Y-%m-%dT%H:%M:%S.%fz")
            result.update({
                'start': date,
                'stop': stop,
                'allday': allday
            })
        # NOTE(review): update_date is only bound inside the start/end
        # branch above; a cancelled payload would raise NameError here.
        result.update({
            'attendee_ids': attendee_record,
            'partner_ids': list(set(partner_record)),

            'name': single_event_dict.get('summary', 'Event'),
            'description': single_event_dict.get('description', False),
            'location': single_event_dict.get('location', False),
            'class': single_event_dict.get('visibility', 'public'),
            'oe_update_date': update_date,
        })

        if single_event_dict.get("recurrence", False):
            # Keep only the RRULE part, stripped of its "RRULE:" prefix.
            rrule = [rule for rule in single_event_dict["recurrence"] if rule.startswith("RRULE:")][0][6:]
            result['rrule'] = rrule

        if type == "write":
            res = calendar_event.write(cr, uid, event['id'], result, context=context)
        elif type == "copy":
            result['recurrence'] = True
            res = calendar_event.write(cr, uid, [event['id']], result, context=context)
        elif type == "create":
            res = calendar_event.create(cr, uid, result, context=context)

        if context['curr_attendee']:
            self.pool['calendar.attendee'].write(cr, uid, [context['curr_attendee']], {'oe_synchro_date': update_date, 'google_internal_event_id': single_event_dict.get('id', False)}, context)
        return res
    def remove_references(self, cr, uid, context=None):
        """Forget everything Google-related for the current user: tokens,
        calendar id, last sync date, and per-attendee sync markers.

        Always returns True.
        """
        current_user = self.pool['res.users'].browse(cr, uid, uid, context=context)
        reset_data = {
            'google_calendar_rtoken': False,
            'google_calendar_token': False,
            'google_calendar_token_validity': False,
            'google_calendar_last_sync_date': False,
            'google_calendar_cal_id': False,
        }

        all_my_attendees = self.pool['calendar.attendee'].search(cr, uid, [('partner_id', '=', current_user.partner_id.id)], context=context)
        self.pool['calendar.attendee'].write(cr, uid, all_my_attendees, {'oe_synchro_date': False, 'google_internal_event_id': False}, context=context)
        current_user.write(reset_data, context=context)
        return True
    def synchronize_events(self, cr, uid, ids, lastSync=True, context=None):
        """Run one synchronization pass for the current user.

        Chooses incremental mode (since last sync) or a full pass, pushes
        new OpenERP events to Google, binds recurrence exclusions, then
        pulls Google-side changes. Returns a status dict for the client
        ("need_reset" when the stored calendar id no longer matches).
        """
        if context is None:
            context = {}

        # def isValidSync(syncToken):
        #     gs_pool = self.pool['google.service']
        #     params = {
        #         'maxResults': 1,
        #         'fields': 'id',
        #         'access_token': self.get_token(cr, uid, context),
        #         'syncToken': syncToken,
        #     }
        #     url = "/calendar/v3/calendars/primary/events"
        #     status, response = gs_pool._do_request(cr, uid, url, params, type='GET', context=context)
        #     return int(status) != 410

        current_user = self.pool['res.users'].browse(cr, uid, uid, context=context)

        context_with_time = dict(context.copy(), ask_time=True)
        current_google = self.get_calendar_primary_id(cr, uid, context=context_with_time)
        if current_user.google_calendar_cal_id:
            # The user switched Google accounts since last sync: abort.
            if current_google != current_user.google_calendar_cal_id:
                return {
                    "status": "need_reset",
                    "info": {
                        "old_name": current_user.google_calendar_cal_id,
                        "new_name": current_google
                    },
                    "url": ''
                }

            if lastSync and self.get_last_sync_date(cr, uid, context=context) and not self.get_disable_since_synchro(cr, uid, context=context):
                lastSync = self.get_last_sync_date(cr, uid, context)
                _logger.info("Calendar Synchro - MODE SINCE_MODIFIED : %s !" % lastSync.strftime(DEFAULT_SERVER_DATETIME_FORMAT))
            else:
                lastSync = False
                _logger.info("Calendar Synchro - MODE FULL SYNCHRO FORCED")
        else:
            # First synchronization: remember the calendar id.
            current_user.write({'google_calendar_cal_id': current_google}, context=context)
            lastSync = False
            _logger.info("Calendar Synchro - MODE FULL SYNCHRO - NEW CAL ID")

        new_ids = []
        new_ids += self.create_new_events(cr, uid, context=context)
        new_ids += self.bind_recurring_events_to_google(cr, uid, context)

        res = self.update_events(cr, uid, lastSync, context)

        current_user.write({'google_calendar_last_sync_date': context_with_time.get('ask_time')}, context=context)
        return {
            "status": res and "need_refresh" or "no_new_event_form_google",
            "url": ''
        }
    def create_new_events(self, cr, uid, context=None):
        """Push OpenERP events that Google does not know yet.

        Finds the current user's attendee records with no
        google_internal_event_id, creates the (non-recurrence-instance)
        events on Google, stamps sync metadata back, and returns the
        list of new Google event ids.
        """
        if context is None:
            context = {}

        new_ids = []
        ev_obj = self.pool['calendar.event']
        att_obj = self.pool['calendar.attendee']
        user_obj = self.pool['res.users']
        myPartnerID = user_obj.browse(cr, uid, uid, context=context).partner_id.id

        context_norecurrent = context.copy()
        context_norecurrent['virtual_id'] = False

        my_att_ids = att_obj.search(cr, uid, [('partner_id', '=', myPartnerID),
                                              ('google_internal_event_id', '=', False),
                                              '|',
                                              ('event_id.stop', '>', self.get_minTime(cr, uid, context=context).strftime(DEFAULT_SERVER_DATETIME_FORMAT)),
                                              ('event_id.final_date', '>', self.get_minTime(cr, uid, context=context).strftime(DEFAULT_SERVER_DATETIME_FORMAT)),
                                              ], context=context_norecurrent)

        for att in att_obj.browse(cr, uid, my_att_ids, context=context):
            # Recurrence instances are handled by
            # bind_recurring_events_to_google, not created here.
            if not att.event_id.recurrent_id or att.event_id.recurrent_id == 0:
                st, response = self.create_an_event(cr, uid, att.event_id, context=context)
                if status_response(st):
                    update_date = datetime.strptime(response['updated'], "%Y-%m-%dT%H:%M:%S.%fz")
                    ev_obj.write(cr, uid, att.event_id.id, {'oe_update_date': update_date})
                    new_ids.append(response['id'])
                    att_obj.write(cr, uid, [att.id], {'google_internal_event_id': response['id'], 'oe_synchro_date': update_date})
                    # Commit per event so a later failure keeps progress.
                    cr.commit()
                else:
                    _logger.warning("Impossible to create event %s. [%s]" % (att.event_id.id, st))
                    _logger.warning("Response : %s" % response)
        return new_ids
def get_context_no_virtual(self, context):
context_norecurrent = context.copy()
context_norecurrent['virtual_id'] = False
context_norecurrent['active_test'] = False
return context_norecurrent
    def bind_recurring_events_to_google(self, cr, uid, context=None):
        """Attach OpenERP recurrence-instance events to their Google ids.

        Google names recurrence instances '<parent_id>_<start>' (date-only
        for all-day events, compact UTC datetime + 'Z' otherwise); this
        derives that id, PUTs the exclusion, and records the binding.
        Returns the list of derived Google ids that were bound.
        """
        if context is None:
            context = {}
        new_ids = []
        ev_obj = self.pool['calendar.event']
        att_obj = self.pool['calendar.attendee']
        user_obj = self.pool['res.users']
        myPartnerID = user_obj.browse(cr, uid, uid, context=context).partner_id.id

        context_norecurrent = self.get_context_no_virtual(context)
        my_att_ids = att_obj.search(cr, uid, [('partner_id', '=', myPartnerID), ('google_internal_event_id', '=', False)], context=context_norecurrent)

        for att in att_obj.browse(cr, uid, my_att_ids, context=context):
            if att.event_id.recurrent_id and att.event_id.recurrent_id > 0:
                new_google_internal_event_id = False
                source_event_record = ev_obj.browse(cr, uid, att.event_id.recurrent_id, context)
                source_attendee_record_id = att_obj.search(cr, uid, [('partner_id', '=', myPartnerID), ('event_id', '=', source_event_record.id)], context=context)
                source_attendee_record = att_obj.browse(cr, uid, source_attendee_record_id, context)[0]

                if att.event_id.recurrent_id_date and source_event_record.allday and source_attendee_record.google_internal_event_id:
                    # All-day instance id: parent id + '_YYYYMMDD'
                    new_google_internal_event_id = source_attendee_record.google_internal_event_id + '_' + att.event_id.recurrent_id_date.split(' ')[0].replace('-', '')
                elif att.event_id.recurrent_id_date and source_attendee_record.google_internal_event_id:
                    # Timed instance id: parent id + '_YYYYMMDDTHHMMSSZ'
                    new_google_internal_event_id = source_attendee_record.google_internal_event_id + '_' + att.event_id.recurrent_id_date.replace('-', '').replace(' ', 'T').replace(':', '') + 'Z'

                if new_google_internal_event_id:
                    #TODO WARNING, NEED TO CHECK THAT EVENT and ALL instance NOT DELETE IN GMAIL BEFORE !
                    # NOTE(review): the bare `except: pass` below silently
                    # swallows every failure (deliberate best-effort?) -
                    # consider narrowing to Exception and logging.
                    try:
                        st, response = self.update_recurrent_event_exclu(cr, uid, new_google_internal_event_id, source_attendee_record.google_internal_event_id, att.event_id, context=context)
                        if status_response(st):
                            att_obj.write(cr, uid, [att.id], {'google_internal_event_id': new_google_internal_event_id}, context=context)
                            new_ids.append(new_google_internal_event_id)
                            # Commit per instance so progress is kept.
                            cr.commit()
                        else:
                            _logger.warning("Impossible to create event %s. [%s]" % (att.event_id.id, st))
                            _logger.warning("Response : %s" % response)
                    except:
                        pass
        return new_ids
def update_events(self, cr, uid, lastSync=False, context=None):
    """Two-way synchronization between OpenERP and Google Calendar for the
    current user.

    When *lastSync* is given, only events changed since that date are
    considered (incremental sync); otherwise a full synchronization window
    (see get_minTime) is used.  Events from both sides are grouped into
    SyncEvent pairs, each pair computes the operation to apply
    (compute_OP), and the operations are then executed one by one.
    Returns True.
    """
    context = dict(context or {})

    calendar_event = self.pool['calendar.event']
    user_obj = self.pool['res.users']
    att_obj = self.pool['calendar.attendee']
    myPartnerID = user_obj.browse(cr, uid, uid, context=context).partner_id.id
    context_novirtual = self.get_context_no_virtual(context)

    if lastSync:
        # Incremental synchronization.
        try:
            all_event_from_google = self.get_event_synchro_dict(cr, uid, lastSync=lastSync, context=context)
        except urllib2.HTTPError, e:
            if e.code == 410:  # GONE, Google is lost.
                # we need to force the rollback from this cursor, because it locks my res_users but I need to write in this tuple before to raise.
                cr.rollback()
                registry = openerp.modules.registry.RegistryManager.get(request.session.db)
                with registry.cursor() as cur:
                    # Resetting the sync date forces a full synchro next time.
                    self.pool['res.users'].write(cur, uid, [uid], {'google_calendar_last_sync_date': False}, context=context)
            error_key = simplejson.loads(e.read())
            error_key = error_key.get('error', {}).get('message', 'nc')
            error_msg = "Google are lost... the next synchro will be a full synchro. \n\n %s" % error_key
            raise self.pool.get('res.config.settings').get_config_warning(cr, _(error_msg), context=context)

        # My attendances whose Google id shows up in the Google delta.
        my_google_att_ids = att_obj.search(cr, uid, [
            ('partner_id', '=', myPartnerID),
            ('google_internal_event_id', 'in', all_event_from_google.keys())
        ], context=context_novirtual)

        # My attendances whose event changed locally since the last sync.
        my_openerp_att_ids = att_obj.search(cr, uid, [
            ('partner_id', '=', myPartnerID),
            ('event_id.oe_update_date', '>', lastSync and lastSync.strftime(DEFAULT_SERVER_DATETIME_FORMAT) or self.get_minTime(cr, uid, context).strftime(DEFAULT_SERVER_DATETIME_FORMAT)),
            ('google_internal_event_id', '!=', False),
        ], context=context_novirtual)

        my_openerp_googleinternal_ids = att_obj.read(cr, uid, my_openerp_att_ids, ['google_internal_event_id', 'event_id'], context=context_novirtual)

        if self.get_print_log(cr, uid, context=context):
            _logger.info("Calendar Synchro - \n\nUPDATE IN GOOGLE\n%s\n\nRETRIEVE FROM OE\n%s\n\nUPDATE IN OE\n%s\n\nRETRIEVE FROM GG\n%s\n\n" % (all_event_from_google, my_google_att_ids, my_openerp_att_ids, my_openerp_googleinternal_ids))

        # Locally-changed events missing from the Google delta: fetch them
        # individually so both sides of the pair are known.
        for giid in my_openerp_googleinternal_ids:
            active = True  # if not sure, we request google
            if giid.get('event_id'):
                active = calendar_event.browse(cr, uid, int(giid.get('event_id')[0]), context=context_novirtual).active

            if giid.get('google_internal_event_id') and not all_event_from_google.get(giid.get('google_internal_event_id')) and active:
                one_event = self.get_one_event_synchro(cr, uid, giid.get('google_internal_event_id'), context=context)
                if one_event:
                    all_event_from_google[one_event['id']] = one_event

        my_att_ids = list(set(my_google_att_ids + my_openerp_att_ids))
    else:
        # Full synchronization: every already-bound event still in the window.
        domain = [
            ('partner_id', '=', myPartnerID),
            ('google_internal_event_id', '!=', False),
            '|',
            ('event_id.stop', '>', self.get_minTime(cr, uid, context).strftime(DEFAULT_SERVER_DATETIME_FORMAT)),
            ('event_id.final_date', '>', self.get_minTime(cr, uid, context).strftime(DEFAULT_SERVER_DATETIME_FORMAT)),
        ]

        # Select all events from OpenERP which have been already synchronized in gmail
        my_att_ids = att_obj.search(cr, uid, domain, context=context_novirtual)
        all_event_from_google = self.get_event_synchro_dict(cr, uid, lastSync=False, context=context)

    # Build the pairing structure:
    # event_to_synchronize[base_google_id][instance_google_id] -> SyncEvent
    event_to_synchronize = {}
    for att in att_obj.browse(cr, uid, my_att_ids, context=context):
        event = att.event_id

        # Recurrence instances are '<base>_<start>'; group them by base id.
        base_event_id = att.google_internal_event_id.rsplit('_', 1)[0]

        if base_event_id not in event_to_synchronize:
            event_to_synchronize[base_event_id] = {}

        if att.google_internal_event_id not in event_to_synchronize[base_event_id]:
            event_to_synchronize[base_event_id][att.google_internal_event_id] = SyncEvent()

        ev_to_sync = event_to_synchronize[base_event_id][att.google_internal_event_id]

        # Fill the OpenERP side of the pair.
        ev_to_sync.OE.attendee_id = att.id
        ev_to_sync.OE.event = event
        ev_to_sync.OE.found = True
        ev_to_sync.OE.event_id = event.id
        ev_to_sync.OE.isRecurrence = event.recurrency
        ev_to_sync.OE.isInstance = bool(event.recurrent_id and event.recurrent_id > 0)
        ev_to_sync.OE.update = event.oe_update_date
        ev_to_sync.OE.status = event.active
        ev_to_sync.OE.synchro = att.oe_synchro_date

    for event in all_event_from_google.values():
        event_id = event.get('id')
        base_event_id = event_id.rsplit('_', 1)[0]

        if base_event_id not in event_to_synchronize:
            event_to_synchronize[base_event_id] = {}

        if event_id not in event_to_synchronize[base_event_id]:
            event_to_synchronize[base_event_id][event_id] = SyncEvent()

        ev_to_sync = event_to_synchronize[base_event_id][event_id]

        # Fill the Google side of the pair.
        ev_to_sync.GG.event = event
        ev_to_sync.GG.found = True
        ev_to_sync.GG.isRecurrence = bool(event.get('recurrence', ''))
        ev_to_sync.GG.isInstance = bool(event.get('recurringEventId', 0))
        ev_to_sync.GG.update = event.get('updated', None)  # if deleted, no date without browse event
        if ev_to_sync.GG.update:
            # Normalize RFC3339 to the server datetime layout.
            ev_to_sync.GG.update = ev_to_sync.GG.update.replace('T', ' ').replace('Z', '')
        ev_to_sync.GG.status = (event.get('status') != 'cancelled')

    ######################
    #   PRE-PROCESSING   #
    ######################
    # Let every pair decide which operation (Create/Update/Delete/Exclude/
    # NothingToDo) must be applied and in which direction.
    for base_event in event_to_synchronize:
        for current_event in event_to_synchronize[base_event]:
            event_to_synchronize[base_event][current_event].compute_OP(modeFull=not lastSync)
        if self.get_print_log(cr, uid, context=context):
            if not isinstance(event_to_synchronize[base_event][current_event].OP, NothingToDo):
                _logger.info(event_to_synchronize[base_event])

    ######################
    #      DO ACTION     #
    ######################
    for base_event in event_to_synchronize:
        # Sort instances so the base event (index 0) is processed first.
        event_to_synchronize[base_event] = sorted(event_to_synchronize[base_event].iteritems(), key=operator.itemgetter(0))
        for current_event in event_to_synchronize[base_event]:
            cr.commit()
            event = current_event[1]  # event is an Sync Event !
            actToDo = event.OP
            actSrc = event.OP.src

            context['curr_attendee'] = event.OE.attendee_id

            if isinstance(actToDo, NothingToDo):
                continue
            elif isinstance(actToDo, Create):
                context_tmp = context.copy()
                context_tmp['NewMeeting'] = True
                if actSrc == 'GG':
                    # Create the OpenERP event from the Google payload, then
                    # bind the attendee to the Google id.
                    res = self.update_from_google(cr, uid, False, event.GG.event, "create", context=context_tmp)
                    event.OE.event_id = res
                    meeting = calendar_event.browse(cr, uid, res, context=context)
                    attendee_record_id = att_obj.search(cr, uid, [('partner_id', '=', myPartnerID), ('event_id', '=', res)], context=context)
                    self.pool['calendar.attendee'].write(cr, uid, attendee_record_id, {'oe_synchro_date': meeting.oe_update_date, 'google_internal_event_id': event.GG.event['id']}, context=context_tmp)
                elif actSrc == 'OE':
                    raise "Should be never here, creation for OE is done before update !"
                #TODO Add to batch
            elif isinstance(actToDo, Update):
                if actSrc == 'GG':
                    self.update_from_google(cr, uid, event.OE.event, event.GG.event, 'write', context)
                elif actSrc == 'OE':
                    self.update_to_google(cr, uid, event.OE.event, event.GG.event, context)
            elif isinstance(actToDo, Exclude):
                # One instance of a recurrence diverges from its parent.
                if actSrc == 'OE':
                    self.delete_an_event(cr, uid, current_event[0], context=context)
                elif actSrc == 'GG':
                    # Derive the OpenERP virtual id suffix from the Google
                    # instance suffix ('YYYYMMDD[THHMMSSZ]').
                    new_google_event_id = event.GG.event['id'].rsplit('_', 1)[1]
                    if 'T' in new_google_event_id:
                        new_google_event_id = new_google_event_id.replace('T', '')[:-1]
                    else:
                        new_google_event_id = new_google_event_id + "000000"

                    if event.GG.status:
                        parent_event = {}
                        if not event_to_synchronize[base_event][0][1].OE.event_id:
                            main_ev = att_obj.search_read(cr, uid, [('google_internal_event_id', '=', event.GG.event['id'].rsplit('_', 1)[0])], fields=['event_id'], context=context_novirtual)
                            event_to_synchronize[base_event][0][1].OE.event_id = main_ev[0].get('event_id')[0]

                        parent_event['id'] = "%s-%s" % (event_to_synchronize[base_event][0][1].OE.event_id, new_google_event_id)
                        res = self.update_from_google(cr, uid, parent_event, event.GG.event, "copy", context)
                    else:
                        parent_oe_id = event_to_synchronize[base_event][0][1].OE.event_id
                        calendar_event.unlink(cr, uid, "%s-%s" % (parent_oe_id, new_google_event_id), can_be_deleted=True, context=context)
            elif isinstance(actToDo, Delete):
                if actSrc == 'GG':
                    try:
                        self.delete_an_event(cr, uid, current_event[0], context=context)
                    except Exception, e:
                        error = simplejson.loads(e.read())
                        error_nr = error.get('error', {}).get('code')
                        # if already deleted from gmail or never created
                        if error_nr in (404, 410,):
                            pass
                        else:
                            raise e
                elif actSrc == 'OE':
                    calendar_event.unlink(cr, uid, event.OE.event_id, can_be_deleted=False, context=context)
    return True
def check_and_sync(self, cr, uid, oe_event, google_event, context):
    """Compare last-modification stamps and push or pull the event.

    Pushes to Google when the OpenERP record is strictly newer, pulls from
    Google when the Google payload is strictly newer, does nothing on a tie.
    """
    oe_update = datetime.strptime(oe_event.oe_update_date, "%Y-%m-%d %H:%M:%S.%f")
    gg_update = datetime.strptime(google_event['updated'], "%Y-%m-%dT%H:%M:%S.%fz")
    if oe_update > gg_update:
        self.update_to_google(cr, uid, oe_event, google_event, context)
    elif oe_update < gg_update:
        self.update_from_google(cr, uid, oe_event, google_event, 'write', context)
def get_sequence(self, cr, uid, instance_id, context=None):
    """Fetch the 'sequence' counter of a Google event (0 when absent)."""
    gs_pool = self.pool['google.service']
    request_params = {
        'fields': 'sequence',
        'access_token': self.get_token(cr, uid, context)
    }
    request_headers = {'Content-type': 'application/json'}
    url = "/calendar/v3/calendars/%s/events/%s" % ('primary', instance_id)
    status, content = gs_pool._do_request(cr, uid, url, request_params, request_headers, type='GET', context=context)
    return content.get('sequence', 0)
#################################
## MANAGE CONNEXION TO GMAIL ##
#################################
def get_token(self, cr, uid, context=None):
    """Return a valid OAuth2 access token for the current user, refreshing
    it first when no validity is stored or it expires within one minute."""
    current_user = self.pool['res.users'].browse(cr, uid, uid, context=context)
    if not current_user.google_calendar_token_validity or \
            datetime.strptime(current_user.google_calendar_token_validity.split('.')[0], DEFAULT_SERVER_DATETIME_FORMAT) < (datetime.now() + timedelta(minutes=1)):
        self.do_refresh_token(cr, uid, context=context)
        # Re-read the browse record so the freshly written token is visible.
        current_user.refresh()

    return current_user.google_calendar_token
def get_last_sync_date(self, cr, uid, context=None):
    """Return the current user's last Google-Calendar sync date as a
    datetime, or False when the user has never synchronized.

    (cleanup: the original added a no-op ``+ timedelta(minutes=0)`` to the
    parsed date.)
    """
    current_user = self.pool['res.users'].browse(cr, uid, uid, context=context)
    last_sync = current_user.google_calendar_last_sync_date
    return last_sync and datetime.strptime(last_sync, DEFAULT_SERVER_DATETIME_FORMAT) or False
def do_refresh_token(self, cr, uid, context=None):
    """Exchange the stored refresh token for a new access token and persist
    it (with its expiry) on the current user."""
    user = self.pool['res.users'].browse(cr, uid, uid, context=context)
    service = self.pool['google.service']
    token_data = service._refresh_google_token_json(cr, uid, user.google_calendar_rtoken, self.STR_SERVICE, context=context)

    values = {
        'google_%s_token_validity' % self.STR_SERVICE: datetime.now() + timedelta(seconds=token_data.get('expires_in')),
        'google_%s_token' % self.STR_SERVICE: token_data.get('access_token'),
    }
    # Written as SUPERUSER_ID, consistently with set_all_tokens.
    self.pool['res.users'].write(cr, SUPERUSER_ID, uid, values, context=context)
def need_authorize(self, cr, uid, context=None):
    """True when the current user has no refresh token yet, i.e. the OAuth
    authorization flow still has to be run."""
    user = self.pool['res.users'].browse(cr, uid, uid, context=context)
    # Unset char fields read back as the boolean False in the old API.
    return user.google_calendar_rtoken is False
def get_calendar_scope(self, RO=False):
    """Return the Google Calendar OAuth scope URL.

    With RO truthy, the read-only variant of the scope is returned.
    """
    suffix = '.readonly' if RO else ''
    return 'https://www.googleapis.com/auth/calendar%s' % (suffix)
def authorize_google_uri(self, cr, uid, from_url='http://www.openerp.com', context=None):
    """Build and return the OAuth2 authorization URL for the calendar
    scope, redirecting back to *from_url* once granted."""
    scope = self.get_calendar_scope()
    return self.pool['google.service']._get_authorize_uri(cr, uid, from_url, self.STR_SERVICE, scope=scope, context=context)
def can_authorize_google(self, cr, uid, context=None):
    """Only ERP managers are allowed to run the Google authorization."""
    user_pool = self.pool['res.users']
    return user_pool.has_group(cr, uid, 'base.group_erp_manager')
def set_all_tokens(self, cr, uid, authorization_code, context=None):
    """Trade an OAuth2 authorization code for refresh + access tokens and
    store them (with the access-token expiry) on the current user."""
    service = self.pool['google.service']
    token_data = service._get_google_token_json(cr, uid, authorization_code, self.STR_SERVICE, context=context)

    values = {
        'google_%s_rtoken' % self.STR_SERVICE: token_data.get('refresh_token'),
        'google_%s_token_validity' % self.STR_SERVICE: datetime.now() + timedelta(seconds=token_data.get('expires_in')),
        'google_%s_token' % self.STR_SERVICE: token_data.get('access_token'),
    }
    # Written as SUPERUSER_ID so non-admin users can complete the flow.
    self.pool['res.users'].write(cr, SUPERUSER_ID, uid, values, context=context)
def get_minTime(self, cr, uid, context=None):
    """Return the earliest datetime covered by a full synchronization
    (now minus 'calendar.week_synchro' weeks, default 13).

    bugfix: ir.config_parameter.get_param returns a *string* once the key
    is set in the database, and ``timedelta(weeks=<str>)`` raises
    TypeError — cast explicitly (the int default is unaffected).
    """
    number_of_week = int(self.pool['ir.config_parameter'].get_param(cr, uid, 'calendar.week_synchro', default=13))
    return datetime.now() - timedelta(weeks=number_of_week)
def get_need_synchro_attendee(self, cr, uid, context=None):
    """Config flag 'calendar.block_synchro_attendee' (default True).

    NOTE(review): get_param returns the stored value as a *string* once the
    key is set, so the string 'False' is truthy for callers — confirm
    intended.
    """
    return self.pool['ir.config_parameter'].get_param(cr, uid, 'calendar.block_synchro_attendee', default=True)
def get_disable_since_synchro(self, cr, uid, context=None):
    """Config flag 'calendar.block_since_synchro' (default False).

    NOTE(review): once set, get_param returns a string, so 'False' is
    truthy for callers — confirm intended.
    """
    return self.pool['ir.config_parameter'].get_param(cr, uid, 'calendar.block_since_synchro', default=False)
def get_print_log(self, cr, uid, context=None):
    """Config flag 'calendar.debug_print' enabling verbose sync logging.

    NOTE(review): once set, get_param returns a string, so 'False' is
    truthy for callers — confirm intended.
    """
    return self.pool['ir.config_parameter'].get_param(cr, uid, 'calendar.debug_print', default=False)
class res_users(osv.Model):
    """Per-user Google-Calendar OAuth credentials and sync bookkeeping."""
    _inherit = 'res.users'

    _columns = {
        'google_calendar_rtoken': fields.char('Refresh Token'),  # long-lived OAuth2 refresh token
        'google_calendar_token': fields.char('User token'),  # short-lived access token
        'google_calendar_token_validity': fields.datetime('Token Validity'),  # access-token expiry
        'google_calendar_last_sync_date': fields.datetime('Last synchro date'),
        'google_calendar_cal_id': fields.char('Calendar ID', help='Last Calendar ID who has been synchronized. If it is changed, we remove \
all links between GoogleID and OpenERP Google Internal ID')
    }
class calendar_event(osv.Model):
    """calendar.event extended with the 'oe_update_date' stamp the Google
    synchronization uses to detect local modifications."""
    _inherit = "calendar.event"

    def get_fields_need_update_google(self, cr, uid, context=None):
        """Fields whose modification must mark the event for a Google push."""
        return ['name', 'description', 'allday', 'date', 'date_end', 'stop', 'attendee_ids', 'location', 'class', 'active']

    def write(self, cr, uid, ids, vals, context=None):
        """Stamp 'oe_update_date' when a Google-relevant field changes,
        unless the write already sets the stamp itself or originates from a
        synchro-created meeting ('NewMeeting' in context)."""
        if context is None:
            context = {}

        sync_fields = set(self.get_fields_need_update_google(cr, uid, context))
        if (set(vals.keys()) & sync_fields) and 'oe_update_date' not in vals.keys() and 'NewMeeting' not in context:
            vals['oe_update_date'] = datetime.now()

        return super(calendar_event, self).write(cr, uid, ids, vals, context=context)

    def copy(self, cr, uid, id, default=None, context=None):
        """Keep the sync stamp coherent on duplication: recurrence-instance
        copies are marked as locally updated, plain copies start unstamped."""
        default = default or {}
        if default.get('write_type', False):
            del default['write_type']
        elif default.get('recurrent_id', False):
            default['oe_update_date'] = datetime.now()
        else:
            default['oe_update_date'] = False
        return super(calendar_event, self).copy(cr, uid, id, default, context)

    def unlink(self, cr, uid, ids, can_be_deleted=False, context=None):
        # Same as the parent unlink but exposing can_be_deleted (default
        # False) so the synchronization can control hard deletion.
        return super(calendar_event, self).unlink(cr, uid, ids, can_be_deleted=can_be_deleted, context=context)

    _columns = {
        # Timestamp of the last local modification relevant for Google.
        'oe_update_date': fields.datetime('OpenERP Update Date'),
    }
class calendar_attendee(osv.Model):
    """calendar.attendee extended with the bound Google event id and the
    date of its last push to Google."""
    _inherit = 'calendar.attendee'

    _columns = {
        'google_internal_event_id': fields.char('Google Calendar Event Id'),
        'oe_synchro_date': fields.datetime('OpenERP Synchro Date'),  # last push to Google
    }

    _sql_constraints = [('google_id_uniq', 'unique(google_internal_event_id,partner_id,event_id)', 'Google ID should be unique!')]

    def write(self, cr, uid, ids, vals, context=None):
        """Flag the related event as locally modified so the next sync acts
        on it, unless the write comes from the synchronization itself."""
        if context is None:
            context = {}

        for id in ids:
            ref = vals.get('event_id', self.browse(cr, uid, id, context=context).event_id.id)

            # If attendees are updated, we need to specify that next synchro need an action
            # Except if it come from an update_from_google
            if not context.get('curr_attendee', False) and not context.get('NewMeeting', False):
                self.pool['calendar.event'].write(cr, uid, ref, {'oe_update_date': datetime.now()}, context)
        return super(calendar_attendee, self).write(cr, uid, ids, vals, context=context)
| agpl-3.0 |
metalpen1984/SciTool_Py | gridtransU.py | 1 | 15622 | #!/usr/bin/python
#Purpose: 1. To convert the grid data of following format to each other:
# .sa .xyz .asc .pfb
# (simple ascii, xyz, ascii grid, parflow binary)
# 2. To simply plot the figure from the single output file.
#ChangeLog: 20150429: Changing the reading method of read pfb. Make it read faster.
#
import re,math
import struct
plot_funk=1
try:
import numpy as numpy
except ImportError:
print("You no install numpy? ")
print("You could not use the plot function of this script until you install Numpy")
plot_funk=0
try:
from mpl_toolkits.mplot3d import axes3d
import matplotlib.pyplot as plt
from matplotlib import cm
from pylab import *
import matplotlib.animation as animation
except ImportError:
print("You could not use the plot function of this script until you install Matplotlib")
plot_funk=0
# -----
# Strip the blank while readline
# -----
#Read and Strip the Blanks
#Default re_pattern : \s
# if len(re_pattern) == 0:
# str_arr=re.split('\s',fdata.readline())
# elif len(re_pattern) == 1:
# str_arr=re.split(re_pattern,fdata.readline())
# else:
# print("Wrong for re_pattern, Bug#001")
# new_arr=[]
# for s in str_arr:
# if s =="":
# pass
# else:
# new_arr.append(s)
# return new_arr
def stripblnk(arr, *num_typ):
    """Drop empty strings from *arr* and optionally cast the kept tokens.

    Parameters
    ----------
    arr : sequence of str
        Tokens, typically the output of a re.split() that may contain ''.
    *num_typ : str, optional
        Target type for the kept tokens: 'int', 'float' or '' (keep as
        string).  bugfix: when omitted, the original crashed with
        IndexError on num_typ[0]; now it defaults to keeping strings.

    Returns
    -------
    list
        The non-empty tokens, converted as requested ([] on a bad type,
        after printing a warning).
    """
    typ = num_typ[0] if num_typ else ''
    if typ == 'int':
        conv = int
    elif typ == 'float':
        conv = float
    elif typ == '':
        conv = None
    else:
        print("WRONG num_typ!")
        return []
    new_arr = []
    for item in arr:
        if item == "":
            continue
        new_arr.append(conv(item) if conv else item)
    return new_arr
def tryopen(sourcefile, ag):
    """Open *sourcefile* with mode *ag*.

    Returns the open file object, or the string "error" (after printing a
    message) when the file cannot be opened.  bugfix: the bare ``except:``
    also swallowed KeyboardInterrupt/SystemExit; only I/O failures are
    trapped now.
    """
    try:
        return open(sourcefile, ag)
    except (IOError, OSError):
        print("No such file.")
        return "error"
def checkformat(sourcefile):
    """Return the extension of *sourcefile*: everything after the last '.'
    (the whole name when it contains no dot)."""
    return sourcefile.split('.')[-1]
# -----
# Read .pfb
# -----
def readpfb(sourcefile, if_silence = True):
    """Read a ParFlow binary (.pfb) grid file.

    All file values are big-endian.  Returns
    (result_arr, nx, ny, nz, dx, dy, dz) where result_arr is a nested list
    indexed [z][y][x].
    """
    opf = tryopen(sourcefile,'rb')
    if if_silence == False:
        print("reading source file {0:s}".format(sourcefile))
    # File header: grid origin (3 doubles), grid size (3 ints),
    # cell spacing (3 doubles) and the number of subgrids (1 int).
    t1=struct.unpack('>ddd',opf.read(24))
    tn=struct.unpack('>iii',opf.read(12))
    td=struct.unpack('>ddd',opf.read(24))
    tns=struct.unpack('>i',opf.read(4))
    x1,y1,z1=t1
    nx,ny,nz=tn
    dx,dy,dz=td
    ns=tns[0]
    result_arr=[[[ 0 for ii in range(nx) ] for jj in range(ny)] for kk in range(nz)]
    for isub in range(0,ns):
        # Subgrid header: offsets (ix,iy,iz), extents (nnx,nny,nnz) and
        # refinement factors (rx,ry,rz) — nine big-endian ints.
        ix,iy,iz,nnx,nny,nnz,rx,ry,rz=struct.unpack('>9i',opf.read(36))
        # One bulk unpack per subgrid instead of one struct call per value
        # (the faster reading method noted in the 20150429 changelog).
        tmp_total = nnx * nny * nnz
        tvalue = struct.unpack('>{0:d}d'.format(tmp_total), opf.read(8*tmp_total))
        for k in range(nnz):
            for j in range(nny):
                for i in range(nnx):
                    # Data is z-major within the subgrid; place it at the
                    # subgrid's offset in the full grid.
                    result_arr[k+iz][j+iy][i+ix]=tvalue[ k*(nny*nnx) + j*nnx + i ]
    opf.close()
    if if_silence == False:
        print("Completed reading pfb format from {0}".format(sourcefile))
    return result_arr,nx,ny,nz,dx,dy,dz
# -----
# Read .sa
# -----
def readsa(sourcefile):
    """Read a ParFlow simple-ascii (.sa) file: a "nx ny nz" header line
    followed by one value per line.

    Returns (result_arr, nx, ny, nz) with result_arr indexed [y][x].
    NOTE(review): only the first ny*nx values are read, so for nz > 1 only
    the first z-slab is returned — confirm whether 3-D support is intended.
    """
    print("reading source file {0:s}".format(sourcefile))
    result_arr=[]
    opf = tryopen(sourcefile,'r')
    # Header line: "nx ny nz"
    headt=re.split('\s',opf.readline().strip())
    head=stripblnk(headt,'int')
    nx=int(head[0])
    ny=int(head[1])
    nz=int(head[2])
    for j in range(0,ny):
        tmp=[]
        for i in range(0,nx):
            # One value per line; only the first token is used.
            ans=re.split('\s',opf.readline().strip())
            tmp.append(float(ans[0]))
        result_arr.append(tmp)
    print("Completed reading sa format from {0}".format(sourcefile))
    return result_arr,nx,ny,nz
# -----
# Read .dat
# -----
def readdat(sourcefile, str_null="noData", num_null=-999.999, num_pos=[]):
    """Read a whitespace-delimited table into columns.

    Lines starting with '#' are comments.  Cells equal to *str_null*
    become *num_null*; every other cell is converted to float.  When
    *num_pos* is given, only those column indices are filled.

    Returns result_arr indexed [column][row], or None when the file holds
    nothing but comments.
    """
    opf = tryopen(sourcefile, 'r')
    opfchk = tryopen(sourcefile, 'r')
    print("reading source file {0:s}".format(sourcefile))
    chk_lines = opfchk.readlines()
    num_totallines = len(chk_lines)
    ncols = 0
    num_notnum = 0
    # First pass: count leading '#' comment lines and take the column count
    # from the first data line.  bugfix: the first-character test used the
    # pattern "\s" (never matches '#', IndexError on space-free lines);
    # use "." as readcsv does.
    for n in range(num_totallines):
        line_in = chk_lines[n]
        c_first = re.findall(".", line_in.strip())
        if c_first and c_first[0] == "#":
            num_notnum += 1
        else:
            ncols = len(re.split("\s", line_in.strip()))
            break
    result_arr = None
    if ncols == 0:
        print("something wrong with the input file! (all comments?)")
    else:
        del opfchk
        nrows = num_totallines - num_notnum
        result_arr = [[num_null for j in range(nrows)] for i in range(ncols)]
        # Second pass: walk every line (bugfix: the loop previously stopped
        # nrows lines in, dropping the last num_notnum data rows), skipping
        # comments; data rows are shifted up by the comment count.
        for j in range(0, nrows + num_notnum):
            line_in = opf.readline()
            c_first = re.findall(".", line_in.strip())
            if c_first and c_first[0] == "#":
                pass
            else:
                arr_in = re.split("\s", line_in.strip())
                if len(num_pos) == 0:
                    for i in range(ncols):
                        chk_val = arr_in[i]
                        if chk_val == str_null:
                            result_arr[i][j - num_notnum] = num_null
                        else:
                            # bugfix: this branch also stored num_null,
                            # discarding every value in the file.
                            result_arr[i][j - num_notnum] = float(chk_val)
                else:
                    for i in num_pos:
                        result_arr[i][j - num_notnum] = float(arr_in[i])
    return result_arr
# -----
# Read .csv
# -----
def readcsv(sourcefile, str_null="noData", num_null=-999.999, if_allnum=False):
    """Read a comma-separated file into columns.

    Lines starting with '#' are comments.  Cells equal to *str_null* are
    replaced by *num_null*; other cells stay strings unless *if_allnum* is
    True, in which case they are converted to float.

    Returns result_arr indexed [column][row] (None implicitly when the
    file contains nothing but comments).
    """
    opf = tryopen(sourcefile,'r')
    opfchk = tryopen(sourcefile,'r')
    print("reading source file {0:s}".format(sourcefile))
    chk_lines = opfchk.readlines()
    num_totallines = len(chk_lines)
    ncols = 0
    num_notnum = 0
    # First pass: count the leading comment lines and take the column
    # count from the first data line.
    for n in range(num_totallines):
        line_in = chk_lines[n]
        #print line_in
        c_first = re.findall(".",line_in.strip())
        if c_first[0] == "#":
            num_notnum += 1
        else:
            ncols = len( re.split(",",line_in.strip()) )
            break
    if ncols == 0:
        print("something wrong with the input file! (all comments?)")
    else:
        del opfchk
        nrows=num_totallines - num_notnum
        result_arr=[[num_null for j in range(nrows)] for i in range(ncols)]
        # Second pass: read every line, skipping comments; data rows are
        # shifted up by the number of comment lines seen.
        for j in range(0,nrows + num_notnum):
            # chk if comment
            line_in = opf.readline()
            c_first = re.findall(".",line_in.strip())[0]
            if c_first == "#":
                pass
            else:
                arr_in = re.split(",",line_in.strip())
                for i in range(ncols):
                    chk_val = arr_in[i]
                    if chk_val == str_null:
                        result_arr[i][j-num_notnum] = num_null
                    else:
                        if if_allnum:
                            result_arr[i][j-num_notnum] = float(chk_val)
                        else:
                            result_arr[i][j-num_notnum] = chk_val
        print("read ncol:{0:d}, nrow:{1:d}".format(ncols,nrows))
        return result_arr
# -----
# Read .asc (ascii grid from ESRI)
# -----
def readascgrid(sourcefile):
    """Read an ESRI ASCII grid (.asc) file.

    Returns (result_arr, ncols, nrows, xllcorner, yllcorner, cellsize,
    nodata_v) with result_arr indexed [row][col] as floats.

    bugfixes: (1) each data row applied int() to the *list* of tokens,
    raising TypeError — cells are now converted individually (to float,
    since grid values need not be integral); (2) header values are split
    on whitespace runs, so multiple spaces/tabs no longer break parsing.
    """
    opf = tryopen(sourcefile, 'r')
    print("reading source file {0:s}".format(sourcefile))
    # Six header lines: "<keyword> <value>"
    ncols = int(opf.readline().strip().split()[1])
    nrows = int(opf.readline().strip().split()[1])
    xllcorner = float(opf.readline().strip().split()[1])
    yllcorner = float(opf.readline().strip().split()[1])
    cellsize = float(opf.readline().strip().split()[1])
    nodata_v = float(opf.readline().strip().split()[1])
    result_arr = []
    for j in range(0, nrows):
        valuet = [float(v) for v in opf.readline().strip().split()]
        result_arr.append(valuet)
    print("Completed reading ascii-grid format from {0}".format(sourcefile))
    return result_arr, ncols, nrows, xllcorner, yllcorner, cellsize, nodata_v
# -----
# Read .xyz
# -----
def readxyz(sourcefile,*nxny):
    """Read an .xyz file ("x y z" per line, row-major) into a 2-D grid.

    Pass nx and ny as two extra arguments, or pass nothing and let the
    function detect the row width by scanning for the first repetition of
    the leading x coordinate.

    Returns (result_arr, ncol, nrow, xll, yll) with result_arr indexed
    [row][col] holding the z values.
    """
    chk = tryopen(sourcefile,'r')
    print("reading source file {0:s}".format(sourcefile))
    opf = tryopen(sourcefile,'r')
    result_arr=[]
    if len(nxny) == 2:
        print("Specific nx and ny is indicated.")
        ncol=nxny[0]
        nrow=nxny[1]
    elif len(nxny) == 0:
        print("Checking the nx and ny")
        count=1
        k=chk.readlines()
        start_col=float(re.split('\s',k[0].strip())[0])
        check = -9999
        # Scan until the x coordinate wraps back to the first value:
        # that index is the number of columns.
        while check != 0.0:
            check_col=float(re.split('\s',k[count].strip())[0])
            check=check_col-start_col
            if check ==0.0 :
                ncol=count
            else:
                count=count+1
        # NOTE(review): integer division under Python 2; float under
        # Python 3 — confirm the intended interpreter.
        nrow=len(k)/ncol
    for j in range(0,nrow):
        valuex=[]
        for i in range(0,ncol):
            tline=re.split('\s',opf.readline().strip())
            line=stripblnk(tline,'float')
            valuex.append(line[2])
            # Corner/reference coordinates picked up along the way.
            if i == 0 and j == 0:
                xll=float(line[0])
                print('xll: {0}'.format(xll))
            if i == 1 and j == 0:
                refx=float(line[0])
                print('refx: {0}'.format(refx))
            if i == 0 and j == 1:
                refy=float(line[1])
                print('refy: {0}'.format(refy))
            if i == ncol-1 and j == nrow-1:
                yll=float(line[1])
                print('yll: {0}'.format(yll))
        result_arr.append(valuex)
    #dx=xll-refx
    #dy=refy-yll
    print("Completed reading ascii-grid format from {0}".format(sourcefile))
    #print result_arr
    return result_arr,ncol,nrow,xll,yll,#dx,dy
# -----
# Read Custom 1 col raster
# -----
def read1col(sourcefile, nx, ny, nz, *skiplines):
    """Read an nx*ny grid stored one value per line (row-major), optionally
    skipping a number of header lines first.

    Parameters mirror readsa(); *skiplines*, when given, holds the number
    of leading lines to discard.  Returns (result_arr, nx, ny, nz) with
    result_arr indexed [y][x].

    bugfix: the skip loop referenced the misspelled name 'skipelines',
    raising NameError whenever a skip count was supplied.
    """
    print("reading source file {0:s}".format(sourcefile))
    opf = tryopen(sourcefile, 'r')
    result_arr = []
    if len(skiplines) == 0:
        pass
    else:
        for m in range(0, skiplines[0]):
            opf.readline()
    for j in range(0, ny):
        tmp = []
        for i in range(0, nx):
            ans = re.split('\s', opf.readline().strip())
            tmp.append(float(ans[0]))
        result_arr.append(tmp)
    print("Completed reading 1-column format from {0}".format(sourcefile))
    return result_arr, nx, ny, nz
# -----
# Read Custom 2d grid raster
# -----
def read2d(sourcefile,nx,ny,num_typ,*skiplines):
    """Read a 2-D grid stored one row per line, optionally skipping header
    lines first.

    num_typ is the cell type passed to stripblnk ('int', 'float' or '').
    Returns (result_arr, nx, ny) with result_arr indexed [y][x].
    """
    print("reading source file {0:s}".format(sourcefile))
    opf = tryopen(sourcefile,'r')
    result_arr=[]
    if len(skiplines) == 0:
        pass
    else:
        # Discard the requested number of header lines.
        for m in range(0,skiplines[0]):
            opf.readline()
    for j in range(0,ny):
        ans=re.split('\s',opf.readline().strip())
        t_arr=stripblnk(ans,num_typ)
        result_arr.append(t_arr)
    return result_arr,nx,ny
# -----
# Write from 2D array to 1D array
# -----
# -----
# Write from 2D array to 2D array
# -----
# -----
# Write .xyz
# -----
def writexyz(write_file_name, input_arr, ncols, nrows, xllco, yllco, cellsize, nodata_value):
    """Write a 2-D grid (indexed [row][col]) as an .xyz file: one
    "x y z" line per cell, row-major, with row 0 at the top of the grid.

    bugfixes: the file was opened without the 'w' mode; the loops used the
    undefined names 'ncows' and 'value'; the y coordinate was computed from
    the column index i instead of the row index j.
    """
    wtf = open(write_file_name, 'w')
    for j in range(nrows):
        for i in range(ncols):
            wtf.write("{0} {1} {2}\n".format(xllco + i * cellsize,
                                             yllco + nrows * cellsize - j * cellsize,
                                             input_arr[j][i]))
    wtf.close()
# -----
# Write .pfb
# -----
def writepfb(write_file_name, input_arr, nx, ny, nz, dx=0, dy=0, dz=0, x=0, y=0, z=0, ns=1, nodata_value=-999.999, rx=1, ry=1, rz=1):
    """Write a 3-D array (indexed [z][y][x]) as a ParFlow binary (.pfb)
    file readable by readpfb().  All values are big-endian.

    bugfixes: the file is now opened in binary mode ('wb', required on
    Python 3 and for correctness on Windows); the third subgrid-header
    triple wrote the *double* spacings dx,dy,dz with the int format '>3i'
    — per the 9-int subgrid header that readpfb unpacks, it must hold the
    integer refinement factors (new keyword args rx, ry, rz, default 1).
    """
    wtf = open(write_file_name, "wb")
    # File header: origin, size, spacing, subgrid count.
    wtf.write(struct.pack('>3d', x, y, z))
    wtf.write(struct.pack('>3i', nx, ny, nz))
    wtf.write(struct.pack('>3d', dx, dy, dz))
    wtf.write(struct.pack('>1i', ns))
    for isub in range(0, ns):
        iz, iy, ix = int(z), int(y), int(x)
        nnz, nny, nnx = int(nz), int(ny), int(nx)
        # Subgrid header: offsets, extents, refinement factors (9 ints).
        wtf.write(struct.pack('>3i', 0, 0, 0))
        wtf.write(struct.pack('>3i', nx, ny, nz))
        wtf.write(struct.pack('>3i', rx, ry, rz))
        for i in range(iz, iz + nnz):
            for j in range(iy, iy + nny):
                for k in range(ix, ix + nnx):
                    wtf.write(struct.pack('>d', input_arr[i][j][k]))
    wtf.close()
# -----
# Write .sa
# -----
def writesa(write_file_name, input_arr, nz, ny, nx):
    """Write a grid as a ParFlow simple-ascii (.sa) file: a "nx ny nz"
    header followed by one value per line.

    Accepts either a legacy 2-D array indexed [y][x] (use nz=1) or a true
    3-D array indexed [z][y][x].  bugfix: the z loop ignored its index and
    rewrote the same 2-D slab nz times; 3-D input is now written slab by
    slab, while 2-D input keeps the original behaviour.
    """
    wtf = open(write_file_name, "w")
    wtf.write('{0} {1} {2}\n'.format(nx, ny, nz))
    # Detect nesting depth: a 3-D list has lists at the second level.
    try:
        is_3d = isinstance(input_arr[0][0], (list, tuple))
    except (IndexError, TypeError):
        is_3d = False
    for k in range(0, nz):
        plane = input_arr[k] if is_3d else input_arr
        for j in range(0, ny):
            for i in range(0, nx):
                wtf.write(str(plane[j][i]) + '\n')
    wtf.close()
# -----
# Write .asc
# -----
def writeasc(write_file_name, input_arr, ncols, nrows, xllco, yllco, cellsize, nodata_v):
    """Write a 2-D grid (indexed [row][col]) as an ESRI ASCII grid (.asc):
    six header lines followed by one space-separated row per line.

    bugfix: a spurious inner ``for i in range(ncols)`` loop wrapped the
    row-writing loop, so every row was emitted ncols times.
    """
    wtf = open(write_file_name, 'w')
    wtf.write("ncols {0}\n".format(ncols))
    wtf.write("nrows {0}\n".format(nrows))
    wtf.write("xllcorner {0}\n".format(xllco))
    wtf.write("yllcorner {0}\n".format(yllco))
    wtf.write("cellsize {0}\n".format(cellsize))
    wtf.write("NODATA_value {0}\n".format(nodata_v))
    for j in range(0, nrows):
        for x in input_arr[j]:
            wtf.write("{0} ".format(x))
        wtf.write("\n")
    wtf.close()
# -----
# Write .csv
# -----
def writecsv(file_name, arr_input, arr_head=[]):
    """Write column-major data (arr_input[variable][row]) as CSV with
    values formatted '{:8.5f}', preceded by an optional '#'-prefixed
    header line naming the variables.

    bugfix: the header names were written back-to-back with no delimiter;
    they are now comma-separated like the data rows.
    """
    wtf = open(file_name, 'w')
    if len(arr_head) != 0:
        wtf.write("#")
        wtf.write(",".join("{0:s}".format(h) for h in arr_head))
        wtf.write("\n")
    num_vars = len(arr_input)
    num_length = len(arr_input[0])
    for l in range(num_length):
        for v in range(num_vars):
            if v == 0:
                wtf.write("{0:8.5f}".format(arr_input[v][l]))
            else:
                wtf.write(",{0:8.5f}".format(arr_input[v][l]))
        wtf.write("\n")
    print("Finishing writing out {0:s}".format(file_name))
    wtf.close()
# -----
# Plot in default_im
# -----
def im_subplots(title,ax,figx,ncols,nrows,array,*inver):
    """Display a 2-D array with imshow in a fresh figure, with a colorbar.

    Pass any extra positional argument (*inver) to flip the y axis.
    NOTE(review): the 'ax' and 'figx' parameters are immediately rebound,
    so the values passed in are never used — confirm intended.
    """
    figx=plt.figure()
    ax=figx.add_subplot(111)
    cax=ax.imshow(array)
    #cax=ax.imshow('array[0][layer],vmin=%f,vmax=%f' % (float(vmin),float(vmax)))
    ax.set_title(title)
    ax.set_xlim((-0.5,ncols-0.5))
    if len(inver) == 0:
        ax.set_ylim((-0.5,nrows-0.5))
    else:
        # Inverted y axis (row 0 at the top).
        ax.set_ylim((nrows-0.5,-0.5))
    cbar= figx.colorbar(cax,format='%.5f')
# -----
# Plot .pfb in im
# -----
def pfb_im_subplots(title, ax, figx, array, ncols, nrows, layer, *inver):
    """Display one z-layer of a 3-D pfb array with a zero-centred 'bwr'
    colour map, a colorbar, and a cursor readout of the cell value.

    Pass any extra positional argument (*inver) to flip the y axis.
    NOTE(review): the 'ax' and 'figx' parameters are immediately rebound,
    so the values passed in are never used — confirm intended.

    bugfix: the flip test was ``inver == True`` where 'inver' is the
    *args tuple, so it was always False and the axis never flipped; the
    presence test now matches im_subplots.
    """
    figx = plt.figure()
    ax = figx.add_subplot(111)
    # Symmetric colour range around zero so 'bwr' is centred on 0.
    v_max = numpy.amax(array[layer])
    v_min = v_max * -1
    if v_max == v_min:
        v_max = v_min + 0.5  # degenerate all-zero layer: avoid an empty range
    cax = ax.imshow(array[layer], vmin=v_min, vmax=v_max, cmap='bwr', interpolation='nearest')
    ax.set_title('File:{0:s}, Layer:{1:d}'.format(title, layer))
    ax.set_xlim((-0.5, ncols - 0.5))
    if len(inver) != 0:
        ax.set_ylim((nrows - 0.5, -0.5))
    else:
        ax.set_ylim((-0.5, nrows - 0.5))
    cbar = figx.colorbar(cax, format='%.5f')
    numrows, numcols = array[layer].shape

    def format_coord(x, y):
        # Map the cursor position to the nearest cell and report its value.
        col = int(x + 0.5)
        row = int(y + 0.5)
        if col >= 0 and col < numcols and row >= 0 and row < numrows:
            z = array[layer][row][col]
            return 'x=%1.4f, y=%1.4f, z=%1.4f' % (x, y, z)
        else:
            return 'x=%1.4f, y=%1.4f' % (x, y)
    ax.format_coord = format_coord
# -----
# Plot in default_line
# -----
def line_subplots(title,ax,figx,ncols,nrows,array,*inver):
    """Plot one row of *array* as a line chart in a fresh figure.

    NOTE(review): despite its name, 'ncols' is used as the row index here,
    and 'ax'/'figx' are immediately rebound (the passed values are never
    used) — confirm intended.
    """
    figx=plt.figure()
    ax=figx.add_subplot(111)
    cax=ax.plot(array[ncols])
    #cax=ax.imshow('array[0][layer],vmin=%f,vmax=%f' % (float(vmin),float(vmax)))
    ax.set_title(title)
    # Axis-limit / colorbar handling is disabled for line plots.
    # ax.set_xlim((-0.5,ncols-0.5))
    # if len(inver) == 0:
    #     ax.set_ylim((-0.5,nrows-0.5))
    # else:
    #     ax.set_ylim((nrows-0.5,-0.5))
    # cbar= figx.colorbar(cax,format='%.5f')
# -----
# Plot .pfb in im
# -----
def pfb_line_subplots(title,ax,figx,array,ncols,nrows,layer,*inver):
    """Plot one row of one z-layer of a 3-D pfb array as a line chart.

    NOTE(review): 'nrows' is used as the row index within the layer, and
    'ax'/'figx' are immediately rebound — confirm intended.
    """
    figx=plt.figure()
    #ax=figx.add_subplot('%d%d%d'%(col,row,pos))
    ax=figx.add_subplot(111)
    ax.plot(array[layer][nrows])
    ax.set_title('File:{0:s}, Layer:{1:d}'.format(title,layer))
    #cbar= figx.colorbar(cax,format='%.5f')
# -----
# Chunk the value from pfb file
# -----
def pfb_chunk3d(pfba, fixplane, layer, ncols, nrows, nlayers):
    """Extract a 2-D slice from a 3-D pfb array (indexed [z][y][x]).

    fixplane selects the axis held constant ("x", "y" or "z") and *layer*
    is the index along that axis.  Returns (slice, dim1, dim2) where the
    slice is a float array and dim1/dim2 are its column/row counts; an
    invalid plane prints a warning and returns None.
    """
    nx, ny, nz = ncols, nrows, nlayers
    if fixplane == "x":
        print("Chunk from X plane, layer: {0}".format(layer))
        sliced = numpy.array([[pfba[j][i][layer] for i in range(ny)]
                              for j in range(nz)], dtype=float)
        return sliced, ny, nz
    elif fixplane == "y":
        print("Chunk from Y plane, layer: {0}".format(layer))
        sliced = numpy.array([[pfba[j][layer][i] for i in range(nx)]
                              for j in range(nz)], dtype=float)
        return sliced, nx, nz
    elif fixplane == "z":
        print("Chunk from Z plane, layer: {0}".format(layer))
        sliced = numpy.array([[pfba[layer][j][i] for i in range(nx)]
                              for j in range(ny)], dtype=float)
        return sliced, nx, ny
    else:
        print("Wrong fix plane, \"x\", \"y\" or \"z\" only")
    print("Get chunk at {0}-plane,{1} layer".format(fixplane, layer))
# -----
# Persing arguments
# -----
| lgpl-3.0 |
liangazhou/django-rdp | packages/PyDev/plugins/org.python.pydev.jython_4.4.0.201510052309/Lib/json/tests/test_encode_basestring_ascii.py | 143 | 2004 | from collections import OrderedDict
from json.tests import PyTest, CTest
# (input, expected) pairs for json.encoder.encode_basestring_ascii:
# control characters, non-BMP characters that must become surrogate
# pairs, and equivalent unicode / UTF-8-byte inputs that must escape
# identically.
CASES = [
    (u'/\\"\ucafe\ubabe\uab98\ufcde\ubcda\uef4a\x08\x0c\n\r\t`1~!@#$%^&*()_+-=[]{}|;:\',./<>?', '"/\\\\\\"\\ucafe\\ubabe\\uab98\\ufcde\\ubcda\\uef4a\\b\\f\\n\\r\\t`1~!@#$%^&*()_+-=[]{}|;:\',./<>?"'),
    (u'\u0123\u4567\u89ab\ucdef\uabcd\uef4a', '"\\u0123\\u4567\\u89ab\\ucdef\\uabcd\\uef4a"'),
    (u'controls', '"controls"'),
    (u'\x08\x0c\n\r\t', '"\\b\\f\\n\\r\\t"'),
    (u'{"object with 1 member":["array with 1 element"]}', '"{\\"object with 1 member\\":[\\"array with 1 element\\"]}"'),
    (u' s p a c e d ', '" s p a c e d "'),
    (u'\U0001d120', '"\\ud834\\udd20"'),
    (u'\u03b1\u03a9', '"\\u03b1\\u03a9"'),
    ('\xce\xb1\xce\xa9', '"\\u03b1\\u03a9"'),
    (u'\u03b1\u03a9', '"\\u03b1\\u03a9"'),
    ('\xce\xb1\xce\xa9', '"\\u03b1\\u03a9"'),
    (u'\u03b1\u03a9', '"\\u03b1\\u03a9"'),
    (u'\u03b1\u03a9', '"\\u03b1\\u03a9"'),
    (u"`1~!@#$%^&*()_+-={':[,]}|;.</>?", '"`1~!@#$%^&*()_+-={\':[,]}|;.</>?"'),
    (u'\x08\x0c\n\r\t', '"\\b\\f\\n\\r\\t"'),
    (u'\u0123\u4567\u89ab\ucdef\uabcd\uef4a', '"\\u0123\\u4567\\u89ab\\ucdef\\uabcd\\uef4a"'),
]
class TestEncodeBasestringAscii(object):
    """Checks for encode_basestring_ascii, mixed into PyTest/CTest below so
    the same assertions run against both the pure-Python and the C
    implementation (self.json / self.dumps come from the harness bases)."""

    def test_encode_basestring_ascii(self):
        # Every CASES input must escape to exactly the expected string.
        fname = self.json.encoder.encode_basestring_ascii.__name__
        for input_string, expect in CASES:
            result = self.json.encoder.encode_basestring_ascii(input_string)
            self.assertEqual(result, expect,
                '{0!r} != {1!r} for {2}({3!r})'.format(
                    result, expect, fname, input_string))

    def test_ordered_dict(self):
        # See issue 6105: OrderedDict keys must serialize in insertion order.
        items = [('one', 1), ('two', 2), ('three', 3), ('four', 4), ('five', 5)]
        s = self.dumps(OrderedDict(items))
        self.assertEqual(s, '{"one": 1, "two": 2, "three": 3, "four": 4, "five": 5}')
class TestPyEncodeBasestringAscii(TestEncodeBasestringAscii, PyTest): pass  # pure-Python encoder
class TestCEncodeBasestringAscii(TestEncodeBasestringAscii, CTest): pass  # C-accelerated encoder
| apache-2.0 |
markandrewj/node-gyp | gyp/pylib/gyp/xcode_emulation.py | 428 | 57360 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This module contains classes that help to emulate xcodebuild behavior on top of
other build systems, such as make and ninja.
"""
import copy
import gyp.common
import os
import os.path
import re
import shlex
import subprocess
import sys
import tempfile
from gyp.common import GypError
class XcodeSettings(object):
  """A class that understands the gyp 'xcode_settings' object."""
  # Populated lazily by _SdkPath(). Shared by all XcodeSettings, so cached
  # at class-level for efficiency.
  _sdk_path_cache = {}   # Maps an SDKROOT name to its resolved filesystem path.
  _sdk_root_cache = {}   # Reverse map: resolved SDK path back to SDKROOT name.
  # Populated lazily by GetExtraPlistItems(). Shared by all XcodeSettings, so
  # cached at class-level for efficiency.
  _plist_cache = {}
  # Populated lazily by GetIOSPostbuilds. Shared by all XcodeSettings, so
  # cached at class-level for efficiency.
  _codesigning_key_cache = {}
  # Populated lazily by _XcodeVersion. Shared by all XcodeSettings, so cached
  # at class-level for efficiency. A (version, build) tuple once computed.
  _xcode_version_cache = ()
  def __init__(self, spec):
    """Args:
      spec: The gyp target dict whose 'xcode_settings' should be emulated.
    """
    self.spec = spec
    # Set to True below once any config has IPHONEOS_DEPLOYMENT_TARGET.
    self.isIOS = False
    # Per-target 'xcode_settings' are pushed down into configs earlier by gyp.
    # This means self.xcode_settings[config] always contains all settings
    # for that config -- the per-target settings as well. Settings that are
    # the same for all configs are implicitly per-target settings.
    self.xcode_settings = {}
    configs = spec['configurations']
    for configname, config in configs.iteritems():
      self.xcode_settings[configname] = config.get('xcode_settings', {})
      self._ConvertConditionalKeys(configname)
      if self.xcode_settings[configname].get('IPHONEOS_DEPLOYMENT_TARGET',
                                             None):
        self.isIOS = True
    # This is only non-None temporarily during the execution of some methods.
    self.configname = None
    # Used by _AdjustLibrary to match .a and .dylib entries in libraries.
    self.library_re = re.compile(r'^lib([^/]+)\.(a|dylib)$')
  def _ConvertConditionalKeys(self, configname):
    """Converts or warns on conditional keys. Xcode supports conditional keys,
    such as CODE_SIGN_IDENTITY[sdk=iphoneos*]. This is a partial implementation
    with some keys converted while the rest force a warning."""
    settings = self.xcode_settings[configname]
    conditional_keys = [key for key in settings if key.endswith(']')]
    for key in conditional_keys:
      # If you need more, speak up at http://crbug.com/122592
      if key.endswith("[sdk=iphoneos*]"):
        if configname.endswith("iphoneos"):
          new_key = key.split("[")[0]
          settings[new_key] = settings[key]
      else:
        print 'Warning: Conditional keys not implemented, ignoring:', \
            ' '.join(conditional_keys)
      # Every conditional key is removed, whether it was converted or not.
      del settings[key]
def _Settings(self):
assert self.configname
return self.xcode_settings[self.configname]
def _Test(self, test_key, cond_key, default):
return self._Settings().get(test_key, default) == cond_key
def _Appendf(self, lst, test_key, format_str, default=None):
if test_key in self._Settings():
lst.append(format_str % str(self._Settings()[test_key]))
elif default:
lst.append(format_str % str(default))
  def _WarnUnimplemented(self, test_key):
    """Prints a warning when |test_key| is configured but not emulated."""
    if test_key in self._Settings():
      print 'Warning: Ignoring not yet implemented key "%s".' % test_key
def _IsBundle(self):
return int(self.spec.get('mac_bundle', 0)) != 0
def GetFrameworkVersion(self):
"""Returns the framework version of the current target. Only valid for
bundles."""
assert self._IsBundle()
return self.GetPerTargetSetting('FRAMEWORK_VERSION', default='A')
def GetWrapperExtension(self):
"""Returns the bundle extension (.app, .framework, .plugin, etc). Only
valid for bundles."""
assert self._IsBundle()
if self.spec['type'] in ('loadable_module', 'shared_library'):
default_wrapper_extension = {
'loadable_module': 'bundle',
'shared_library': 'framework',
}[self.spec['type']]
wrapper_extension = self.GetPerTargetSetting(
'WRAPPER_EXTENSION', default=default_wrapper_extension)
return '.' + self.spec.get('product_extension', wrapper_extension)
elif self.spec['type'] == 'executable':
return '.' + self.spec.get('product_extension', 'app')
else:
assert False, "Don't know extension for '%s', target '%s'" % (
self.spec['type'], self.spec['target_name'])
def GetProductName(self):
"""Returns PRODUCT_NAME."""
return self.spec.get('product_name', self.spec['target_name'])
def GetFullProductName(self):
"""Returns FULL_PRODUCT_NAME."""
if self._IsBundle():
return self.GetWrapperName()
else:
return self._GetStandaloneBinaryPath()
def GetWrapperName(self):
"""Returns the directory name of the bundle represented by this target.
Only valid for bundles."""
assert self._IsBundle()
return self.GetProductName() + self.GetWrapperExtension()
def GetBundleContentsFolderPath(self):
"""Returns the qualified path to the bundle's contents folder. E.g.
Chromium.app/Contents or Foo.bundle/Versions/A. Only valid for bundles."""
if self.isIOS:
return self.GetWrapperName()
assert self._IsBundle()
if self.spec['type'] == 'shared_library':
return os.path.join(
self.GetWrapperName(), 'Versions', self.GetFrameworkVersion())
else:
# loadable_modules have a 'Contents' folder like executables.
return os.path.join(self.GetWrapperName(), 'Contents')
def GetBundleResourceFolder(self):
"""Returns the qualified path to the bundle's resource folder. E.g.
Chromium.app/Contents/Resources. Only valid for bundles."""
assert self._IsBundle()
if self.isIOS:
return self.GetBundleContentsFolderPath()
return os.path.join(self.GetBundleContentsFolderPath(), 'Resources')
def GetBundlePlistPath(self):
"""Returns the qualified path to the bundle's plist file. E.g.
Chromium.app/Contents/Info.plist. Only valid for bundles."""
assert self._IsBundle()
if self.spec['type'] in ('executable', 'loadable_module'):
return os.path.join(self.GetBundleContentsFolderPath(), 'Info.plist')
else:
return os.path.join(self.GetBundleContentsFolderPath(),
'Resources', 'Info.plist')
def GetProductType(self):
"""Returns the PRODUCT_TYPE of this target."""
if self._IsBundle():
return {
'executable': 'com.apple.product-type.application',
'loadable_module': 'com.apple.product-type.bundle',
'shared_library': 'com.apple.product-type.framework',
}[self.spec['type']]
else:
return {
'executable': 'com.apple.product-type.tool',
'loadable_module': 'com.apple.product-type.library.dynamic',
'shared_library': 'com.apple.product-type.library.dynamic',
'static_library': 'com.apple.product-type.library.static',
}[self.spec['type']]
def GetMachOType(self):
"""Returns the MACH_O_TYPE of this target."""
# Weird, but matches Xcode.
if not self._IsBundle() and self.spec['type'] == 'executable':
return ''
return {
'executable': 'mh_execute',
'static_library': 'staticlib',
'shared_library': 'mh_dylib',
'loadable_module': 'mh_bundle',
}[self.spec['type']]
def _GetBundleBinaryPath(self):
"""Returns the name of the bundle binary of by this target.
E.g. Chromium.app/Contents/MacOS/Chromium. Only valid for bundles."""
assert self._IsBundle()
if self.spec['type'] in ('shared_library') or self.isIOS:
path = self.GetBundleContentsFolderPath()
elif self.spec['type'] in ('executable', 'loadable_module'):
path = os.path.join(self.GetBundleContentsFolderPath(), 'MacOS')
return os.path.join(path, self.GetExecutableName())
def _GetStandaloneExecutableSuffix(self):
if 'product_extension' in self.spec:
return '.' + self.spec['product_extension']
return {
'executable': '',
'static_library': '.a',
'shared_library': '.dylib',
'loadable_module': '.so',
}[self.spec['type']]
def _GetStandaloneExecutablePrefix(self):
return self.spec.get('product_prefix', {
'executable': '',
'static_library': 'lib',
'shared_library': 'lib',
# Non-bundled loadable_modules are called foo.so for some reason
# (that is, .so and no prefix) with the xcode build -- match that.
'loadable_module': '',
}[self.spec['type']])
def _GetStandaloneBinaryPath(self):
"""Returns the name of the non-bundle binary represented by this target.
E.g. hello_world. Only valid for non-bundles."""
assert not self._IsBundle()
assert self.spec['type'] in (
'executable', 'shared_library', 'static_library', 'loadable_module'), (
'Unexpected type %s' % self.spec['type'])
target = self.spec['target_name']
if self.spec['type'] == 'static_library':
if target[:3] == 'lib':
target = target[3:]
elif self.spec['type'] in ('loadable_module', 'shared_library'):
if target[:3] == 'lib':
target = target[3:]
target_prefix = self._GetStandaloneExecutablePrefix()
target = self.spec.get('product_name', target)
target_ext = self._GetStandaloneExecutableSuffix()
return target_prefix + target + target_ext
def GetExecutableName(self):
"""Returns the executable name of the bundle represented by this target.
E.g. Chromium."""
if self._IsBundle():
return self.spec.get('product_name', self.spec['target_name'])
else:
return self._GetStandaloneBinaryPath()
def GetExecutablePath(self):
"""Returns the directory name of the bundle represented by this target. E.g.
Chromium.app/Contents/MacOS/Chromium."""
if self._IsBundle():
return self._GetBundleBinaryPath()
else:
return self._GetStandaloneBinaryPath()
def GetActiveArchs(self, configname):
"""Returns the architectures this target should be built for."""
# TODO: Look at VALID_ARCHS, ONLY_ACTIVE_ARCH; possibly set
# CURRENT_ARCH / NATIVE_ARCH env vars?
return self.xcode_settings[configname].get('ARCHS', [self._DefaultArch()])
def _GetStdout(self, cmdlist):
job = subprocess.Popen(cmdlist, stdout=subprocess.PIPE)
out = job.communicate()[0]
if job.returncode != 0:
sys.stderr.write(out + '\n')
raise GypError('Error %d running %s' % (job.returncode, cmdlist[0]))
return out.rstrip('\n')
def _GetSdkVersionInfoItem(self, sdk, infoitem):
# xcodebuild requires Xcode and can't run on Command Line Tools-only
# systems from 10.7 onward.
# Since the CLT has no SDK paths anyway, returning None is the
# most sensible route and should still do the right thing.
try:
return self._GetStdout(['xcodebuild', '-version', '-sdk', sdk, infoitem])
except:
pass
def _SdkRoot(self, configname):
if configname is None:
configname = self.configname
return self.GetPerConfigSetting('SDKROOT', configname, default='')
def _SdkPath(self, configname=None):
sdk_root = self._SdkRoot(configname)
if sdk_root.startswith('/'):
return sdk_root
return self._XcodeSdkPath(sdk_root)
def _XcodeSdkPath(self, sdk_root):
if sdk_root not in XcodeSettings._sdk_path_cache:
sdk_path = self._GetSdkVersionInfoItem(sdk_root, 'Path')
XcodeSettings._sdk_path_cache[sdk_root] = sdk_path
if sdk_root:
XcodeSettings._sdk_root_cache[sdk_path] = sdk_root
return XcodeSettings._sdk_path_cache[sdk_root]
  def _AppendPlatformVersionMinFlags(self, lst):
    """Appends the deployment-target version-min flag (macOS or iOS) to
    |lst|, picking the simulator flag when the SDK is iphonesimulator*."""
    self._Appendf(lst, 'MACOSX_DEPLOYMENT_TARGET', '-mmacosx-version-min=%s')
    if 'IPHONEOS_DEPLOYMENT_TARGET' in self._Settings():
      # TODO: Implement this better?
      sdk_path_basename = os.path.basename(self._SdkPath())
      if sdk_path_basename.lower().startswith('iphonesimulator'):
        self._Appendf(lst, 'IPHONEOS_DEPLOYMENT_TARGET',
                      '-mios-simulator-version-min=%s')
      else:
        self._Appendf(lst, 'IPHONEOS_DEPLOYMENT_TARGET',
                      '-miphoneos-version-min=%s')
  def GetCflags(self, configname, arch=None):
    """Returns flags that need to be added to .c, .cc, .m, and .mm
    compilations.

    Args:
      configname: Name of the configuration whose settings are read.
      arch: Optional single architecture to compile for; overrides ARCHS.
    """
    # This functions (and the similar ones below) do not offer complete
    # emulation of all xcode_settings keys. They're implemented on demand.
    self.configname = configname
    cflags = []
    sdk_root = self._SdkPath()
    if 'SDKROOT' in self._Settings() and sdk_root:
      cflags.append('-isysroot %s' % sdk_root)
    if self._Test('CLANG_WARN_CONSTANT_CONVERSION', 'YES', default='NO'):
      cflags.append('-Wconstant-conversion')
    if self._Test('GCC_CHAR_IS_UNSIGNED_CHAR', 'YES', default='NO'):
      cflags.append('-funsigned-char')
    if self._Test('GCC_CW_ASM_SYNTAX', 'YES', default='YES'):
      cflags.append('-fasm-blocks')
    if 'GCC_DYNAMIC_NO_PIC' in self._Settings():
      if self._Settings()['GCC_DYNAMIC_NO_PIC'] == 'YES':
        cflags.append('-mdynamic-no-pic')
    else:
      pass
      # TODO: In this case, it depends on the target. xcode passes
      # mdynamic-no-pic by default for executable and possibly static lib
      # according to mento
    if self._Test('GCC_ENABLE_PASCAL_STRINGS', 'YES', default='YES'):
      cflags.append('-mpascal-strings')
    self._Appendf(cflags, 'GCC_OPTIMIZATION_LEVEL', '-O%s', default='s')
    if self._Test('GCC_GENERATE_DEBUGGING_SYMBOLS', 'YES', default='YES'):
      dbg_format = self._Settings().get('DEBUG_INFORMATION_FORMAT', 'dwarf')
      if dbg_format == 'dwarf':
        cflags.append('-gdwarf-2')
      elif dbg_format == 'stabs':
        raise NotImplementedError('stabs debug format is not supported yet.')
      elif dbg_format == 'dwarf-with-dsym':
        # dSYM generation itself happens in _GetDebugInfoPostbuilds.
        cflags.append('-gdwarf-2')
      else:
        raise NotImplementedError('Unknown debug format %s' % dbg_format)
    if self._Settings().get('GCC_STRICT_ALIASING') == 'YES':
      cflags.append('-fstrict-aliasing')
    elif self._Settings().get('GCC_STRICT_ALIASING') == 'NO':
      cflags.append('-fno-strict-aliasing')
    if self._Test('GCC_SYMBOLS_PRIVATE_EXTERN', 'YES', default='NO'):
      cflags.append('-fvisibility=hidden')
    if self._Test('GCC_TREAT_WARNINGS_AS_ERRORS', 'YES', default='NO'):
      cflags.append('-Werror')
    if self._Test('GCC_WARN_ABOUT_MISSING_NEWLINE', 'YES', default='NO'):
      cflags.append('-Wnewline-eof')
    self._AppendPlatformVersionMinFlags(cflags)
    # TODO:
    if self._Test('COPY_PHASE_STRIP', 'YES', default='NO'):
      self._WarnUnimplemented('COPY_PHASE_STRIP')
    self._WarnUnimplemented('GCC_DEBUGGING_SYMBOLS')
    self._WarnUnimplemented('GCC_ENABLE_OBJC_EXCEPTIONS')
    # TODO: This is exported correctly, but assigning to it is not supported.
    self._WarnUnimplemented('MACH_O_TYPE')
    self._WarnUnimplemented('PRODUCT_TYPE')
    if arch is not None:
      archs = [arch]
    else:
      archs = self._Settings().get('ARCHS', [self._DefaultArch()])
    if len(archs) != 1:
      # TODO: Supporting fat binaries will be annoying.
      self._WarnUnimplemented('ARCHS')
      archs = ['i386']
    cflags.append('-arch ' + archs[0])
    # SSE feature flags only make sense on Intel architectures.
    if archs[0] in ('i386', 'x86_64'):
      if self._Test('GCC_ENABLE_SSE3_EXTENSIONS', 'YES', default='NO'):
        cflags.append('-msse3')
      if self._Test('GCC_ENABLE_SUPPLEMENTAL_SSE3_INSTRUCTIONS', 'YES',
                    default='NO'):
        cflags.append('-mssse3')  # Note 3rd 's'.
      if self._Test('GCC_ENABLE_SSE41_EXTENSIONS', 'YES', default='NO'):
        cflags.append('-msse4.1')
      if self._Test('GCC_ENABLE_SSE42_EXTENSIONS', 'YES', default='NO'):
        cflags.append('-msse4.2')
    cflags += self._Settings().get('WARNING_CFLAGS', [])
    if sdk_root:
      framework_root = sdk_root
    else:
      framework_root = ''
    config = self.spec['configurations'][self.configname]
    framework_dirs = config.get('mac_framework_dirs', [])
    for directory in framework_dirs:
      cflags.append('-F' + directory.replace('$(SDKROOT)', framework_root))
    self.configname = None
    return cflags
def GetCflagsC(self, configname):
"""Returns flags that need to be added to .c, and .m compilations."""
self.configname = configname
cflags_c = []
if self._Settings().get('GCC_C_LANGUAGE_STANDARD', '') == 'ansi':
cflags_c.append('-ansi')
else:
self._Appendf(cflags_c, 'GCC_C_LANGUAGE_STANDARD', '-std=%s')
cflags_c += self._Settings().get('OTHER_CFLAGS', [])
self.configname = None
return cflags_c
  def GetCflagsCC(self, configname):
    """Returns flags that need to be added to .cc, and .mm compilations."""
    self.configname = configname
    cflags_cc = []
    clang_cxx_language_standard = self._Settings().get(
        'CLANG_CXX_LANGUAGE_STANDARD')
    # Note: Don't make c++0x to c++11 so that c++0x can be used with older
    # clangs that don't understand c++11 yet (like Xcode 4.2's).
    if clang_cxx_language_standard:
      cflags_cc.append('-std=%s' % clang_cxx_language_standard)
    self._Appendf(cflags_cc, 'CLANG_CXX_LIBRARY', '-stdlib=%s')
    if self._Test('GCC_ENABLE_CPP_RTTI', 'NO', default='YES'):
      cflags_cc.append('-fno-rtti')
    if self._Test('GCC_ENABLE_CPP_EXCEPTIONS', 'NO', default='YES'):
      cflags_cc.append('-fno-exceptions')
    if self._Test('GCC_INLINES_ARE_PRIVATE_EXTERN', 'YES', default='NO'):
      cflags_cc.append('-fvisibility-inlines-hidden')
    if self._Test('GCC_THREADSAFE_STATICS', 'NO', default='YES'):
      cflags_cc.append('-fno-threadsafe-statics')
    # Note: This flag is a no-op for clang, it only has an effect for gcc.
    if self._Test('GCC_WARN_ABOUT_INVALID_OFFSETOF_MACRO', 'NO', default='YES'):
      cflags_cc.append('-Wno-invalid-offsetof')
    other_ccflags = []
    # OTHER_CPLUSPLUSFLAGS defaults to '$(inherited)', which pulls in the
    # C-only OTHER_CFLAGS; explicit flags are copied through verbatim.
    for flag in self._Settings().get('OTHER_CPLUSPLUSFLAGS', ['$(inherited)']):
      # TODO: More general variable expansion. Missing in many other places too.
      if flag in ('$inherited', '$(inherited)', '${inherited}'):
        flag = '$OTHER_CFLAGS'
      if flag in ('$OTHER_CFLAGS', '$(OTHER_CFLAGS)', '${OTHER_CFLAGS}'):
        other_ccflags += self._Settings().get('OTHER_CFLAGS', [])
      else:
        other_ccflags.append(flag)
    cflags_cc += other_ccflags
    self.configname = None
    return cflags_cc
def _AddObjectiveCGarbageCollectionFlags(self, flags):
gc_policy = self._Settings().get('GCC_ENABLE_OBJC_GC', 'unsupported')
if gc_policy == 'supported':
flags.append('-fobjc-gc')
elif gc_policy == 'required':
flags.append('-fobjc-gc-only')
def _AddObjectiveCARCFlags(self, flags):
if self._Test('CLANG_ENABLE_OBJC_ARC', 'YES', default='NO'):
flags.append('-fobjc-arc')
def _AddObjectiveCMissingPropertySynthesisFlags(self, flags):
if self._Test('CLANG_WARN_OBJC_MISSING_PROPERTY_SYNTHESIS',
'YES', default='NO'):
flags.append('-Wobjc-missing-property-synthesis')
def GetCflagsObjC(self, configname):
"""Returns flags that need to be added to .m compilations."""
self.configname = configname
cflags_objc = []
self._AddObjectiveCGarbageCollectionFlags(cflags_objc)
self._AddObjectiveCARCFlags(cflags_objc)
self._AddObjectiveCMissingPropertySynthesisFlags(cflags_objc)
self.configname = None
return cflags_objc
def GetCflagsObjCC(self, configname):
"""Returns flags that need to be added to .mm compilations."""
self.configname = configname
cflags_objcc = []
self._AddObjectiveCGarbageCollectionFlags(cflags_objcc)
self._AddObjectiveCARCFlags(cflags_objcc)
self._AddObjectiveCMissingPropertySynthesisFlags(cflags_objcc)
if self._Test('GCC_OBJC_CALL_CXX_CDTORS', 'YES', default='NO'):
cflags_objcc.append('-fobjc-call-cxx-cdtors')
self.configname = None
return cflags_objcc
def GetInstallNameBase(self):
"""Return DYLIB_INSTALL_NAME_BASE for this target."""
# Xcode sets this for shared_libraries, and for nonbundled loadable_modules.
if (self.spec['type'] != 'shared_library' and
(self.spec['type'] != 'loadable_module' or self._IsBundle())):
return None
install_base = self.GetPerTargetSetting(
'DYLIB_INSTALL_NAME_BASE',
default='/Library/Frameworks' if self._IsBundle() else '/usr/local/lib')
return install_base
def _StandardizePath(self, path):
"""Do :standardizepath processing for path."""
# I'm not quite sure what :standardizepath does. Just call normpath(),
# but don't let @executable_path/../foo collapse to foo.
if '/' in path:
prefix, rest = '', path
if path.startswith('@'):
prefix, rest = path.split('/', 1)
rest = os.path.normpath(rest) # :standardizepath
path = os.path.join(prefix, rest)
return path
  def GetInstallName(self):
    """Return LD_DYLIB_INSTALL_NAME for this target."""
    # Xcode sets this for shared_libraries, and for nonbundled
    # loadable_modules.
    if (self.spec['type'] != 'shared_library' and
        (self.spec['type'] != 'loadable_module' or self._IsBundle())):
      return None
    default_install_name = \
        '$(DYLIB_INSTALL_NAME_BASE:standardizepath)/$(EXECUTABLE_PATH)'
    install_name = self.GetPerTargetSetting(
        'LD_DYLIB_INSTALL_NAME', default=default_install_name)
    # Hardcode support for the variables used in chromium for now, to
    # unblock people using the make build.
    if '$' in install_name:
      # Only the two known variable layouts are supported; anything else
      # is rejected loudly rather than silently mis-expanded.
      assert install_name in ('$(DYLIB_INSTALL_NAME_BASE:standardizepath)/'
          '$(WRAPPER_NAME)/$(PRODUCT_NAME)', default_install_name), (
          'Variables in LD_DYLIB_INSTALL_NAME are not generally supported '
          'yet in target \'%s\' (got \'%s\')' %
              (self.spec['target_name'], install_name))
      install_name = install_name.replace(
          '$(DYLIB_INSTALL_NAME_BASE:standardizepath)',
          self._StandardizePath(self.GetInstallNameBase()))
      if self._IsBundle():
        # These are only valid for bundles, hence the |if|.
        install_name = install_name.replace(
            '$(WRAPPER_NAME)', self.GetWrapperName())
        install_name = install_name.replace(
            '$(PRODUCT_NAME)', self.GetProductName())
      else:
        assert '$(WRAPPER_NAME)' not in install_name
        assert '$(PRODUCT_NAME)' not in install_name
      install_name = install_name.replace(
          '$(EXECUTABLE_PATH)', self.GetExecutablePath())
    return install_name
def _MapLinkerFlagFilename(self, ldflag, gyp_to_build_path):
"""Checks if ldflag contains a filename and if so remaps it from
gyp-directory-relative to build-directory-relative."""
# This list is expanded on demand.
# They get matched as:
# -exported_symbols_list file
# -Wl,exported_symbols_list file
# -Wl,exported_symbols_list,file
LINKER_FILE = '(\S+)'
WORD = '\S+'
linker_flags = [
['-exported_symbols_list', LINKER_FILE], # Needed for NaCl.
['-unexported_symbols_list', LINKER_FILE],
['-reexported_symbols_list', LINKER_FILE],
['-sectcreate', WORD, WORD, LINKER_FILE], # Needed for remoting.
]
for flag_pattern in linker_flags:
regex = re.compile('(?:-Wl,)?' + '[ ,]'.join(flag_pattern))
m = regex.match(ldflag)
if m:
ldflag = ldflag[:m.start(1)] + gyp_to_build_path(m.group(1)) + \
ldflag[m.end(1):]
# Required for ffmpeg (no idea why they don't use LIBRARY_SEARCH_PATHS,
# TODO(thakis): Update ffmpeg.gyp):
if ldflag.startswith('-L'):
ldflag = '-L' + gyp_to_build_path(ldflag[len('-L'):])
return ldflag
  def GetLdflags(self, configname, product_dir, gyp_to_build_path, arch=None):
    """Returns flags that need to be passed to the linker.

    Args:
      configname: The name of the configuration to get ld flags for.
      product_dir: The directory where products such static and dynamic
          libraries are placed. This is added to the library search path.
      gyp_to_build_path: A function that converts paths relative to the
          current gyp file to paths relative to the build direcotry.
      arch: Optional single architecture to link for; overrides ARCHS.
    """
    self.configname = configname
    ldflags = []
    # The xcode build is relative to a gyp file's directory, and OTHER_LDFLAGS
    # can contain entries that depend on this. Explicitly absolutify these.
    for ldflag in self._Settings().get('OTHER_LDFLAGS', []):
      ldflags.append(self._MapLinkerFlagFilename(ldflag, gyp_to_build_path))
    if self._Test('DEAD_CODE_STRIPPING', 'YES', default='NO'):
      ldflags.append('-Wl,-dead_strip')
    if self._Test('PREBINDING', 'YES', default='NO'):
      ldflags.append('-Wl,-prebind')
    self._Appendf(
        ldflags, 'DYLIB_COMPATIBILITY_VERSION', '-compatibility_version %s')
    self._Appendf(
        ldflags, 'DYLIB_CURRENT_VERSION', '-current_version %s')
    self._AppendPlatformVersionMinFlags(ldflags)
    if 'SDKROOT' in self._Settings() and self._SdkPath():
      ldflags.append('-isysroot ' + self._SdkPath())
    for library_path in self._Settings().get('LIBRARY_SEARCH_PATHS', []):
      ldflags.append('-L' + gyp_to_build_path(library_path))
    if 'ORDER_FILE' in self._Settings():
      ldflags.append('-Wl,-order_file ' +
                     '-Wl,' + gyp_to_build_path(
                                  self._Settings()['ORDER_FILE']))
    if arch is not None:
      archs = [arch]
    else:
      archs = self._Settings().get('ARCHS', [self._DefaultArch()])
    if len(archs) != 1:
      # TODO: Supporting fat binaries will be annoying.
      self._WarnUnimplemented('ARCHS')
      archs = ['i386']
    ldflags.append('-arch ' + archs[0])
    # Xcode adds the product directory by default.
    ldflags.append('-L' + product_dir)
    install_name = self.GetInstallName()
    if install_name and self.spec['type'] != 'loadable_module':
      # Spaces in the install name must be escaped for the shell.
      ldflags.append('-install_name ' + install_name.replace(' ', r'\ '))
    for rpath in self._Settings().get('LD_RUNPATH_SEARCH_PATHS', []):
      ldflags.append('-Wl,-rpath,' + rpath)
    sdk_root = self._SdkPath()
    if not sdk_root:
      sdk_root = ''
    config = self.spec['configurations'][self.configname]
    framework_dirs = config.get('mac_framework_dirs', [])
    for directory in framework_dirs:
      ldflags.append('-F' + directory.replace('$(SDKROOT)', sdk_root))
    self.configname = None
    return ldflags
def GetLibtoolflags(self, configname):
"""Returns flags that need to be passed to the static linker.
Args:
configname: The name of the configuration to get ld flags for.
"""
self.configname = configname
libtoolflags = []
for libtoolflag in self._Settings().get('OTHER_LDFLAGS', []):
libtoolflags.append(libtoolflag)
# TODO(thakis): ARCHS?
self.configname = None
return libtoolflags
def GetPerTargetSettings(self):
"""Gets a list of all the per-target settings. This will only fetch keys
whose values are the same across all configurations."""
first_pass = True
result = {}
for configname in sorted(self.xcode_settings.keys()):
if first_pass:
result = dict(self.xcode_settings[configname])
first_pass = False
else:
for key, value in self.xcode_settings[configname].iteritems():
if key not in result:
continue
elif result[key] != value:
del result[key]
return result
def GetPerConfigSetting(self, setting, configname, default=None):
if configname in self.xcode_settings:
return self.xcode_settings[configname].get(setting, default)
else:
return self.GetPerTargetSetting(setting, default)
def GetPerTargetSetting(self, setting, default=None):
"""Tries to get xcode_settings.setting from spec. Assumes that the setting
has the same value in all configurations and throws otherwise."""
is_first_pass = True
result = None
for configname in sorted(self.xcode_settings.keys()):
if is_first_pass:
result = self.xcode_settings[configname].get(setting, None)
is_first_pass = False
else:
assert result == self.xcode_settings[configname].get(setting, None), (
"Expected per-target setting for '%s', got per-config setting "
"(target %s)" % (setting, self.spec['target_name']))
if result is None:
return default
return result
  def _GetStripPostbuilds(self, configname, output_binary, quiet):
    """Returns a list of shell commands that contain the shell commands
    neccessary to strip this target's binary. These should be run as postbuilds
    before the actual postbuilds run."""
    self.configname = configname
    result = []
    # Stripping only happens when both deployment postprocessing and
    # STRIP_INSTALLED_PRODUCT are enabled, matching Xcode.
    if (self._Test('DEPLOYMENT_POSTPROCESSING', 'YES', default='NO') and
        self._Test('STRIP_INSTALLED_PRODUCT', 'YES', default='NO')):
      # Default strip style depends on the product type, as in Xcode.
      default_strip_style = 'debugging'
      if self.spec['type'] == 'loadable_module' and self._IsBundle():
        default_strip_style = 'non-global'
      elif self.spec['type'] == 'executable':
        default_strip_style = 'all'
      strip_style = self._Settings().get('STRIP_STYLE', default_strip_style)
      strip_flags = {
        'all': '',
        'non-global': '-x',
        'debugging': '-S',
      }[strip_style]
      explicit_strip_flags = self._Settings().get('STRIPFLAGS', '')
      if explicit_strip_flags:
        strip_flags += ' ' + _NormalizeEnvVarReferences(explicit_strip_flags)
      if not quiet:
        result.append('echo STRIP\\(%s\\)' % self.spec['target_name'])
      result.append('strip %s %s' % (strip_flags, output_binary))
    self.configname = None
    return result
  def _GetDebugInfoPostbuilds(self, configname, output, output_binary, quiet):
    """Returns a list of shell commands that contain the shell commands
    neccessary to massage this target's debug information. These should be run
    as postbuilds before the actual postbuilds run."""
    self.configname = configname
    # For static libraries, no dSYMs are created.
    result = []
    # dsymutil only runs when debugging symbols are on AND the format is
    # dwarf-with-dsym (the non-default value of DEBUG_INFORMATION_FORMAT).
    if (self._Test('GCC_GENERATE_DEBUGGING_SYMBOLS', 'YES', default='YES') and
        self._Test(
            'DEBUG_INFORMATION_FORMAT', 'dwarf-with-dsym', default='dwarf') and
        self.spec['type'] != 'static_library'):
      if not quiet:
        result.append('echo DSYMUTIL\\(%s\\)' % self.spec['target_name'])
      result.append('dsymutil %s -o %s' % (output_binary, output + '.dSYM'))
    self.configname = None
    return result
def _GetTargetPostbuilds(self, configname, output, output_binary,
quiet=False):
"""Returns a list of shell commands that contain the shell commands
to run as postbuilds for this target, before the actual postbuilds."""
# dSYMs need to build before stripping happens.
return (
self._GetDebugInfoPostbuilds(configname, output, output_binary, quiet) +
self._GetStripPostbuilds(configname, output_binary, quiet))
  def _GetIOSPostbuilds(self, configname, output_binary):
    """Return a shell command to codesign the iOS output binary so it can
    be deployed to a device. This should be run as the very last step of the
    build."""
    # Only iOS executables are signed; anything else needs no postbuild.
    if not (self.isIOS and self.spec['type'] == "executable"):
      return []
    settings = self.xcode_settings[configname]
    key = self._GetIOSCodeSignIdentityKey(settings)
    if not key:
      return []
    # Warn for any unimplemented signing xcode keys.
    unimpl = ['OTHER_CODE_SIGN_FLAGS']
    unimpl = set(unimpl) & set(self.xcode_settings[configname].keys())
    if unimpl:
      print 'Warning: Some codesign keys not implemented, ignoring: %s' % (
          ', '.join(sorted(unimpl)))
    # Signing is delegated to gyp-mac-tool's code-sign-bundle command.
    return ['%s code-sign-bundle "%s" "%s" "%s" "%s"' % (
        os.path.join('${TARGET_BUILD_DIR}', 'gyp-mac-tool'), key,
        settings.get('CODE_SIGN_RESOURCE_RULES_PATH', ''),
        settings.get('CODE_SIGN_ENTITLEMENTS', ''),
        settings.get('PROVISIONING_PROFILE', ''))
    ]
  def _GetIOSCodeSignIdentityKey(self, settings):
    """Resolves CODE_SIGN_IDENTITY to a keychain fingerprint via the
    `security` tool, memoized class-wide. Returns None when no identity is
    configured, '' when the identity is not found in the keychain."""
    identity = settings.get('CODE_SIGN_IDENTITY')
    if not identity:
      return None
    if identity not in XcodeSettings._codesigning_key_cache:
      output = subprocess.check_output(
          ['security', 'find-identity', '-p', 'codesigning', '-v'])
      for line in output.splitlines():
        if identity in line:
          fingerprint = line.split()[1]
          cache = XcodeSettings._codesigning_key_cache
          # Ambiguous identities would sign with an arbitrary key; refuse.
          assert identity not in cache or fingerprint == cache[identity], (
              "Multiple codesigning fingerprints for identity: %s" % identity)
          XcodeSettings._codesigning_key_cache[identity] = fingerprint
    return XcodeSettings._codesigning_key_cache.get(identity, '')
def AddImplicitPostbuilds(self, configname, output, output_binary,
postbuilds=[], quiet=False):
"""Returns a list of shell commands that should run before and after
|postbuilds|."""
assert output_binary is not None
pre = self._GetTargetPostbuilds(configname, output, output_binary, quiet)
post = self._GetIOSPostbuilds(configname, output_binary)
return pre + postbuilds + post
def _AdjustLibrary(self, library, config_name=None):
if library.endswith('.framework'):
l = '-framework ' + os.path.splitext(os.path.basename(library))[0]
else:
m = self.library_re.match(library)
if m:
l = '-l' + m.group(1)
else:
l = library
sdk_root = self._SdkPath(config_name)
if not sdk_root:
sdk_root = ''
return l.replace('$(SDKROOT)', sdk_root)
def AdjustLibraries(self, libraries, config_name=None):
"""Transforms entries like 'Cocoa.framework' in libraries into entries like
'-framework Cocoa', 'libcrypto.dylib' into '-lcrypto', etc.
"""
libraries = [self._AdjustLibrary(library, config_name)
for library in libraries]
return libraries
  def _BuildMachineOSBuild(self):
    """Returns the build machine's OS build number (e.g. '13C64')."""
    return self._GetStdout(['sw_vers', '-buildVersion'])
# This method ported from the logic in Homebrew's CLT version check
def _CLTVersion(self):
# pkgutil output looks like
# package-id: com.apple.pkg.CLTools_Executables
# version: 5.0.1.0.1.1382131676
# volume: /
# location: /
# install-time: 1382544035
# groups: com.apple.FindSystemFiles.pkg-group com.apple.DevToolsBoth.pkg-group com.apple.DevToolsNonRelocatableShared.pkg-group
STANDALONE_PKG_ID = "com.apple.pkg.DeveloperToolsCLILeo"
FROM_XCODE_PKG_ID = "com.apple.pkg.DeveloperToolsCLI"
MAVERICKS_PKG_ID = "com.apple.pkg.CLTools_Executables"
regex = re.compile('version: (?P<version>.+)')
for key in [MAVERICKS_PKG_ID, STANDALONE_PKG_ID, FROM_XCODE_PKG_ID]:
try:
output = self._GetStdout(['/usr/sbin/pkgutil', '--pkg-info', key])
return re.search(regex, output).groupdict()['version']
except:
continue
def _XcodeVersion(self):
# `xcodebuild -version` output looks like
# Xcode 4.6.3
# Build version 4H1503
# or like
# Xcode 3.2.6
# Component versions: DevToolsCore-1809.0; DevToolsSupport-1806.0
# BuildVersion: 10M2518
# Convert that to '0463', '4H1503'.
if len(XcodeSettings._xcode_version_cache) == 0:
try:
version_list = self._GetStdout(['xcodebuild', '-version']).splitlines()
# In some circumstances xcodebuild exits 0 but doesn't return
# the right results; for example, a user on 10.7 or 10.8 with
# a bogus path set via xcode-select
# In that case this may be a CLT-only install so fall back to
# checking that version.
if len(version_list) < 2:
raise GypError, "xcodebuild returned unexpected results"
except:
version = self._CLTVersion()
if version:
version = re.match('(\d\.\d\.?\d*)', version).groups()[0]
else:
raise GypError, "No Xcode or CLT version detected!"
# The CLT has no build information, so we return an empty string.
version_list = [version, '']
version = version_list[0]
build = version_list[-1]
# Be careful to convert "4.2" to "0420":
version = version.split()[-1].replace('.', '')
version = (version + '0' * (3 - len(version))).zfill(4)
if build:
build = build.split()[-1]
XcodeSettings._xcode_version_cache = (version, build)
return XcodeSettings._xcode_version_cache
def _XcodeIOSDeviceFamily(self, configname):
family = self.xcode_settings[configname].get('TARGETED_DEVICE_FAMILY', '1')
return [int(x) for x in family.split(',')]
def GetExtraPlistItems(self, configname=None):
    """Returns a dictionary with extra items to insert into Info.plist.

    The per-configuration result is computed once and memoized in the
    class-level XcodeSettings._plist_cache; the per-target UIDeviceFamily
    value is added on every call so it is never shared through the cache.
    """
    if configname not in XcodeSettings._plist_cache:
        cache = {}
        cache['BuildMachineOSBuild'] = self._BuildMachineOSBuild()
        xcode, xcode_build = self._XcodeVersion()
        cache['DTXcode'] = xcode
        cache['DTXcodeBuild'] = xcode_build
        sdk_root = self._SdkRoot(configname)
        if not sdk_root:
            # No explicit SDKROOT in the project; fall back to Xcode's default.
            sdk_root = self._DefaultSdkRoot()
        cache['DTSDKName'] = sdk_root
        # Version strings are zero-padded ('0430' == Xcode 4.3), so plain
        # string comparison works here.
        if xcode >= '0430':
            cache['DTSDKBuild'] = self._GetSdkVersionInfoItem(
                sdk_root, 'ProductBuildVersion')
        else:
            cache['DTSDKBuild'] = cache['BuildMachineOSBuild']
        if self.isIOS:
            cache['DTPlatformName'] = cache['DTSDKName']
            if configname.endswith("iphoneos"):
                cache['DTPlatformVersion'] = self._GetSdkVersionInfoItem(
                    sdk_root, 'ProductVersion')
                cache['CFBundleSupportedPlatforms'] = ['iPhoneOS']
            else:
                cache['CFBundleSupportedPlatforms'] = ['iPhoneSimulator']
        XcodeSettings._plist_cache[configname] = cache
    # Include extra plist items that are per-target, not per global
    # XcodeSettings.  Copy the cached dict so callers can't mutate the cache.
    items = dict(XcodeSettings._plist_cache[configname])
    if self.isIOS:
        items['UIDeviceFamily'] = self._XcodeIOSDeviceFamily(configname)
    return items
def _DefaultSdkRoot(self):
    """Returns the default SDKROOT to use.

    Prior to version 5.0.0, if SDKROOT was not explicitly set in the Xcode
    project, then the environment variable was empty. Starting with this
    version, Xcode uses the name of the newest SDK installed.
    """
    # _XcodeVersion() returns a (version, build) tuple; compare only the
    # version string.  (Comparing the tuple itself against '0500' was
    # always False on Python 2 and a TypeError on Python 3.)
    xcode_version, _ = self._XcodeVersion()
    if xcode_version < '0500':
        return ''
    default_sdk_path = self._XcodeSdkPath('')
    default_sdk_root = XcodeSettings._sdk_root_cache.get(default_sdk_path)
    if default_sdk_root:
        return default_sdk_root
    try:
        all_sdks = self._GetStdout(['xcodebuild', '-showsdks'])
    except:
        # If xcodebuild fails, there will be no valid SDKs
        return ''
    for line in all_sdks.splitlines():
        items = line.split()
        # Lines describing an SDK end in '-sdk <name>'.
        if len(items) >= 3 and items[-2] == '-sdk':
            sdk_root = items[-1]
            sdk_path = self._XcodeSdkPath(sdk_root)
            if sdk_path == default_sdk_path:
                return sdk_root
    return ''
def _DefaultArch(self):
# For Mac projects, Xcode changed the default value used when ARCHS is not
# set from "i386" to "x86_64".
#
# For iOS projects, if ARCHS is unset, it defaults to "armv7 armv7s" when
# building for a device, and the simulator binaries are always build for
# "i386".
#
# For new projects, ARCHS is set to $(ARCHS_STANDARD_INCLUDING_64_BIT),
# which correspond to "armv7 armv7s arm64", and when building the simulator
# the architecture is either "i386" or "x86_64" depending on the simulated
# device (respectively 32-bit or 64-bit device).
#
# Since the value returned by this function is only used when ARCHS is not
# set, then on iOS we return "i386", as the default xcode project generator
# does not set ARCHS if it is not set in the .gyp file.
if self.isIOS:
return 'i386'
version, build = self._XcodeVersion()
if version >= '0500':
return 'x86_64'
return 'i386'
class MacPrefixHeader(object):
    """A class that helps with emulating Xcode's GCC_PREFIX_HEADER feature.

    This feature consists of several pieces:
    * If GCC_PREFIX_HEADER is present, all compilations in that project get an
      additional |-include path_to_prefix_header| cflag.
    * If GCC_PRECOMPILE_PREFIX_HEADER is present too, then the prefix header is
      instead compiled, and all other compilations in the project get an
      additional |-include path_to_compiled_header| instead.
      + Compiled prefix headers have the extension gch. There is one gch file
        for every language used in the project (c, cc, m, mm), since gch files
        for different languages aren't compatible.
      + gch files themselves are built with the target's normal cflags, but
        they obviously don't get the |-include| flag. Instead, they need a -x
        flag that describes their language.
      + All o files in the target need to depend on the gch file, to make sure
        it's built before any o file is built.

    This class helps with some of these tasks, but it needs help from the build
    system for writing dependencies to the gch files, for writing build
    commands for the gch files, and for figuring out the location of the gch
    files.
    """

    # Maps a source-file extension to the language of its precompiled header.
    _EXT_TO_LANG = {
        '.c': 'c',
        '.cpp': 'cc', '.cc': 'cc', '.cxx': 'cc',
        '.m': 'm',
        '.mm': 'mm',
    }

    def __init__(self, xcode_settings,
                 gyp_path_to_build_path, gyp_path_to_build_output):
        """If xcode_settings is None, all methods on this class are no-ops.

        Args:
            gyp_path_to_build_path: A function that takes a gyp-relative path,
                and returns a path relative to the build directory.
            gyp_path_to_build_output: A function that takes a gyp-relative path
                and a language code ('c', 'cc', 'm', or 'mm'), and that returns
                a path to where the output of precompiling that path for that
                language should be placed (without the trailing '.gch').
        """
        # This doesn't support per-configuration prefix headers. Good enough
        # for now.
        self.header = None
        self.compile_headers = False
        if xcode_settings:
            self.header = xcode_settings.GetPerTargetSetting('GCC_PREFIX_HEADER')
            self.compile_headers = xcode_settings.GetPerTargetSetting(
                'GCC_PRECOMPILE_PREFIX_HEADER', default='NO') != 'NO'
        self.compiled_headers = {}
        if self.header:
            if self.compile_headers:
                for lang in ('c', 'cc', 'm', 'mm'):
                    self.compiled_headers[lang] = gyp_path_to_build_output(
                        self.header, lang)
            self.header = gyp_path_to_build_path(self.header)

    def _CompiledHeader(self, lang, arch):
        # Only meaningful when precompilation is enabled.
        assert self.compile_headers
        compiled = self.compiled_headers[lang]
        if arch:
            compiled = compiled + '.' + arch
        return compiled

    def GetInclude(self, lang, arch=None):
        """Gets the cflags to include the prefix header for language |lang|."""
        if self.compile_headers and lang in self.compiled_headers:
            return '-include %s' % self._CompiledHeader(lang, arch)
        if self.header:
            return '-include %s' % self.header
        return ''

    def _Gch(self, lang, arch):
        """Returns the actual file name of the prefix header for language
        |lang|."""
        assert self.compile_headers
        return self._CompiledHeader(lang, arch) + '.gch'

    def GetObjDependencies(self, sources, objs, arch=None):
        """Given a list of source files and the corresponding object files,
        returns a list of (source, object, gch) tuples, where |gch| is the
        build-directory relative path to the gch file each object file depends
        on.  |compilable[i]| has to be the source file belonging to |objs[i]|.
        """
        if not self.header or not self.compile_headers:
            return []
        dependencies = []
        for source, obj in zip(sources, objs):
            ext = os.path.splitext(source)[1]
            lang = self._EXT_TO_LANG.get(ext)
            if lang:
                dependencies.append((source, obj, self._Gch(lang, arch)))
        return dependencies

    def GetPchBuildCommands(self, arch=None):
        """Returns [(path_to_gch, language_flag, language, header)].
        |path_to_gch| and |header| are relative to the build directory.
        """
        if not self.header or not self.compile_headers:
            return []
        return [
            (self._Gch(lang, arch), '-x %s' % flag, lang, self.header)
            for lang, flag in (('c', 'c-header'),
                               ('cc', 'c++-header'),
                               ('m', 'objective-c-header'),
                               ('mm', 'objective-c++-header'))
        ]
def MergeGlobalXcodeSettingsToSpec(global_dict, spec):
    """Merges the global xcode_settings dictionary into each configuration of the
    target represented by spec. For keys that are both in the global and the local
    xcode_settings dict, the local key gets precendence.
    """
    # The xcode generator special-cases global xcode_settings and does something
    # that amounts to merging in the global xcode_settings into each local
    # xcode_settings dict.
    global_settings = global_dict.get('xcode_settings', {})
    for config in spec['configurations'].values():
        if 'xcode_settings' not in config:
            continue
        merged = dict(global_settings)
        merged.update(config['xcode_settings'])
        config['xcode_settings'] = merged
def IsMacBundle(flavor, spec):
    """Returns if |spec| should be treated as a bundle.

    Bundles are directories with a certain subdirectory structure, instead of
    just a single file. Bundle rules do not produce a binary but also package
    resources into that directory."""
    # Keep evaluation order: the mac_bundle value is converted to int first,
    # so a malformed value fails loudly regardless of flavor.
    is_bundle = (int(spec.get('mac_bundle', 0)) != 0 and flavor == 'mac')
    if is_bundle:
        assert spec['type'] != 'none', (
            'mac_bundle targets cannot have type none (target "%s")' %
            spec['target_name'])
    return is_bundle
def GetMacBundleResources(product_dir, xcode_settings, resources):
    """Yields (output, resource) pairs for every resource in |resources|.
    Only call this for mac bundle targets.

    Args:
        product_dir: Path to the directory containing the output bundle,
            relative to the build directory.
        xcode_settings: The XcodeSettings of the current target.
        resources: A list of bundle resources, relative to the build directory.
    """
    dest = os.path.join(product_dir,
                        xcode_settings.GetBundleResourceFolder())
    for res in resources:
        # The make generator doesn't support it, so forbid it everywhere
        # to keep the generators more interchangable.
        assert ' ' not in res, (
            "Spaces in resource filenames not supported (%s)" % res)
        # Split into (path,file), then split the path into (prefix,maybe.lproj).
        res_dir, res_file = os.path.split(res)
        maybe_lproj = os.path.split(res_dir)[1]
        output = dest
        # If the resource lives in a .lproj bundle, add that to the destination.
        if maybe_lproj.endswith('.lproj'):
            output = os.path.join(output, maybe_lproj)
        output = os.path.join(output, res_file)
        # Compiled XIB files are referred to by .nib.
        if output.endswith('.xib'):
            output = os.path.splitext(output)[0] + '.nib'
        # Compiled storyboard files are referred to by .storyboardc.
        if output.endswith('.storyboard'):
            output = os.path.splitext(output)[0] + '.storyboardc'
        yield output, res
def GetMacInfoPlist(product_dir, xcode_settings, gyp_path_to_build_path):
    """Returns (info_plist, dest_plist, defines, extra_env), where:
    * |info_plist| is the source plist path, relative to the
      build directory,
    * |dest_plist| is the destination plist path, relative to the
      build directory,
    * |defines| is a list of preprocessor defines (empty if the plist
      shouldn't be preprocessed,
    * |extra_env| is a dict of env variables that should be exported when
      invoking |mac_tool copy-info-plist|.

    Only call this for mac bundle targets.

    Args:
        product_dir: Path to the directory containing the output bundle,
            relative to the build directory.
        xcode_settings: The XcodeSettings of the current target.
        gyp_to_build_path: A function that converts paths relative to the
            current gyp file to paths relative to the build direcotry.
    """
    info_plist = xcode_settings.GetPerTargetSetting('INFOPLIST_FILE')
    if not info_plist:
        return None, None, [], {}
    # The make generator doesn't support it, so forbid it everywhere
    # to keep the generators more interchangable.
    assert ' ' not in info_plist, (
        "Spaces in Info.plist filenames not supported (%s)" % info_plist)
    info_plist = gyp_path_to_build_path(info_plist)
    # If explicitly set to preprocess the plist, invoke the C preprocessor and
    # specify any defines as -D flags.
    should_preprocess = xcode_settings.GetPerTargetSetting(
        'INFOPLIST_PREPROCESS', default='NO') == 'YES'
    if should_preprocess:
        # Create an intermediate file based on the path.
        defines = shlex.split(xcode_settings.GetPerTargetSetting(
            'INFOPLIST_PREPROCESSOR_DEFINITIONS', default=''))
    else:
        defines = []
    dest_plist = os.path.join(product_dir, xcode_settings.GetBundlePlistPath())
    extra_env = xcode_settings.GetPerTargetSettings()
    return info_plist, dest_plist, defines, extra_env
def _GetXcodeEnv(xcode_settings, built_products_dir, srcroot, configuration,
                 additional_settings=None):
    """Return the environment variables that Xcode would set. See
    http://developer.apple.com/library/mac/#documentation/DeveloperTools/Reference/XcodeBuildSettingRef/1-Build_Setting_Reference/build_setting_ref.html#//apple_ref/doc/uid/TP40003931-CH3-SW153
    for a full list.

    Args:
        xcode_settings: An XcodeSettings object. If this is None, this function
            returns an empty dict.
        built_products_dir: Absolute path to the built products dir.
        srcroot: Absolute path to the source root.
        configuration: The build configuration name.
        additional_settings: An optional dict with more values to add to the
            result.  NOTE: this dict is mutated in place (list values are
            flattened to strings, env vars are merged in) and is the object
            returned.
    """
    if not xcode_settings: return {}
    # This function is considered a friend of XcodeSettings, so let it reach into
    # its implementation details.
    spec = xcode_settings.spec
    # These are filled in on a as-needed basis.
    env = {
        'BUILT_PRODUCTS_DIR' : built_products_dir,
        'CONFIGURATION' : configuration,
        'PRODUCT_NAME' : xcode_settings.GetProductName(),
        # See /Developer/Platforms/MacOSX.platform/Developer/Library/Xcode/Specifications/MacOSX\ Product\ Types.xcspec for FULL_PRODUCT_NAME
        'SRCROOT' : srcroot,
        'SOURCE_ROOT': '${SRCROOT}',
        # This is not true for static libraries, but currently the env is only
        # written for bundles:
        'TARGET_BUILD_DIR' : built_products_dir,
        'TEMP_DIR' : '${TMPDIR}',
    }
    # SDKROOT is only exported when the configuration sets it explicitly.
    if xcode_settings.GetPerConfigSetting('SDKROOT', configuration):
        env['SDKROOT'] = xcode_settings._SdkPath(configuration)
    else:
        env['SDKROOT'] = ''
    # Executable-related variables only make sense for linkable target types.
    if spec['type'] in (
        'executable', 'static_library', 'shared_library', 'loadable_module'):
        env['EXECUTABLE_NAME'] = xcode_settings.GetExecutableName()
        env['EXECUTABLE_PATH'] = xcode_settings.GetExecutablePath()
        env['FULL_PRODUCT_NAME'] = xcode_settings.GetFullProductName()
        mach_o_type = xcode_settings.GetMachOType()
        if mach_o_type:
            env['MACH_O_TYPE'] = mach_o_type
        env['PRODUCT_TYPE'] = xcode_settings.GetProductType()
    # Bundle layout variables, only for bundle targets.
    if xcode_settings._IsBundle():
        env['CONTENTS_FOLDER_PATH'] = \
            xcode_settings.GetBundleContentsFolderPath()
        env['UNLOCALIZED_RESOURCES_FOLDER_PATH'] = \
            xcode_settings.GetBundleResourceFolder()
        env['INFOPLIST_PATH'] = xcode_settings.GetBundlePlistPath()
        env['WRAPPER_NAME'] = xcode_settings.GetWrapperName()
    install_name = xcode_settings.GetInstallName()
    if install_name:
        env['LD_DYLIB_INSTALL_NAME'] = install_name
    install_name_base = xcode_settings.GetInstallNameBase()
    if install_name_base:
        env['DYLIB_INSTALL_NAME_BASE'] = install_name_base
    if not additional_settings:
        additional_settings = {}
    else:
        # Flatten lists to strings.
        for k in additional_settings:
            if not isinstance(additional_settings[k], str):
                additional_settings[k] = ' '.join(additional_settings[k])
    # env values win over caller-provided ones on key collisions.
    additional_settings.update(env)
    for k in additional_settings:
        additional_settings[k] = _NormalizeEnvVarReferences(additional_settings[k])
    return additional_settings
def _NormalizeEnvVarReferences(str):
"""Takes a string containing variable references in the form ${FOO}, $(FOO),
or $FOO, and returns a string with all variable references in the form ${FOO}.
"""
# $FOO -> ${FOO}
str = re.sub(r'\$([a-zA-Z_][a-zA-Z0-9_]*)', r'${\1}', str)
# $(FOO) -> ${FOO}
matches = re.findall(r'(\$\(([a-zA-Z0-9\-_]+)\))', str)
for match in matches:
to_replace, variable = match
assert '$(' not in match, '$($(FOO)) variables not supported: ' + match
str = str.replace(to_replace, '${' + variable + '}')
return str
def ExpandEnvVars(string, expansions):
    """Expands ${VARIABLES}, $(VARIABLES), and $VARIABLES in string per the
    expansions list. If the variable expands to something that references
    another variable, this variable is expanded as well if it's in env --
    until no variables present in env are left."""
    for name, value in reversed(expansions):
        for reference in ('${%s}' % name, '$(%s)' % name, '$' + name):
            string = string.replace(reference, value)
    return string
def _TopologicallySortedEnvVarKeys(env):
    """Takes a dict |env| whose values are strings that can refer to other keys,
    for example env['foo'] = '$(bar) and $(baz)'. Returns a list L of all keys of
    env such that key2 is after key1 in L if env[key2] refers to env[key1].

    Throws an Exception in case of dependency cycles.
    """
    # Since environment variables can refer to other variables, the evaluation
    # order is important. Below is the logic to compute the dependency graph
    # and sort it.
    regex = re.compile(r'\$\{([a-zA-Z0-9\-_]+)\}')
    def GetEdges(node):
        # Use a definition of edges such that user_of_variable -> used_varible.
        # This happens to be easier in this case, since a variable's
        # definition contains all variables it references in a single string.
        # We can then reverse the result of the topological sort at the end.
        # Since: reverse(topsort(DAG)) = topsort(reverse_edges(DAG))
        matches = set([v for v in regex.findall(env[node]) if v in env])
        for dependee in matches:
            assert '${' not in dependee, 'Nested variables not supported: ' + dependee
        return matches
    try:
        # Topologically sort, and then reverse, because we used an edge definition
        # that's inverted from the expected result of this function (see comment
        # above).
        order = gyp.common.TopologicallySorted(env.keys(), GetEdges)
        order.reverse()
        return order
    except gyp.common.CycleError as e:
        # `as e` works on Python 2.6+ and 3.x; the old `, e` form is
        # Python-2-only syntax.
        raise GypError(
            'Xcode environment variables are cyclically dependent: ' + str(e.nodes))
def GetSortedXcodeEnv(xcode_settings, built_products_dir, srcroot,
                      configuration, additional_settings=None):
    """Returns the Xcode environment as a list of (key, value) pairs, sorted
    so that every variable appears after any variables it references."""
    env = _GetXcodeEnv(xcode_settings, built_products_dir, srcroot,
                       configuration, additional_settings)
    order = _TopologicallySortedEnvVarKeys(env)
    return [(key, env[key]) for key in order]
def GetSpecPostbuildCommands(spec, quiet=False):
    """Returns the list of postbuilds explicitly defined on |spec|, in a form
    executable by a shell.  Unless |quiet|, each postbuild is preceded by an
    echo announcing it."""
    commands = []
    for postbuild in spec.get('postbuilds', []):
        if not quiet:
            commands.append('echo POSTBUILD\\(%s\\) %s' % (
                spec['target_name'], postbuild['postbuild_name']))
        commands.append(gyp.common.EncodePOSIXShellList(postbuild['action']))
    return commands
def _HasIOSTarget(targets):
"""Returns true if any target contains the iOS specific key
IPHONEOS_DEPLOYMENT_TARGET."""
for target_dict in targets.values():
for config in target_dict['configurations'].values():
if config.get('xcode_settings', {}).get('IPHONEOS_DEPLOYMENT_TARGET'):
return True
return False
def _AddIOSDeviceConfigurations(targets):
"""Clone all targets and append -iphoneos to the name. Configure these targets
to build for iOS devices."""
for target_dict in targets.values():
for config_name in target_dict['configurations'].keys():
config = target_dict['configurations'][config_name]
new_config_name = config_name + '-iphoneos'
new_config_dict = copy.deepcopy(config)
if target_dict['toolset'] == 'target':
new_config_dict['xcode_settings']['ARCHS'] = ['armv7']
new_config_dict['xcode_settings']['SDKROOT'] = 'iphoneos'
target_dict['configurations'][new_config_name] = new_config_dict
return targets
def CloneConfigurationForDeviceAndEmulator(target_dicts):
    """If |target_dicts| contains any iOS targets, automatically create
    -iphoneos targets for iOS device builds."""
    if not _HasIOSTarget(target_dicts):
        return target_dicts
    return _AddIOSDeviceConfigurations(target_dicts)
| mit |
MoKee/android_kernel_zte_nx511j | tools/perf/util/setup.py | 2079 | 1438 | #!/usr/bin/python2
from distutils.core import setup, Extension
from os import getenv
from distutils.command.build_ext import build_ext as _build_ext
from distutils.command.install_lib import install_lib as _install_lib
class build_ext(_build_ext):
    # Distutils build_ext subclass that redirects build output into the
    # directories chosen by the perf Makefile (see the PYTHON_EXTBUILD_*
    # environment variables read below).
    def finalize_options(self):
        _build_ext.finalize_options(self)
        # build_lib / build_tmp are module-level globals defined later in
        # this script from the environment.
        self.build_lib = build_lib
        self.build_temp = build_tmp
class install_lib(_install_lib):
    # Install directly from the PYTHON_EXTBUILD_LIB staging directory used
    # by build_ext above.
    def finalize_options(self):
        _install_lib.finalize_options(self)
        self.build_dir = build_lib
# Compiler flags for the extension; CFLAGS from the environment is appended
# so the kernel Makefile can inject extra options.
cflags = ['-fno-strict-aliasing', '-Wno-write-strings']
cflags += getenv('CFLAGS', '').split()
# Staging directories and prebuilt static libraries, all exported by the
# perf Makefile through the environment.
build_lib = getenv('PYTHON_EXTBUILD_LIB')
build_tmp = getenv('PYTHON_EXTBUILD_TMP')
libtraceevent = getenv('LIBTRACEEVENT')
liblk = getenv('LIBLK')
# NOTE: file() is the Python 2 built-in (the shebang targets python2).
# Reads the list of extension source files, skipping blank lines and
# '#' comments.
ext_sources = [f.strip() for f in file('util/python-ext-sources')
                if len(f.strip()) > 0 and f[0] != '#']
perf = Extension('perf',
                  sources = ext_sources,
                  include_dirs = ['util/include'],
                  extra_compile_args = cflags,
                  extra_objects = [libtraceevent, liblk],
                 )
setup(name='perf',
      version='0.1',
      description='Interface with the Linux profiling infrastructure',
      author='Arnaldo Carvalho de Melo',
      author_email='acme@redhat.com',
      license='GPLv2',
      url='http://perf.wiki.kernel.org',
      ext_modules=[perf],
      cmdclass={'build_ext': build_ext, 'install_lib': install_lib})
| gpl-2.0 |
faarwa/EngSocP5 | zxing/cpp/scons/scons-local-2.0.0.final.0/SCons/SConsign.py | 34 | 12750 | """SCons.SConsign
Writing and reading information to the .sconsign file or files.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/SConsign.py 5023 2010/06/14 22:05:46 scons"
import SCons.compat
import os
# compat layer imports "cPickle" for us if it's available.
import pickle
import SCons.dblite
import SCons.Warnings
def corrupt_dblite_warning(filename):
    # Hook installed into SCons.dblite below: emitted when a .sconsign
    # database file fails to load, so the file is skipped with a warning
    # instead of aborting the build.
    SCons.Warnings.warn(SCons.Warnings.CorruptSConsignWarning,
                        "Ignoring corrupt .sconsign file: %s"%filename)
# Configure SCons.dblite to tolerate corrupt database files, routing them
# through the warning hook above.
SCons.dblite.ignore_corrupt_dbfiles = 1
SCons.dblite.corruption_warning = corrupt_dblite_warning

#XXX Get rid of the global array so this becomes re-entrant.
# Registry of all signature collections created this run; flushed by write().
sig_files = []

# Info for the database SConsign implementation (now the default):
# "DataBase" is a dictionary that maps top-level SConstruct directories
# to open database handles.
# "DB_Module" is the Python database module to create the handles.
# "DB_Name" is the base name of the database file (minus any
# extension the underlying DB module will add).
DataBase = {}
DB_Module = SCons.dblite
DB_Name = ".sconsign"
# Writable database handles queued for a sync() at the end of the run.
DB_sync_list = []
def Get_DataBase(dir):
    """Return an open (database, mode) pair for |dir|.

    When the database name is relative and Repositories are configured,
    the directory is looked up through the top-of-tree Repository list so
    signature data can be read from a repository ("r" mode past the first
    entry).  Handles are cached in the module-level DataBase dict, and
    writable handles are queued on DB_sync_list for syncing at exit.
    """
    global DataBase, DB_Module, DB_Name
    top = dir.fs.Top
    if not os.path.isabs(DB_Name) and top.repositories:
        mode = "c"
        for d in [top] + top.repositories:
            if dir.is_under(d):
                try:
                    return DataBase[d], mode
                except KeyError:
                    path = d.entry_abspath(DB_Name)
                    try: db = DataBase[d] = DB_Module.open(path, mode)
                    except (IOError, OSError): pass
                    else:
                        if mode != "r":
                            DB_sync_list.append(db)
                        return db, mode
            # Only the first (top) directory may be created/written;
            # repositories are opened read-only.
            mode = "r"
    try:
        return DataBase[top], "c"
    except KeyError:
        db = DataBase[top] = DB_Module.open(DB_Name, "c")
        DB_sync_list.append(db)
        return db, "c"
    except TypeError:
        # Python 2 print statement (this module targets Python 2); dumps the
        # cache for debugging before re-raising.
        print "DataBase =", DataBase
        raise
def Reset():
    """Reset global state. Used by unit tests that end up using
    SConsign multiple times to get a clean slate for each test."""
    global sig_files, DB_sync_list
    # Drop every registered signature collection and all databases queued
    # for syncing.  Note: open handles cached in DataBase are not cleared.
    sig_files = []
    DB_sync_list = []
# Entry keys are stored case-normalized so lookups behave consistently on
# case-insensitive filesystems; cache the function at module level.
normcase = os.path.normcase
def write():
    """Flush every registered signature collection and sync all queued
    database handles.  Called once at the end of the run."""
    global sig_files
    for sig_file in sig_files:
        # sync=0: individual files don't sync; the databases below do it once.
        sig_file.write(sync=0)
    for db in DB_sync_list:
        try:
            syncmethod = db.sync
        except AttributeError:
            pass # Not all anydbm modules have sync() methods.
        else:
            syncmethod()
class SConsignEntry(object):
    """
    Wrapper class for the generic entry in a .sconsign file.
    The Node subclass populates it with attributes as it pleases.

    XXX As coded below, we do expect a '.binfo' attribute to be added,
    but we'll probably generalize this in the next refactorings.
    """
    # Bump this when the pickled entry format changes.
    current_version_id = 1
    def __init__(self):
        # Create an object attribute from the class attribute so it ends up
        # in the pickled data in the .sconsign file.
        # (Must be assigned to self: a bare local `_version_id = ...` would
        # never reach the instance dict and thus never be pickled.)
        self._version_id = self.current_version_id
    def convert_to_sconsign(self):
        # Delegate to the build-info object attached by the Node subclass.
        self.binfo.convert_to_sconsign()
    def convert_from_sconsign(self, dir, name):
        self.binfo.convert_from_sconsign(dir, name)
class Base(object):
    """
    This is the controlling class for the signatures for the collection of
    entries associated with a specific directory. The actual directory
    association will be maintained by a subclass that is specific to
    the underlying storage method. This class provides a common set of
    methods for fetching and storing the individual bits of information
    that make up signature entry.
    """
    def __init__(self):
        self.entries = {}
        self.dirty = False
        self.to_be_merged = {}

    def get_entry(self, filename):
        """Fetch the specified entry attribute."""
        return self.entries[filename]

    def set_entry(self, filename, obj):
        """Set the entry, marking the collection as needing a write."""
        self.entries[filename] = obj
        self.dirty = True

    def do_not_set_entry(self, filename, obj):
        """No-op replacement for set_entry() on read-only collections."""
        pass

    def store_info(self, filename, node):
        """Queue |node|'s signature info to be merged into the entries."""
        entry = node.get_stored_info()
        entry.binfo.merge(node.get_binfo())
        self.to_be_merged[filename] = node
        self.dirty = True

    def do_not_store_info(self, filename, node):
        """No-op replacement for store_info() on read-only collections."""
        pass

    def merge(self):
        """Fold every queued node's info into self.entries."""
        for filename, node in self.to_be_merged.items():
            entry = node.get_stored_info()
            try:
                ninfo = entry.ninfo
            except AttributeError:
                # This happens with SConf Nodes, because the configuration
                # subsystem takes direct control over how the build decision
                # is made and its information stored.
                pass
            else:
                ninfo.merge(node.get_ninfo())
            self.entries[filename] = entry
        self.to_be_merged = {}
class DB(Base):
    """
    A Base subclass that reads and writes signature information
    from a global .sconsign.db* file--the actual file suffix is
    determined by the database module.
    """
    def __init__(self, dir):
        Base.__init__(self)
        self.dir = dir
        db, mode = Get_DataBase(dir)
        # Read using the path relative to the top of the Repository
        # (self.dir.tpath) from which we're fetching the signature
        # information.
        path = normcase(dir.tpath)
        try:
            rawentries = db[path]
        except KeyError:
            # No stored entries for this directory yet; start empty.
            pass
        else:
            try:
                self.entries = pickle.loads(rawentries)
                if not isinstance(self.entries, dict):
                    # Reset to a sane empty dict before raising so the
                    # warning path below leaves us usable.
                    self.entries = {}
                    raise TypeError
            except KeyboardInterrupt:
                raise
            # Python 2 except syntax (`, e`); this module targets Python 2.
            except Exception, e:
                SCons.Warnings.warn(SCons.Warnings.CorruptSConsignWarning,
                                    "Ignoring corrupt sconsign entry : %s (%s)\n"%(self.dir.tpath, e))
            for key, entry in self.entries.items():
                entry.convert_from_sconsign(dir, key)
        if mode == "r":
            # This directory is actually under a repository, which means
            # likely they're reaching in directly for a dependency on
            # a file there. Don't actually set any entry info, so we
            # won't try to write to that .sconsign.dblite file.
            self.set_entry = self.do_not_set_entry
            self.store_info = self.do_not_store_info
        global sig_files
        sig_files.append(self)
    def write(self, sync=1):
        # Flush the collection into the shared database; no-op when nothing
        # changed.  With sync=0 the actual db.sync() is deferred to the
        # module-level write() at the end of the run.
        if not self.dirty:
            return
        self.merge()
        db, mode = Get_DataBase(self.dir)
        # Write using the path relative to the top of the SConstruct
        # directory (self.dir.path), not relative to the top of
        # the Repository; we only write to our own .sconsign file,
        # not to .sconsign files in Repositories.
        path = normcase(self.dir.path)
        for key, entry in self.entries.items():
            entry.convert_to_sconsign()
        db[path] = pickle.dumps(self.entries, 1)
        if sync:
            try:
                syncmethod = db.sync
            except AttributeError:
                # Not all anydbm modules have sync() methods.
                pass
            else:
                syncmethod()
class Dir(Base):
    # In-memory signature collection unpickled from an open file object.
    def __init__(self, fp=None, dir=None):
        """
        fp - file pointer to read entries from
        """
        Base.__init__(self)
        if not fp:
            return
        self.entries = pickle.load(fp)
        if not isinstance(self.entries, dict):
            # Reset before raising so a caller that catches the error (see
            # DirFile.__init__) is left with a usable empty collection.
            self.entries = {}
            raise TypeError
        if dir:
            for key, entry in self.entries.items():
                entry.convert_from_sconsign(dir, key)
class DirFile(Dir):
    """
    Encapsulates reading and writing a per-directory .sconsign file.
    """
    def __init__(self, dir):
        """
        dir - the directory for the file
        """
        self.dir = dir
        self.sconsign = os.path.join(dir.path, '.sconsign')
        try:
            fp = open(self.sconsign, 'rb')
        except IOError:
            # No existing .sconsign; Dir.__init__ treats fp=None as empty.
            fp = None
        try:
            Dir.__init__(self, fp, dir)
        except KeyboardInterrupt:
            raise
        except:
            # Any unpickling problem means a corrupt file; warn and start
            # with the empty collection Dir.__init__ left behind.
            SCons.Warnings.warn(SCons.Warnings.CorruptSConsignWarning,
                                "Ignoring corrupt .sconsign file: %s"%self.sconsign)
        global sig_files
        sig_files.append(self)
    def write(self, sync=1):
        """
        Write the .sconsign file to disk.

        Try to write to a temporary file first, and rename it if we
        succeed. If we can't write to the temporary file, it's
        probably because the directory isn't writable (and if so,
        how did we build anything in this directory, anyway?), so
        try to write directly to the .sconsign file as a backup.
        If we can't rename, try to copy the temporary contents back
        to the .sconsign file. Either way, always try to remove
        the temporary file at the end.
        """
        if not self.dirty:
            return
        self.merge()
        # Per-process temp name so concurrent builds don't collide.
        temp = os.path.join(self.dir.path, '.scons%d' % os.getpid())
        try:
            file = open(temp, 'wb')
            fname = temp
        except IOError:
            try:
                file = open(self.sconsign, 'wb')
                fname = self.sconsign
            except IOError:
                return
        for key, entry in self.entries.items():
            entry.convert_to_sconsign()
        pickle.dump(self.entries, file, 1)
        file.close()
        if fname != self.sconsign:
            try:
                mode = os.stat(self.sconsign)[0]
                # 0666 is a Python 2 octal literal (0o666); this module
                # targets Python 2.  Make the old file writable/removable.
                os.chmod(self.sconsign, 0666)
                os.unlink(self.sconsign)
            except (IOError, OSError):
                # Try to carry on in the face of either OSError
                # (things like permission issues) or IOError (disk
                # or network issues). If there's a really dangerous
                # issue, it should get re-raised by the calls below.
                pass
            try:
                os.rename(fname, self.sconsign)
            except OSError:
                # An OSError failure to rename may indicate something
                # like the directory has no write permission, but
                # the .sconsign file itself might still be writable,
                # so try writing on top of it directly. An IOError
                # here, or in any of the following calls, would get
                # raised, indicating something like a potentially
                # serious disk or network issue.
                open(self.sconsign, 'wb').write(open(fname, 'rb').read())
                os.chmod(self.sconsign, mode)
            try:
                os.unlink(temp)
            except (IOError, OSError):
                pass
# Default per-directory storage class; File(None) below switches this to
# the one-file-per-directory DirFile implementation.
ForDirectory = DB
def File(name, dbm_module=None):
    """
    Arrange for all signatures to be stored in a global .sconsign.db*
    file.

    name: base name of the global database file, or None to revert to
          one .sconsign file per directory (DirFile).
    dbm_module: optional database module to use in place of SCons.dblite.
    """
    global ForDirectory, DB_Name, DB_Module
    if name is None:
        ForDirectory = DirFile
        DB_Module = None
    else:
        ForDirectory = DB
        DB_Name = name
        # PEP 8 identity-test idiom (was `if not dbm_module is None`).
        if dbm_module is not None:
            DB_Module = dbm_module
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| gpl-3.0 |
eahneahn/free | lib/python2.7/site-packages/simplejson/tests/test_pass1.py | 147 | 1746 | from unittest import TestCase
import simplejson as json
# from http://json.org/JSON_checker/test/pass1.json
JSON = r'''
[
"JSON Test Pattern pass1",
{"object with 1 member":["array with 1 element"]},
{},
[],
-42,
true,
false,
null,
{
"integer": 1234567890,
"real": -9876.543210,
"e": 0.123456789e-12,
"E": 1.234567890E+34,
"": 23456789012E66,
"zero": 0,
"one": 1,
"space": " ",
"quote": "\"",
"backslash": "\\",
"controls": "\b\f\n\r\t",
"slash": "/ & \/",
"alpha": "abcdefghijklmnopqrstuvwyz",
"ALPHA": "ABCDEFGHIJKLMNOPQRSTUVWYZ",
"digit": "0123456789",
"special": "`1~!@#$%^&*()_+-={':[,]}|;.</>?",
"hex": "\u0123\u4567\u89AB\uCDEF\uabcd\uef4A",
"true": true,
"false": false,
"null": null,
"array":[ ],
"object":{ },
"address": "50 St. James Street",
"url": "http://www.JSON.org/",
"comment": "// /* <!-- --",
"# -- --> */": " ",
" s p a c e d " :[1,2 , 3
,
4 , 5 , 6 ,7 ],"compact": [1,2,3,4,5,6,7],
"jsontext": "{\"object with 1 member\":[\"array with 1 element\"]}",
"quotes": "" \u0022 %22 0x22 034 "",
"\/\\\"\uCAFE\uBABE\uAB98\uFCDE\ubcda\uef4A\b\f\n\r\t`1~!@#$%^&*()_+-=[]{}|;:',./<>?"
: "A key can be any string"
},
0.5 ,98.6
,
99.44
,
1066,
1e1,
0.1e1,
1e-1,
1e00,2e+00,2e-00
,"rosebud"]
'''
class TestPass1(TestCase):
    # Round-trips the canonical json.org "pass1" checker document (the JSON
    # module-level constant above) through loads -> dumps -> loads.
    def test_parse(self):
        # test in/out equivalence and parsing
        res = json.loads(JSON)
        out = json.dumps(res)
        self.assertEqual(res, json.loads(out))
| agpl-3.0 |
denisenkom/django | tests/model_regress/models.py | 134 | 2195 | # coding: utf-8
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
# (value, label) pairs for Article.status below.
CHOICES = (
    (1, 'first'),
    (2, 'second'),
)
@python_2_unicode_compatible
class Article(models.Model):
    # Regression model exercising field defaults, a nullable choices field,
    # default ordering, and a non-ASCII verbose_name.
    headline = models.CharField(max_length=100, default='Default headline')
    pub_date = models.DateTimeField()
    status = models.IntegerField(blank=True, null=True, choices=CHOICES)
    misc_data = models.CharField(max_length=100, blank=True)
    article_text = models.TextField()
    class Meta:
        ordering = ('pub_date', 'headline')
        # A utf-8 verbose name (Ångström's Articles) to test they are valid.
        verbose_name = "\xc3\x85ngstr\xc3\xb6m's Articles"
    def __str__(self):
        return self.headline
class Movie(models.Model):
    #5218: Test models with non-default primary keys / AutoFields
    movie_id = models.AutoField(primary_key=True)
    name = models.CharField(max_length=60)


class Party(models.Model):
    # Date field that may be NULL.
    when = models.DateField(null=True)


class Event(models.Model):
    # Non-null datetime counterpart to Party.when.
    when = models.DateTimeField()
@python_2_unicode_compatible
class Department(models.Model):
    """Department with an explicit (caller-supplied) integer primary key."""
    id = models.PositiveIntegerField(primary_key=True)
    name = models.CharField(max_length=200)

    def __str__(self):
        return self.name


@python_2_unicode_compatible
class Worker(models.Model):
    """Worker belonging to a Department."""
    department = models.ForeignKey(Department)
    name = models.CharField(max_length=200)

    def __str__(self):
        return self.name
@python_2_unicode_compatible
class BrokenUnicodeMethod(models.Model):
    """Fixture whose __str__ deliberately fails to decode (regression test)."""
    name = models.CharField(max_length=7)

    def __str__(self):
        # Intentionally broken (invalid start byte in byte string).
        return b'Name\xff: %s'.decode() % self.name
class NonAutoPK(models.Model):
    # Character primary key instead of the implicit AutoField.
    name = models.CharField(max_length=10, primary_key=True)


#18432: Chained foreign keys with to_field produce incorrect query
class Model1(models.Model):
    pkey = models.IntegerField(unique=True, db_index=True)


class Model2(models.Model):
    # FK targets Model1.pkey rather than Model1's implicit pk.
    model1 = models.ForeignKey(Model1, unique=True, to_field='pkey')


class Model3(models.Model):
    # Second link in the to_field chain (see ticket #18432 above).
    model2 = models.ForeignKey(Model2, unique=True, to_field='model1')
| bsd-3-clause |
alrusdi/lettuce | tests/integration/lib/Django-1.3/tests/modeltests/str/models.py | 92 | 1213 | # -*- coding: utf-8 -*-
"""
2. Adding __str__() or __unicode__() to models
Although it's not a strict requirement, each model should have a
``__str__()`` or ``__unicode__()`` method to return a "human-readable"
representation of the object. Do this not only for your own sanity when dealing
with the interactive prompt, but also because objects' representations are used
throughout Django's automatically-generated admin.
Normally, you should write ``__unicode__()`` method, since this will work for
all field types (and Django will automatically provide an appropriate
``__str__()`` method). However, you can write a ``__str__()`` method directly,
if you prefer. You must be careful to encode the results correctly, though.
"""
from django.db import models
class Article(models.Model):
    """Model that defines __str__ directly instead of __unicode__."""
    headline = models.CharField(max_length=100)
    pub_date = models.DateTimeField()

    def __str__(self):
        # Caution: this is only safe if you are certain that headline will be
        # in ASCII.
        return self.headline
class InternationalArticle(models.Model):
headline = models.CharField(max_length=100)
pub_date = models.DateTimeField()
def __unicode__(self):
return self.headline | gpl-3.0 |
movmov/cc | vendor/Twisted-10.0.0/twisted/conch/client/agent.py | 60 | 1740 | # -*- test-case-name: twisted.conch.test.test_default -*-
# Copyright (c) 2001-2009 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Accesses the key agent for user authentication.
Maintainer: Paul Swartz
"""
import os
from twisted.conch.ssh import agent, channel, keys
from twisted.internet import protocol, reactor
from twisted.python import log
class SSHAgentClient(agent.SSHAgentClient):
    """Agent protocol client that caches the agent's public-key blobs."""

    def __init__(self):
        agent.SSHAgentClient.__init__(self)
        # Raw public-key blobs fetched from the agent; consumed FIFO by
        # getPublicKey().
        self.blobs = []

    def getPublicKeys(self):
        """Ask the agent for its identities; fills self.blobs when it fires."""
        return self.requestIdentities().addCallback(self._cbPublicKeys)

    def _cbPublicKeys(self, blobcomm):
        # blobcomm is a sequence of (blob, comment) pairs; keep only the blobs.
        log.msg('got %i public keys' % len(blobcomm))
        self.blobs = [x[0] for x in blobcomm]

    def getPublicKey(self):
        """
        Return a L{Key} from the first blob in C{self.blobs}, if any, or
        return C{None}.
        """
        if self.blobs:
            return keys.Key.fromString(self.blobs.pop(0))
        return None
class SSHAgentForwardingChannel(channel.SSHChannel):
    """
    SSH channel that relays agent-forwarding traffic between the remote
    peer and the local agent reachable at SSH_AUTH_SOCK.
    """

    def channelOpen(self, specificData):
        # BUG FIX: initialize self.local before the asynchronous connect, so
        # that closed() is safe even when the connection attempt fails
        # (previously self.local was only assigned in _cbGotLocal, and
        # closed() raised AttributeError after a connect error).
        self.local = None
        self.buf = ''
        cc = protocol.ClientCreator(reactor, SSHAgentForwardingLocal)
        d = cc.connectUNIX(os.environ['SSH_AUTH_SOCK'])
        d.addCallback(self._cbGotLocal)
        d.addErrback(lambda x: self.loseConnection())

    def _cbGotLocal(self, local):
        # Splice the two transports together: channel data goes straight to
        # the local agent socket and vice versa.
        self.local = local
        self.dataReceived = self.local.transport.write
        self.local.dataReceived = self.write

    def dataReceived(self, data):
        # Buffer data that arrives before the local connection exists; once
        # _cbGotLocal runs, this method is shadowed by the direct write.
        self.buf += data

    def closed(self):
        if self.local:
            self.local.loseConnection()
            self.local = None
class SSHAgentForwardingLocal(protocol.Protocol):
    """Placeholder protocol; its dataReceived is rebound by
    SSHAgentForwardingChannel._cbGotLocal."""
    pass
| apache-2.0 |
ry/tensorflow-resnet | image_processing.py | 3 | 17702 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Read and preprocess image data.
Image processing occurs on a single image at a time. Image are read and
preprocessed in pararllel across mulitple threads. The resulting images
are concatenated together to form a single batch for training or evaluation.
-- Provide processed image data for a network:
inputs: Construct batches of evaluation examples of images.
distorted_inputs: Construct batches of training examples of images.
batch_inputs: Construct batches of training or evaluation examples of images.
-- Data processing:
parse_example_proto: Parses an Example proto containing a training example
of an image.
-- Image decoding:
decode_jpeg: Decode a JPEG encoded string into a 3-D float32 Tensor.
-- Image preprocessing:
image_preprocessing: Decode and preprocess one image for evaluation or training
distort_image: Distort one image for training a network.
eval_image: Prepare one image for evaluation.
distort_color: Distort the color in one image for training.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
FLAGS = tf.app.flags.FLAGS

tf.app.flags.DEFINE_integer('num_preprocess_threads', 4,
                            """Number of preprocessing threads per tower. """
                            """Please make this a multiple of 4.""")
tf.app.flags.DEFINE_integer('num_readers', 4,
                            """Number of parallel readers during train.""")

# Images are preprocessed asynchronously using multiple threads specified by
# --num_preprocess_threads and the resulting processed images are stored in a
# random shuffling queue. The shuffling queue dequeues --batch_size images
# for processing on a given Inception tower. A larger shuffling queue guarantees
# better mixing across examples within a batch and results in slightly higher
# predictive performance in a trained model. Empirically,
# --input_queue_memory_factor=16 works well. A value of 16 implies a queue size
# of 1024*16 images. Assuming RGB 299x299 images, this implies a queue size of
# 16GB. If the machine is memory limited, then decrease this factor to
# decrease the CPU memory footprint, accordingly.
tf.app.flags.DEFINE_integer(
    'input_queue_memory_factor', 16,
    """Size of the queue of preprocessed images. """
    """Default is ideal but try smaller values, e.g. """
    """4, 2 or 1, if host memory is constrained. See """
    """comments in code for more details.""")
def inputs(dataset, batch_size=None, num_preprocess_threads=None):
    """Build evaluation batches of ImageNet images.

    Only minimal evaluation-time preprocessing happens here (central crop
    and resize to the network's input size) before batching.

    Args:
      dataset: instance of Dataset class specifying the dataset.
      batch_size: integer, number of examples in batch; falls back to
        FLAGS.batch_size when falsy.
      num_preprocess_threads: integer, total number of preprocessing threads,
        or None to use FLAGS.num_preprocess_threads.

    Returns:
      images: 4-D tensor [batch_size, FLAGS.image_size, image_size, 3].
      labels: 1-D integer tensor of [FLAGS.batch_size].
    """
    batch_size = batch_size or FLAGS.batch_size

    # Pin all input processing to the CPU so the GPU stays free for the
    # forward inference and back-propagation.
    with tf.device('/cpu:0'):
        return batch_inputs(
            dataset,
            batch_size,
            train=False,
            num_preprocess_threads=num_preprocess_threads,
            num_readers=1)
def decode_jpeg(image_buffer, scope=None):
    """Decode a JPEG string into one 3-D float image Tensor.

    Args:
      image_buffer: scalar string Tensor.
      scope: Optional scope for op_scope.
    Returns:
      3-D float Tensor with values ranging from [0, 1).
    """
    with tf.op_scope([image_buffer], scope, 'decode_jpeg'):
        # Decode the string as an RGB JPEG.
        # Note that the resulting image contains an unknown height and width
        # that is set dynamically by decode_jpeg. In other words, the height
        # and width of image is unknown at compile-time.
        image = tf.image.decode_jpeg(image_buffer, channels=3)

        # After this point, all image pixels reside in [0,1)
        # until the very end, when they're rescaled to (-1, 1). The various
        # adjust_* ops all require this range for dtype float.
        # (convert_image_dtype performs the division by 255 for uint8 input.)
        image = tf.image.convert_image_dtype(image, dtype=tf.float32)
        return image
def distort_color(image, thread_id=0, scope=None):
    """Distort the color of the image.

    Each color distortion is non-commutative and thus ordering of the color ops
    matters. Ideally we would randomly permute the ordering of the color ops.
    Rather then adding that level of complication, we select a distinct ordering
    of color ops for each preprocessing thread.

    Args:
      image: Tensor containing single image.
      thread_id: preprocessing thread ID.
      scope: Optional scope for op_scope.
    Returns:
      color-distorted image
    """
    with tf.op_scope([image], scope, 'distort_color'):
        # Even-numbered threads get one ordering, odd-numbered the other.
        color_ordering = thread_id % 2

        if color_ordering == 0:
            image = tf.image.random_brightness(image, max_delta=32. / 255.)
            image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
            image = tf.image.random_hue(image, max_delta=0.2)
            image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
        elif color_ordering == 1:
            image = tf.image.random_brightness(image, max_delta=32. / 255.)
            image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
            image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
            image = tf.image.random_hue(image, max_delta=0.2)

        # The random_* ops do not necessarily clamp.
        image = tf.clip_by_value(image, 0.0, 1.0)
        return image
def distort_image(image, height, width, bbox, thread_id=0, scope=None):
    """Distort one image for training a network.

    Distorting images provides a useful technique for augmenting the data
    set during training in order to make the network invariant to aspects
    of the image that do not effect the label.

    Args:
      image: 3-D float Tensor of image
      height: integer
      width: integer
      bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
        where each coordinate is [0, 1) and the coordinates are arranged
        as [ymin, xmin, ymax, xmax]. (Currently unused — see NOTE below.)
      thread_id: integer indicating the preprocessing thread.
      scope: Optional scope for op_scope.

    Returns:
      3-D float Tensor of distorted image used for training.
    """
    with tf.op_scope([image, height, width, bbox], scope, 'distort_image'):
        # NOTE(ry) I unceremoniously removed all the bounding box code.
        # Original here: https://github.com/tensorflow/models/blob/148a15fb043dacdd1595eb4c5267705fbd362c6a/inception/inception/image_processing.py
        distorted_image = image

        # This resizing operation may distort the images because the aspect
        # ratio is not respected. We select a resize method in a round robin
        # fashion based on the thread number.
        # Note that ResizeMethod contains 4 enumerated resizing methods.
        resize_method = thread_id % 4
        distorted_image = tf.image.resize_images(distorted_image, height,
                                                 width, resize_method)
        # Restore the shape since the dynamic slice based upon the bbox_size loses
        # the third dimension.
        distorted_image.set_shape([height, width, 3])
        # Only thread 0 emits image summaries, to avoid duplicates.
        if not thread_id:
            tf.image_summary('cropped_resized_image',
                             tf.expand_dims(distorted_image, 0))

        # Randomly flip the image horizontally.
        distorted_image = tf.image.random_flip_left_right(distorted_image)

        # Randomly distort the colors.
        distorted_image = distort_color(distorted_image, thread_id)

        if not thread_id:
            tf.image_summary('final_distorted_image',
                             tf.expand_dims(distorted_image, 0))
        return distorted_image
def eval_image(image, height, width, scope=None):
    """Prepare one image for evaluation.

    Args:
      image: 3-D float Tensor
      height: integer
      width: integer
      scope: Optional scope for op_scope.
    Returns:
      3-D float Tensor of prepared image.
    """
    with tf.op_scope([image, height, width], scope, 'eval_image'):
        # Crop the central region of the image with an area containing 87.5% of
        # the original image.
        image = tf.image.central_crop(image, central_fraction=0.875)

        # Resize the image to the original height and width.
        # (resize_bilinear expects a 4-D batch, hence the expand/squeeze pair.)
        image = tf.expand_dims(image, 0)
        image = tf.image.resize_bilinear(image, [height, width],
                                         align_corners=False)
        image = tf.squeeze(image, [0])
        return image
def image_preprocessing(image_buffer, bbox, train, thread_id=0):
    """Decode and preprocess one image for evaluation or training.

    Args:
      image_buffer: JPEG encoded string Tensor
      bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
        where each coordinate is [0, 1) and the coordinates are arranged as
        [ymin, xmin, ymax, xmax].
      train: boolean
      thread_id: integer indicating preprocessing thread

    Returns:
      3-D float Tensor containing an appropriately scaled image

    Raises:
      ValueError: if user does not provide bounding box
    """
    if bbox is None:
        raise ValueError('Please supply a bounding box.')

    image = decode_jpeg(image_buffer)
    # Both dimensions come from the same flag, so the output is square.
    height = FLAGS.input_size
    width = FLAGS.input_size

    if train:
        image = distort_image(image, height, width, bbox, thread_id)
    else:
        image = eval_image(image, height, width)

    # Finally, rescale to [-1,1] instead of [0, 1)
    image = tf.sub(image, 0.5)
    image = tf.mul(image, 2.0)
    return image
def parse_example_proto(example_serialized):
    """Parses an Example proto containing a training example of an image.

    The output of the build_image_data.py image preprocessing script is a dataset
    containing serialized Example protocol buffers. Each Example proto contains
    the following fields:

      image/height: 462
      image/width: 581
      image/colorspace: 'RGB'
      image/channels: 3
      image/class/label: 615
      image/class/synset: 'n03623198'
      image/class/text: 'knee pad'
      image/object/bbox/xmin: 0.1
      image/object/bbox/xmax: 0.9
      image/object/bbox/ymin: 0.2
      image/object/bbox/ymax: 0.6
      image/object/bbox/label: 615
      image/format: 'JPEG'
      image/filename: 'ILSVRC2012_val_00041207.JPEG'
      image/encoded: <JPEG encoded string>

    Args:
      example_serialized: scalar Tensor tf.string containing a serialized
        Example protocol buffer.

    Returns:
      filename: Tensor tf.string containing the filename
      label: Tensor tf.int32 containing the label.
      bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
        where each coordinate is [0, 1) and the coordinates are arranged as
        [ymin, xmin, ymax, xmax].
      text: Tensor tf.string containing the human-readable label.
    """
    # Dense features in Example proto.
    feature_map = {
        'image/filename': tf.FixedLenFeature(
            [], dtype=tf.string, default_value=''),
        'image/class/label': tf.FixedLenFeature(
            [1], dtype=tf.int64, default_value=-1),
        'image/class/text': tf.FixedLenFeature(
            [], dtype=tf.string, default_value=''),
    }
    sparse_float32 = tf.VarLenFeature(dtype=tf.float32)
    # Sparse features in Example proto.
    feature_map.update({k: sparse_float32
                        for k in
                        ['image/object/bbox/xmin', 'image/object/bbox/ymin',
                         'image/object/bbox/xmax', 'image/object/bbox/ymax']})

    features = tf.parse_single_example(example_serialized, feature_map)
    # The label is stored as int64 but consumed as int32 downstream.
    label = tf.cast(features['image/class/label'], dtype=tf.int32)

    xmin = tf.expand_dims(features['image/object/bbox/xmin'].values, 0)
    ymin = tf.expand_dims(features['image/object/bbox/ymin'].values, 0)
    xmax = tf.expand_dims(features['image/object/bbox/xmax'].values, 0)
    ymax = tf.expand_dims(features['image/object/bbox/ymax'].values, 0)

    # Note that we impose an ordering of (y, x) just to make life difficult.
    bbox = tf.concat(0, [ymin, xmin, ymax, xmax])

    # Force the variable number of bounding boxes into the shape
    # [1, num_boxes, coords].
    bbox = tf.expand_dims(bbox, 0)
    bbox = tf.transpose(bbox, [0, 2, 1])

    return features['image/filename'], label, bbox, features['image/class/text']
def batch_inputs(dataset,
                 batch_size,
                 train,
                 num_preprocess_threads=None,
                 num_readers=1):
    """Construct batches of training or evaluation examples from the image dataset.

    Args:
      dataset: instance of Dataset class specifying the dataset.
        See dataset.py for details.
      batch_size: integer
      train: boolean
      num_preprocess_threads: integer, total number of preprocessing threads
      num_readers: integer, number of parallel readers

    Returns:
      images: 4-D float Tensor of a batch of images
      labels: 1-D integer Tensor of [batch_size].

    Raises:
      ValueError: if data is not found
    """
    with tf.name_scope('batch_processing'):
        data_files = dataset.data_files()
        if data_files is None:
            raise ValueError('No data files found for this dataset')

        # Create filename_queue
        if train:
            filename_queue = tf.train.string_input_producer(
                data_files, shuffle=True, capacity=16)
        else:
            filename_queue = tf.train.string_input_producer(data_files,
                                                            shuffle=False,
                                                            capacity=1)
        if num_preprocess_threads is None:
            num_preprocess_threads = FLAGS.num_preprocess_threads

        if num_preprocess_threads % 4:
            # BUG FIX: the message and its argument were previously passed as
            # two separate ValueError arguments (logging style), so %d was
            # never substituted and the literal % was unescaped. Format the
            # string explicitly instead.
            raise ValueError('Please make num_preprocess_threads a multiple '
                             'of 4 (%d %% 4 != 0).' % num_preprocess_threads)

        if num_readers is None:
            num_readers = FLAGS.num_readers

        if num_readers < 1:
            raise ValueError('Please make num_readers at least 1')

        # Approximate number of examples per shard.
        examples_per_shard = 1024
        # Size the random shuffle queue to balance between good global
        # mixing (more examples) and memory use (fewer examples).
        # 1 image uses 299*299*3*4 bytes = 1MB
        # The default input_queue_memory_factor is 16 implying a shuffling queue
        # size: examples_per_shard * 16 * 1MB = 17.6GB
        min_queue_examples = examples_per_shard * FLAGS.input_queue_memory_factor
        if train:
            examples_queue = tf.RandomShuffleQueue(
                capacity=min_queue_examples + 3 * batch_size,
                min_after_dequeue=min_queue_examples,
                dtypes=[tf.string])
        else:
            examples_queue = tf.FIFOQueue(
                capacity=examples_per_shard + 3 * batch_size,
                dtypes=[tf.string])

        reader = tf.TFRecordReader()
        _, example_serialized = reader.read(filename_queue)

        filename, label_index, bbox, label_text = parse_example_proto(example_serialized)
        # NOTE(review): concatenates a Python str with string tensors via `+`
        # — assumes the installed TF version supports this for tf.string.
        fn = FLAGS.data_dir + '/' + label_text + '/' + filename

        # Enqueue the reconstructed file paths; the per-thread readers below
        # dequeue and open them.
        examples_qr = tf.train.queue_runner.QueueRunner(examples_queue,
                                                        [examples_queue.enqueue([fn])])
        tf.train.queue_runner.add_queue_runner(examples_qr)

        images_and_labels = []
        for thread_id in range(num_preprocess_threads):
            # Parse a serialized Example proto to extract the image and metadata.
            whole_file_reader = tf.WholeFileReader()
            _, image_buffer = whole_file_reader.read(examples_queue)

            image = image_preprocessing(image_buffer, bbox, train, thread_id)
            images_and_labels.append([image, label_index])

        images, label_index_batch = tf.train.batch_join(
            images_and_labels,
            batch_size=batch_size,
            capacity=2 * num_preprocess_threads * batch_size)

        # Reshape images into these desired dimensions.
        height = FLAGS.image_size
        width = FLAGS.image_size
        depth = 3

        images = tf.cast(images, tf.float32)
        images = tf.reshape(images, shape=[batch_size, height, width, depth])

        # Display the training images in the visualizer.
        tf.image_summary('images', images)

        return images, tf.reshape(label_index_batch, [batch_size])
| mit |
gelnior/newebe | newebe/apps/contacts/tests/features/steps.py | 1 | 10121 | import sys
import pytz
import time
from nose.tools import assert_equals, assert_in
from lettuce import step, world, before
from tornado.httpclient import HTTPError
sys.path.append("../")
from newebe.lib.slugify import slugify
from newebe.apps.contacts.models import Contact, ContactTag, ContactManager
from newebe.apps.activities.models import ActivityManager
from newebe.apps.contacts.models import STATE_WAIT_APPROVAL, STATE_TRUSTED
from newebe.apps.contacts.models import STATE_PENDING, STATE_ERROR
from newebe.lib.test_util import NewebeClient, db, db2, reset_documents
from newebe.lib import date_util
from newebe.lib.test_util import ROOT_URL, SECOND_NEWEBE_ROOT_URL
@step(u'Clear contacts')
def clear_contacts(step):
    # Wipe contacts on both newebe instances so a scenario starts clean.
    reset_documents(Contact, ContactManager.getContacts)
    reset_documents(Contact, ContactManager.getContacts, db2)


@before.all
def set_browers():
    """Reset both databases and log two test clients in before any scenario."""
    reset_documents(Contact, ContactManager.getContacts)
    reset_documents(Contact, ContactManager.getContacts, db2)
    reset_documents(ContactTag, ContactManager.getTags)

    world.browser = NewebeClient()
    world.browser.set_default_user()
    world.browser.login("password")

    # The second instance is optional; warn instead of failing when absent.
    try:
        world.browser2 = NewebeClient()
        world.browser2.set_default_user_2(SECOND_NEWEBE_ROOT_URL)
        world.user2 = world.browser2.user
        world.browser2.login("password")
    except HTTPError:
        print "[WARNING] Second newebe instance does not look started"


@step(u'Convert default user to contact')
def convert_default_user_to_contact(step):
    world.contact = world.user.asContact()


@step(u'Check that contact has same properties as default user')
def check_that_contact_has_same_properties_as_default_user(step):
    # url/key/name/description must survive the user -> contact conversion.
    assert world.user.url == world.contact.url
    assert world.user.key == world.contact.key
    assert world.user.name == world.contact.name
    assert world.user.description == world.contact.description


@step(u'Deletes contacts')
def deletes_contacts(step):
    reset_documents(Contact, ContactManager.getContacts)
    reset_documents(Contact, ContactManager.getContacts, db2)
@step(u'Creates contacts')
def creates_contacts(step):
    """Persist three contacts, one for each lifecycle state
    (pending, trusted, waiting for approval)."""
    contact = Contact()
    contact.url = "http://localhost/1/"
    contact.slug = slugify(contact.url)
    contact.state = STATE_PENDING
    contact.key = "key1"
    contact.save()

    contact2 = Contact()
    contact2.url = "http://localhost/2/"
    contact2.slug = slugify(contact2.url)
    contact2.state = STATE_TRUSTED
    contact2.key = "key2"
    contact2.save()

    contact3 = Contact()
    contact3.url = "http://localhost/3/"
    contact3.slug = slugify(contact3.url)
    contact3.state = STATE_WAIT_APPROVAL
    # BUG FIX: this line previously read `contact.key = "key3"`, overwriting
    # the first (already saved) contact in memory and leaving contact3
    # without a key.
    contact3.key = "key3"
    contact3.save()
@step(u'Get contacts')
def get_contacts(step):
    world.contacts = ContactManager.getContacts()


@step(u'Check that there is (\d+) contacts')
def check_that_there_is_x_contacts(step, nb_contacts):
    # The regexp captures nb_contacts as a string; compare as int.
    assert int(nb_contacts) == len(world.contacts)


@step(u'Get requested contacts')
def get_requested_contacts(step):
    world.contacts = ContactManager.getRequestedContacts()


@step(u'Get pending contacts')
def get_pending_contacts(step):
    world.contacts = ContactManager.getPendingContacts()


@step(u'Get trusted contacts')
def get_trusted_contacts(step):
    world.contacts = ContactManager.getTrustedContacts()


@step(u'Get contact with slug : ([0-9a-z-]+)')
def get_contact_with_slug(step, slug):
    world.contact = ContactManager.getContact(slug)


@step(u'Check contact is null')
def check_contact_is_null(step):
    assert world.contact is None


@step(u'Check contact is not null')
def check_contact_is_not_null(step):
    assert world.contact is not None


@step(u'Get trusted contact with key : ([0-9a-z-]+)')
def get_trusted_contact_with_key(step, key):
    world.contact = ContactManager.getTrustedContact(key)


@step(u'I create one contact with tag "([^"]*)"')
def i_create_one_contact_with_tag_group1(step, tag):
    # Register the tag through the HTTP API, then store a contact carrying it.
    world.browser.post("contacts/tags/",
                       body='{"name":"%s"}' % tag)
    contact = Contact(
        url="http://localhost/1/",
        slug=slugify(unicode("http://localhost/1/")),
        state=STATE_PENDING,
        key="key1",
        tags=["all", tag]
    )
    contact.save()


@step(u'When I retrieve all tags')
def when_i_retrieve_all_tags(step):
    world.tags = ContactManager.getTags()
    # Keep only the tag names; the implicit "all" tag is appended manually.
    world.tags = [tag.name for tag in world.tags]
    world.tags.append("all")


@step(u'I got a list with "([^"]*)", "([^"]*)" and "([^"]*)" inside it')
def i_got_a_list_with_group1_group2_and_group3_inside_it(step, tag1, tag2, tag3):
    # Three named tags plus the implicit "all".
    assert_equals(len(world.tags), 4)
    assert_in(tag1, world.tags)
    assert_in(tag2, world.tags)
    assert_in(tag3, world.tags)
## Handlers


@step(u'Through handler retrieve requested contacts')
def through_handler_retrieve_requested_contacts(step):
    world.contacts = world.browser.fetch_documents("contacts/requested/")


@step(u'Through handlers retrieve pending contacts')
def through_handlers_retrieve_pending_contacts(step):
    world.contacts = world.browser.fetch_documents("contacts/pending/")


@step(u'Through handlers retrieve trusted contacts')
def through_handlers_retrieve_trusted_contacts(step):
    world.contacts = world.browser.fetch_documents("contacts/trusted/")


@step(u'Through handlers retrieve all contacts')
def through_handlers_retrieve_all_contacts(step):
    world.contacts = world.browser.fetch_documents("contacts/")


@step(u'Create a default contact')
def create_a_default_contact(step):
    contact = Contact()
    contact.url = u"http://default:8000/"
    contact.slug = slugify(contact.url)
    contact.state = STATE_TRUSTED
    contact.description = "desc 1"
    contact.name = "default contact 1"
    contact.save()


@step(u'Change default contact data through handlers')
def change_default_contact_data_through_handlers(step):
    # Look the contact up by its original slug, then push the new profile.
    contact = ContactManager.getContact(slugify(u"http://default:8000/"))
    contact.description = "desc 2"
    contact.url = u"http://default:8010/"
    contact.name = "default contact 2"
    world.browser.put("contacts/update-profile/", contact.toJson())


@step(u'Checks that default contact data are updated')
def checks_that_default_contact_data_are_updated(step):
    # The slug stays keyed on the original url even after the url changed.
    contact = ContactManager.getContact(slugify(u"http://default:8000/"))
    assert "http://default:8010/" == contact.url
    assert "default contact 2" == contact.name
    assert "desc 2" == contact.description


@step(u'Checks that contact update activity is properly created')
def checks_that_contact_update_activity_is_properly_created(step):
    activity = ActivityManager.get_all().first()

    assert "modifies" == activity.verb
    assert "profile" == activity.docType
    assert False == activity.isMine
    assert "default contact 2" == activity.author


@step(u'Through handler retrieve contact with slug ([0-9a-z-]+)')
def through_handler_retrieve_contact_with_slug(step, slug):
    # A 404 (HTTPError) simply yields an empty contact list.
    try:
        contact = world.browser.fetch_document("contacts/" + slug)
        world.contacts = [contact]
    except HTTPError:
        world.contacts = []


@step(u'Through handler delete contact with slug ([0-9a-z-]+)')
def through_handler_delete_contact_with_slug_http_localhost_1(step, slug):
    world.contacts = []
    world.browser.delete("contacts/" + slug)
## Adding contact


@step(u'Deletes seconde newebe contacts')
def deletes_seconde_newebe_contacts(step):
    contacts = world.browser2.fetch_documents("contacts/requested/")
    for contact in contacts:
        world.browser2.delete("contacts/" + contact["slug"])


@step(u'On first newebe add second newebe as a contact')
def on_first_newebe_add_second_newebe_as_a_contact(step):
    print world.browser2.root_url
    world.browser.post("contacts/",
                       body='{"url":"%s"}' % world.browser2.root_url)
    # Give the asynchronous contact request some time to propagate.
    time.sleep(0.3)


@step(u'Check that first contact status is pending')
def check_that_first_contact_status_is_pending(step):
    assert 1 == len(world.contacts)
    assert_equals(STATE_PENDING, world.contacts[0]["state"])


@step(u'From second newebe retrieve all contacts')
def from_second_newebe_retrieve_all_contacts(step):
    # Short pause lets the cross-instance request land first.
    time.sleep(0.3)
    world.contacts = world.browser2.fetch_documents("contacts/")


@step(u'Check that first contact status is waiting for approval')
def check_that_first_contact_status_is_waiting_for_approval(step):
    assert 1 == len(world.contacts)
    assert STATE_WAIT_APPROVAL == world.contacts[0]["state"]


@step(u'On second newebe confirm first newebe request')
def on_seconde_newebe_confirm_first_newebe_request(step):
    world.browser2.put('contacts/%s' % slugify(ROOT_URL),
                       body='{"state":"%s", "tags":null}' % STATE_TRUSTED)


@step(u'Check that first contact status is trusted')
def check_that_first_contact_status_is_trusted(step):
    assert 1 == len(world.contacts)
    assert STATE_TRUSTED == world.contacts[0]["state"]


# Retry


@step(u'Set first contact state as error')
def set_first_contact_state_as_error(step):
    contact = ContactManager.getContacts().first()
    contact.state = STATE_ERROR
    contact.save()


@step(u'Send a retry request for this contact')
def send_a_retry_request_for_this_contact(step):
    world.browser.post("contacts/%s/retry/" % slugify(SECOND_NEWEBE_ROOT_URL),
                       "")
    time.sleep(0.3)


# Timezone


@step(u'Check that request date is set to "([a-zA-Z//]+)" timezone')
def check_that_request_date_is_set_to_europe_paris_timezone(step, timezone):
    # Read the stored request date from the second instance's database.
    Contact._db = db2
    contact = ContactManager.getRequestedContacts().first()
    Contact._db = db

    date = date_util.get_date_from_db_date(world.contacts[0]["requestDate"])
    tz = pytz.timezone(timezone)
    date = date.replace(tzinfo=tz)

    assert_equals(
        date_util.convert_utc_date_to_timezone(contact.requestDate, tz),
        date)


# Tags


@step(u'When I retrieve through handler all tags')
def when_i_retrieve_through_handler_all_tags(step):
    world.tags = world.browser.fetch_documents("contacts/tags/")
    # Keep only the tag names.
    world.tags = [tag["name"] for tag in world.tags]
| agpl-3.0 |
cruzegoodin/TSC-ShippingDetails | flask/lib/python2.7/site-packages/guess_language/guess_language.py | 59 | 12069 | ''' Guess the language of text.
Based on guesslanguage.cpp by Jacob R Rideout for KDE
http://websvn.kde.org/branches/work/sonnet-refactoring/common/nlp/guesslanguage.cpp?view=markup
which itself is based on Language::Guess by Maciej Ceglowski
http://languid.cantbedone.org/
Copyright (c) 2008, Kent S Johnson
C++ version is Copyright (c) 2006 Jacob R Rideout <kde@jacobrideout.net>
Perl version is (c) 2004-6 Maciej Ceglowski
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
Note: Language::Guess is GPL-licensed. KDE developers received permission
from the author to distribute their port under LGPL:
http://lists.kde.org/?l=kde-sonnet&m=116910092228811&w=2
'''
__all__ = 'guessLanguage guessLanguageName guessLanguageInfo guessLanguageTag guessLanguageId'.split()
import codecs, os, re, sys, unicodedata
from collections import defaultdict
from blocks import unicodeBlock
MIN_LENGTH = 20  # samples shorter than this are too noisy for trigram scoring

# Groups of languages that share a script; once the script is identified,
# the matching group is used as the candidate set for trigram scoring.
BASIC_LATIN = "en ceb ha so tlh id haw la sw eu nr nso zu xh ss st tn ts".split()
EXTENDED_LATIN = "cs af pl hr ro sk sl tr hu az et sq ca es fr de nl it da is nb sv fi lv pt ve lt tl cy".split()
ALL_LATIN = BASIC_LATIN + EXTENDED_LATIN
CYRILLIC = "ru uk kk uz mn sr mk bg ky".split()
ARABIC = "ar fa ps ur".split()
DEVANAGARI = "hi ne".split()

# Unicode blocks used by exactly one supported language: no trigram check needed.
# NOTE mn appears twice, once for mongolian script and once for CYRILLIC
SINGLETONS = [
    ('Armenian', 'hy'),
    ('Hebrew', 'he'),
    ('Bengali', 'bn'),
    ('Gurmukhi', 'pa'),
    ('Greek', 'el'),
    ('Gujarati', 'gu'),
    ('Oriya', 'or'),
    ('Tamil', 'ta'),
    ('Telugu', 'te'),
    ('Kannada', 'kn'),
    ('Malayalam', 'ml'),
    ('Sinhala', 'si'),
    ('Thai', 'th'),
    ('Lao', 'lo'),
    ('Tibetan', 'bo'),
    ('Burmese', 'my'),
    ('Georgian', 'ka'),
    ('Mongolian', 'mn-Mong'),
    ('Khmer', 'km'),
    ]

PT = "pt_BR pt_PT".split()  # Portuguese is split further into regional variants

UNKNOWN = 'UNKNOWN'

models = {}  # language tag -> {trigram: rank}; populated by _load_models()
# Language tag (mostly ISO 639-1, plus a few non-standard tags) -> display name.
NAME_MAP = {
    "ab" : "Abkhazian",
    "af" : "Afrikaans",
    "ar" : "Arabic",
    "az" : "Azeri",
    "be" : "Byelorussian",
    "bg" : "Bulgarian",
    "bn" : "Bengali",
    "bo" : "Tibetan",
    "br" : "Breton",
    "ca" : "Catalan",
    "ceb" : "Cebuano",
    "cs" : "Czech",
    "cy" : "Welsh",
    "da" : "Danish",
    "de" : "German",
    "el" : "Greek",
    "en" : "English",
    "eo" : "Esperanto",
    "es" : "Spanish",
    "et" : "Estonian",
    "eu" : "Basque",
    "fa" : "Farsi",
    "fi" : "Finnish",
    "fo" : "Faroese",
    "fr" : "French",
    "fy" : "Frisian",
    "gd" : "Scots Gaelic",
    "gl" : "Galician",
    "gu" : "Gujarati",
    "ha" : "Hausa",
    "haw" : "Hawaiian",
    "he" : "Hebrew",
    "hi" : "Hindi",
    "hr" : "Croatian",
    "hu" : "Hungarian",
    "hy" : "Armenian",
    "id" : "Indonesian",
    "is" : "Icelandic",
    "it" : "Italian",
    "ja" : "Japanese",
    "ka" : "Georgian",
    "kk" : "Kazakh",
    "km" : "Cambodian",
    "ko" : "Korean",
    "ku" : "Kurdish",
    "ky" : "Kyrgyz",
    "la" : "Latin",
    "lt" : "Lithuanian",
    "lv" : "Latvian",
    "mg" : "Malagasy",
    "mk" : "Macedonian",
    "ml" : "Malayalam",
    "mn" : "Mongolian",
    "mr" : "Marathi",
    "ms" : "Malay",
    "nd" : "Ndebele",
    "ne" : "Nepali",
    "nl" : "Dutch",
    "nn" : "Nynorsk",
    "no" : "Norwegian",
    "nso" : "Sepedi",
    "pa" : "Punjabi",
    "pl" : "Polish",
    "ps" : "Pashto",
    "pt" : "Portuguese",
    "ro" : "Romanian",
    "ru" : "Russian",
    "sa" : "Sanskrit",
    "sh" : "Serbo-Croatian",
    "sk" : "Slovak",
    "sl" : "Slovene",
    "so" : "Somali",
    "sq" : "Albanian",
    "sr" : "Serbian",
    "sv" : "Swedish",
    "sw" : "Swahili",
    "ta" : "Tamil",
    "te" : "Telugu",
    "th" : "Thai",
    "tl" : "Tagalog",
    "tlh" : "Klingon",
    "tn" : "Setswana",
    "tr" : "Turkish",
    "ts" : "Tsonga",
    "tw" : "Twi",
    # Fix: "uk" was listed twice; the second, misspelled entry ("Ukranian")
    # silently overwrote the correct one. Keep a single, correctly spelled entry.
    "uk" : "Ukrainian",
    "ur" : "Urdu",
    "uz" : "Uzbek",
    "ve" : "Venda",
    "vi" : "Vietnamese",
    "xh" : "Xhosa",
    "zh" : "Chinese",
    "zh-tw" : "Traditional Chinese (Taiwan)",
    "zu" : "Zulu",
    }
# Language tag -> numeric IANA language id used by guessLanguageId().
IANA_MAP = {
    "ab" : 12026,
    "af" : 40,
    "ar" : 26020,
    "az" : 26030,
    "be" : 11890,
    "bg" : 26050,
    "bn" : 26040,
    "bo" : 26601,
    "br" : 1361,
    "ca" : 3,
    "ceb" : 26060,
    "cs" : 26080,
    "cy" : 26560,
    "da" : 26090,
    "de" : 26160,
    "el" : 26165,
    "en" : 26110,
    "eo" : 11933,
    "es" : 26460,
    "et" : 26120,
    "eu" : 1232,
    "fa" : 26130,
    "fi" : 26140,
    "fo" : 11817,
    "fr" : 26150,
    "fy" : 1353,
    "gd" : 65555,
    "gl" : 1252,
    "gu" : 26599,
    "ha" : 26170,
    "haw" : 26180,
    "he" : 26592,
    "hi" : 26190,
    "hr" : 26070,
    "hu" : 26200,
    "hy" : 26597,
    "id" : 26220,
    "is" : 26210,
    "it" : 26230,
    "ja" : 26235,
    "ka" : 26600,
    "kk" : 26240,
    "km" : 1222,
    "ko" : 26255,
    "ku" : 11815,
    "ky" : 26260,
    "la" : 26280,
    "lt" : 26300,
    "lv" : 26290,
    "mg" : 1362,
    "mk" : 26310,
    "ml" : 26598,
    "mn" : 26320,
    "mr" : 1201,
    "ms" : 1147,
    "ne" : 26330,
    "nl" : 26100,
    "nn" : 172,
    "no" : 26340,
    "pa" : 65550,
    "pl" : 26380,
    "ps" : 26350,
    "pt" : 26390,
    "ro" : 26400,
    "ru" : 26410,
    "sa" : 1500,
    "sh" : 1399,
    "sk" : 26430,
    "sl" : 26440,
    "so" : 26450,
    "sq" : 26010,
    "sr" : 26420,
    "sv" : 26480,
    "sw" : 26470,
    "ta" : 26595,
    "te" : 26596,
    "th" : 26594,
    "tl" : 26490,
    "tlh" : 26250,
    "tn" : 65578,
    "tr" : 26500,
    "tw" : 1499,
    # Fix: "uk" was listed twice (26510 then 26520); the first entry was dead
    # code since the later duplicate overwrote it. Keep the effective value.
    "uk" : 26520,
    "ur" : 26530,
    "uz" : 26540,
    "vi" : 26550,
    "zh" : 26065,
    "zh-tw" : 22,
    }
def _load_models():
    """Populate the module-level ``models`` dict from the trigram data files.

    Each non-directory file under ``trigrams/`` holds lines of the form
    ``<trigram> <rank>``; the file name (lowercased) becomes the model key.
    """
    modelsDir = os.path.join(os.path.dirname(__file__), 'trigrams')
    modelsList = os.listdir(modelsDir)

    lineRe = re.compile(r"(.{3})\s+(.*)")
    for modelFile in modelsList:
        modelPath = os.path.join(modelsDir, modelFile)
        if os.path.isdir(modelPath):
            continue
        f = codecs.open(modelPath, 'r', 'utf-8')
        try:
            model = {}  # QHash<QString,int> model
            for line in f:
                m = lineRe.search(line)
                if m:
                    model[m.group(1)] = int(m.group(2))
        finally:
            # Fix: file handles were previously never closed (leaked one
            # descriptor per model file at import time).
            f.close()

        models[modelFile.lower()] = model

_load_models()
def guessLanguage(text):
    ''' Returns the language code, i.e. 'en'

    ``text`` may be a byte string (decoded here as UTF-8) or unicode
    (this module targets Python 2). Returns UNKNOWN for empty input or
    when no script/model matches.
    '''
    if not text:
        return UNKNOWN

    if isinstance(text, str):
        text = unicode(text, 'utf-8')

    text = normalize(text)

    return _identify(text, find_runs(text))
def guessLanguageInfo(text):
    """
    Return a ``(tag, id, name)`` triple for *text*, e.g. ``('en', 26110, 'English')``.

    When the language cannot be determined, every slot holds ``UNKNOWN``.
    """
    tag = guessLanguage(text)

    if tag == UNKNOWN:
        return UNKNOWN, UNKNOWN, UNKNOWN

    return tag, _getId(tag), _getName(tag)
# An alias for guessLanguage
guessLanguageTag = guessLanguage
def guessLanguageId(text):
    """
    Return the numeric IANA id of the guessed language, e.g. ``26110``.
    """
    return _getId(guessLanguage(text))
def guessLanguageName(text):
    """
    Return the display name of the guessed language, e.g. ``'English'``.
    """
    return _getName(guessLanguage(text))
def _getId(iana):
    """Map a language tag to its numeric id; UNKNOWN when unmapped."""
    try:
        return IANA_MAP[iana]
    except KeyError:
        return UNKNOWN
def _getName(iana):
    """Map a language tag to its display name; UNKNOWN when unmapped."""
    try:
        return NAME_MAP[iana]
    except KeyError:
        return UNKNOWN
def find_runs(text):
    ''' Count the number of characters in each character block

    Returns the names of the Unicode blocks that dominate *text*
    (see the percentage thresholds below).
    '''
    run_types = defaultdict(int)  # block name -> count of alphabetic chars

    totalCount = 0

    for c in text:
        if c.isalpha():
            block = unicodeBlock(c)
            run_types[block] += 1
            totalCount += 1

    # import pprint
    # pprint.pprint(run_types)

    # return run types that used for 40% or more of the string
    # always return basic latin if found more than 15%
    # and extended additional latin if over 10% (for Vietnamese)
    relevant_runs = []
    for key, value in run_types.items():
        # Integer percentage (floor division on Python 2, which this targets).
        pct = (value*100) / totalCount
        if pct >=40:
            relevant_runs.append(key)
        elif key == "Basic Latin" and ( pct >=15 ):
            relevant_runs.append(key)
        elif key == "Latin Extended Additional" and ( pct >=10 ):
            relevant_runs.append(key)

    return relevant_runs
def _identify(sample, scripts):
    """Resolve *scripts* (Unicode block names from find_runs) to a language
    tag. Scripts unique to one language map directly; scripts shared by
    several languages fall back to trigram scoring via check()."""
    if len(sample) < 3:
        return UNKNOWN

    if "Hangul Syllables" in scripts or "Hangul Jamo" in scripts \
            or "Hangul Compatibility Jamo" in scripts or "Hangul" in scripts:
        return "ko"

    if "Greek and Coptic" in scripts:
        return "el"

    if "Katakana" in scripts:
        return "ja"

    if "CJK Unified Ideographs" in scripts or "Bopomofo" in scripts \
            or "Bopomofo Extended" in scripts or "KangXi Radicals" in scripts:

# This is in both Ceglowski and Rideout
# I can't imagine why...
#            or "Arabic Presentation Forms-A" in scripts
        return "zh"

    # Scripts shared by several languages: disambiguate with trigram models.
    if "Cyrillic" in scripts:
        return check( sample, CYRILLIC )

    if "Arabic" in scripts or "Arabic Presentation Forms-A" in scripts or "Arabic Presentation Forms-B" in scripts:
        return check( sample, ARABIC )

    if "Devanagari" in scripts:
        return check( sample, DEVANAGARI )


    # Try languages with unique scripts
    for blockName, langName in SINGLETONS:
        if blockName in scripts:
            return langName

    if "Latin Extended Additional" in scripts:
        return "vi"

    if "Extended Latin" in scripts:
        latinLang = check( sample, EXTENDED_LATIN )
        if latinLang == "pt":
            # Portuguese has regional sub-models (pt_BR / pt_PT).
            return check(sample, PT)
        else:
            return latinLang

    if "Basic Latin" in scripts:
        return check( sample, ALL_LATIN )

    return UNKNOWN
def check(sample, langs):
    """Return the tag from *langs* whose trigram model is closest to
    *sample*'s trigram profile; UNKNOWN when the sample is too short or
    no model is available for any candidate."""
    if len(sample) < MIN_LENGTH:
        return UNKNOWN

    scores = []
    model = createOrderedModel(sample)  # QMap<int,QString>
    for key in langs:
        lkey = key.lower()

        if lkey in models:
            scores.append( (distance(model, models[lkey]), key) )

    if not scores:
        return UNKNOWN

    # we want the lowest score, less distance = greater chance of match
#    pprint(sorted(scores))
    return min(scores)[1]
def createOrderedModel(content):
    ''' Create a list of trigrams in content sorted by frequency '''
    counts = defaultdict(int)  # QHash<QString,int>
    lowered = content.lower()
    for start in xrange(len(lowered) - 2):
        counts[lowered[start:start + 3]] += 1

    # Most frequent first; ties broken alphabetically for determinism.
    return sorted(counts.keys(), key=lambda gram: (-counts[gram], gram))
spRe = re.compile(r"\s\s", re.UNICODE)  # trigram containing two spaces -> no signal
MAXGRAMS = 300  # number of top-ranked trigrams compared per language model
def distance(model, knownModel):
    """Sum of rank displacements between the sample's top trigrams and a
    stored language model; a lower total means a closer match."""
    total = 0
    for rank, trigram in enumerate(model[:MAXGRAMS]):
        # Trigrams holding a double space carry no signal -- skip them.
        if spRe.search(trigram):
            continue
        knownRank = knownModel.get(trigram)
        if knownRank is None:
            # Trigram unseen in the model: apply the maximum penalty.
            total += MAXGRAMS
        else:
            total += abs(rank - knownRank)
    return total
def _makeNonAlphaRe():
    """Build a regex matching any single non-alphabetic character, by
    enumerating every alphabetic code point into a negated character class.
    Runs once at import time (see the module-level nonAlphaRe below).
    """
    nonAlpha = [u'[^']
    for i in range(sys.maxunicode):
      c = unichr(i)
      if c.isalpha(): nonAlpha.append(c)
    nonAlpha.append(u']')
    nonAlpha = u"".join(nonAlpha)
    return re.compile(nonAlpha)
nonAlphaRe = _makeNonAlphaRe()  # matches one non-alphabetic character
spaceRe = re.compile('\s+', re.UNICODE)  # a run of one or more whitespace chars
def normalize(u):
    ''' Convert to normalized unicode.
        Remove non-alpha chars and compress runs of spaces.

        *u* must already be a unicode object; NFC-normalizes it, replaces
        every non-alphabetic character with a space, then collapses runs
        of whitespace to single spaces.
    '''
    u = unicodedata.normalize('NFC', u)
    u = nonAlphaRe.sub(' ', u)
    u = spaceRe.sub(' ', u)
    return u
chrisxue815/leetcode_python | problems/test_0017_iterative_backtrack.py | 1 | 1110 | import unittest
from typing import List
import utils
digit_letters = ['', '', 'abc', 'def', 'ghi', 'jkl', 'mno', 'pqrs', 'tuv', 'wxyz']


# O(4^n) time. O(4^n) space. Iterative backtracking.
class Solution:
    def letterCombinations(self, digits: str) -> List[str]:
        """Return every letter string spelled by *digits* on a phone keypad,
        in keypad (odometer) order; empty input yields an empty list."""
        if not digits:
            return []

        pools = [digit_letters[ord(ch) - ord('0')] for ch in digits]
        counters = [0] * len(pools)
        combos = [''.join(pool[0] for pool in pools)]

        while True:
            # Advance the rightmost counter, carrying left on overflow.
            pos = len(pools) - 1
            while pos >= 0:
                counters[pos] += 1
                if counters[pos] < len(pools[pos]):
                    break
                counters[pos] = 0
                pos -= 1
            if pos < 0:
                # Every counter wrapped around: all combinations emitted.
                return combos
            combos.append(''.join(pools[i][counters[i]] for i in range(len(pools))))
class Test(unittest.TestCase):
    def test(self):
        # Delegates to the shared data-driven harness; assertCountEqual is
        # used because the expected combinations may appear in any order.
        utils.test(self, __file__, Solution, asserter=self.assertCountEqual)
if __name__ == '__main__':
unittest.main()
| unlicense |
jeremiahyan/odoo | addons/sale_product_configurator/tests/common.py | 1 | 4895 | # Part of Odoo. See LICENSE file for full copyright and licensing details.
import base64
from odoo.tests.common import TransactionCase
from odoo.modules.module import get_module_resource
class TestProductConfiguratorCommon(TransactionCase):
    """Shared fixtures for product-configurator tests: a configurable desk
    template with Legs/Color attributes (one custom value, one archived
    variant) plus two chained optional products."""

    @classmethod
    def setUpClass(cls):
        """Build the attribute/value matrix, the desk template with its
        variants and price extras, and the optional chair products."""
        super().setUpClass()

        # Setup attributes and attributes values
        cls.product_attribute_1 = cls.env['product.attribute'].create({
            'name': 'Legs',
            'sequence': 10,
        })
        product_attribute_value_1 = cls.env['product.attribute.value'].create({
            'name': 'Steel',
            'attribute_id': cls.product_attribute_1.id,
            'sequence': 1,
        })
        product_attribute_value_2 = cls.env['product.attribute.value'].create({
            'name': 'Aluminium',
            'attribute_id': cls.product_attribute_1.id,
            'sequence': 2,
        })
        product_attribute_2 = cls.env['product.attribute'].create({
            'name': 'Color',
            'sequence': 20,
        })
        product_attribute_value_3 = cls.env['product.attribute.value'].create({
            'name': 'White',
            'attribute_id': product_attribute_2.id,
            'sequence': 1,
        })
        product_attribute_value_4 = cls.env['product.attribute.value'].create({
            'name': 'Black',
            'attribute_id': product_attribute_2.id,
            'sequence': 2,
        })

        # Create product template
        cls.product_product_custo_desk = cls.env['product.template'].create({
            'name': 'Customizable Desk (TEST)',
            'standard_price': 500.0,
            'list_price': 750.0,
        })

        # Generate variants
        cls.env['product.template.attribute.line'].create([{
            'product_tmpl_id': cls.product_product_custo_desk.id,
            'attribute_id': cls.product_attribute_1.id,
            'value_ids': [(4, product_attribute_value_1.id), (4, product_attribute_value_2.id)],
        }, {
            'product_tmpl_id': cls.product_product_custo_desk.id,
            'attribute_id': product_attribute_2.id,
            'value_ids': [(4, product_attribute_value_3.id), (4, product_attribute_value_4.id)],
        }])

        # Apply a price_extra for the attribute Aluminium
        cls.product_product_custo_desk.attribute_line_ids[0].product_template_value_ids[1].price_extra = 50.40

        # Add a Custom attribute
        product_attribute_value_custom = cls.env['product.attribute.value'].create({
            'name': 'Custom',
            'attribute_id': cls.product_attribute_1.id,
            'sequence': 3,
            'is_custom': True
        })
        cls.product_product_custo_desk.attribute_line_ids[0].write({'value_ids': [(4, product_attribute_value_custom.id)]})

        # Disable the aluminium + black product
        cls.product_product_custo_desk.product_variant_ids[3].active = False

        # Setup a first optional product
        img_path = get_module_resource('product', 'static', 'img', 'product_product_11-image.png')
        img_content = base64.b64encode(open(img_path, "rb").read())
        cls.product_product_conf_chair = cls.env['product.template'].create({
            'name': 'Conference Chair (TEST)',
            'image_1920': img_content,
            'list_price': 16.50,
        })
        cls.env['product.template.attribute.line'].create({
            'product_tmpl_id': cls.product_product_conf_chair.id,
            'attribute_id': cls.product_attribute_1.id,
            'value_ids': [(4, product_attribute_value_1.id), (4, product_attribute_value_2.id)],
        })
        cls.product_product_conf_chair.attribute_line_ids[0].product_template_value_ids[1].price_extra = 6.40
        cls.product_product_custo_desk.optional_product_ids = [(4, cls.product_product_conf_chair.id)]

        # Setup a second optional product
        cls.product_product_conf_chair_floor_protect = cls.env['product.template'].create({
            'name': 'Chair floor protection',
            'list_price': 12.0,
        })
        cls.product_product_conf_chair.optional_product_ids = [(4, cls.product_product_conf_chair_floor_protect.id)]

    # NOTE(review): defined without @classmethod even though the first
    # argument is named ``cls`` -- presumably called on instances; confirm
    # callers before changing the signature.
    def _create_pricelist(cls, pricelists):
        """Add a 20% formula-discount rule for the desk (min qty 2) to each
        pricelist lacking one, and show prices without the discount applied."""
        for pricelist in pricelists:
            if not pricelist.item_ids.filtered(lambda i: i.product_tmpl_id == cls.product_product_custo_desk and i.price_discount == 20):
                cls.env['product.pricelist.item'].create({
                    'base': 'list_price',
                    'applied_on': '1_product',
                    'pricelist_id': pricelist.id,
                    'product_tmpl_id': cls.product_product_custo_desk.id,
                    'price_discount': 20,
                    'min_quantity': 2,
                    'compute_price': 'formula',
                })
            pricelist.discount_policy = 'without_discount'
| gpl-3.0 |
eduNEXT/edunext-platform | common/djangoapps/third_party_auth/api/permissions.py | 3 | 2103 | """
Third party auth API related permissions
"""
import logging
from edx_rest_framework_extensions.auth.jwt.decoder import decode_jwt_filters
from edx_rest_framework_extensions.permissions import (
IsStaff,
IsSuperuser,
JwtHasScope,
JwtRestrictedApplication,
NotJwtRestrictedApplication
)
from rest_condition import C
from rest_framework.permissions import BasePermission
from openedx.core.lib.api.permissions import ApiKeyHeaderPermission
log = logging.getLogger(__name__)
class JwtHasTpaProviderFilterForRequestedProvider(BasePermission):
    """
    Ensures the JWT used to authenticate contains the appropriate tpa_provider
    filter for the provider_id requested in the view.
    """
    message = 'JWT missing required tpa_provider filter.'

    def has_permission(self, request, view):
        """
        Ensure that the provider_id kwarg provided to the view exists
        in the tpa_provider filters in the JWT used to authenticate.

        Denies access when the view has no ``provider_id`` kwarg or when
        no matching ``tpa_provider`` filter is present in the JWT.
        """
        provider_id = view.kwargs.get('provider_id')
        if not provider_id:
            log.warning("Permission JwtHasTpaProviderFilterForRequestedProvider requires a view with provider_id.")
            return False

        # Filters decode to (filter_type, filter_value) pairs from the JWT payload.
        jwt_filters = decode_jwt_filters(request.auth)
        for filter_type, filter_value in jwt_filters:
            if filter_type == 'tpa_provider' and filter_value == provider_id:
                return True

        log.warning(
            "Permission JwtHasTpaProviderFilterForRequestedProvider: required filter tpa_provider:%s was not found.",
            provider_id,
        )
        return False
# TODO: Remove ApiKeyHeaderPermission. Check deprecated_api_key_header custom metric for active usage.
# Non-restricted callers (session, API key, unrestricted JWT) need staff or
# superuser rights.
_NOT_JWT_RESTRICTED_TPA_PERMISSIONS = (
    C(NotJwtRestrictedApplication) &
    (C(IsSuperuser) | ApiKeyHeaderPermission | C(IsStaff))
)
# Restricted JWT applications must carry the required scope AND a
# tpa_provider filter matching the requested provider.
_JWT_RESTRICTED_TPA_PERMISSIONS = (
    C(JwtRestrictedApplication) &
    JwtHasScope &
    JwtHasTpaProviderFilterForRequestedProvider
)
# Composite permission applied by third-party-auth API views.
TPA_PERMISSIONS = (
    (_NOT_JWT_RESTRICTED_TPA_PERMISSIONS | _JWT_RESTRICTED_TPA_PERMISSIONS)
)
| agpl-3.0 |
PaoloC68/django-registration | registration/admin.py | 107 | 1624 | from django.contrib import admin
from django.contrib.sites.models import RequestSite
from django.contrib.sites.models import Site
from django.utils.translation import ugettext_lazy as _
from registration.models import RegistrationProfile
class RegistrationAdmin(admin.ModelAdmin):
    # Bulk actions exposed in the admin changelist dropdown.
    actions = ['activate_users', 'resend_activation_email']
    list_display = ('user', 'activation_key_expired')
    raw_id_fields = ['user']
    search_fields = ('user__username', 'user__first_name', 'user__last_name')

    def activate_users(self, request, queryset):
        """
        Activates the selected users, if they are not already
        activated.
        """
        for profile in queryset:
            RegistrationProfile.objects.activate_user(profile.activation_key)
    activate_users.short_description = _("Activate users")

    def resend_activation_email(self, request, queryset):
        """
        Re-sends activation emails for the selected users.

        Note that this will *only* send activation emails for users
        who are eligible to activate; emails will not be sent to users
        whose activation keys have expired or who have already
        activated.
        """
        # NOTE(review): Site._meta.installed is removed in newer Django
        # releases -- confirm the Django versions this branch supports.
        if Site._meta.installed:
            site = Site.objects.get_current()
        else:
            site = RequestSite(request)
        for profile in queryset:
            if not profile.activation_key_expired():
                profile.send_activation_email(site)
    resend_activation_email.short_description = _("Re-send activation emails")
| bsd-3-clause |
molebot/vnpy | vn.demo/ctpdemo/demoApi.py | 88 | 35262 | # encoding: UTF-8
"""
该文件中包含的是交易平台的底层接口相关的部分,
主要对API进行了一定程度的简化封装,方便开发。
"""
import os
from vnctpmd import MdApi
from vnctptd import TdApi
from eventEngine import *
from ctp_data_type import defineDict
#----------------------------------------------------------------------
def print_dict(d):
    """Print a dict received from the API with sorted keys; used mainly
    for debugging during development."""
    print '-'*60
    l = d.keys()
    l.sort()
    for key in l:
        print key, ':', d[key]
########################################################################
class DemoMdApi(MdApi):
    """
    Market data API wrapper used in the demo.

    All received data is pushed into the event engine, which dispatches it
    to every callback registered for the corresponding event type.

    Active (user-callable) methods:
        login      -- connect and log in to the market data server
        subscribe  -- subscribe to an instrument's market data
    """

    #----------------------------------------------------------------------
    def __init__(self, eventEngine):
        """
        Initialise the API object.
        """
        super(DemoMdApi, self).__init__()

        # Event engine: all data is pushed into it and dispatched from there.
        self.__eventEngine = eventEngine

        # Request id, managed by this wrapper.
        self.__reqid = 0

        # Credentials kept so we can log in automatically on (re)connect.
        self.__userid = ''
        self.__password = ''
        self.__brokerid = ''

        # Subscribed instruments, so they can be re-subscribed after a
        # reconnect; a set is used to avoid duplicates.
        self.__setSubscribed = set()

        # Save the .con files under \mdconnection; NOTE this directory must
        # already exist, otherwise the underlying API raises an error.
        self.createFtdcMdApi(os.getcwd() + '\\mdconnection\\')

    #----------------------------------------------------------------------
    def onFrontConnected(self):
        """Connected to the front server."""
        event = Event(type_=EVENT_LOG)
        event.dict_['log'] = u'行情服务器连接成功'
        self.__eventEngine.put(event)

        # If credentials were already supplied, try to log in automatically.
        if self.__userid:
            req = {}
            req['UserID'] = self.__userid
            req['Password'] = self.__password
            req['BrokerID'] = self.__brokerid
            self.__reqid = self.__reqid + 1
            self.reqUserLogin(req, self.__reqid)

    #----------------------------------------------------------------------
    def onFrontDisconnected(self, n):
        """Disconnected from the front server."""
        event = Event(type_=EVENT_LOG)
        event.dict_['log'] = u'行情服务器连接断开'
        self.__eventEngine.put(event)

    #----------------------------------------------------------------------
    def onHeartBeatWarning(self, n):
        """Heartbeat warning."""
        # Heartbeat warnings fire frequently and have little impact on the
        # API's operation, so they are deliberately ignored.
        pass

    #----------------------------------------------------------------------
    def onRspError(self, error, n, last):
        """Error response."""
        event = Event(type_=EVENT_LOG)
        log = u'行情错误回报,错误代码:' + unicode(error['ErrorID']) + u',' + u'错误信息:' + error['ErrorMsg'].decode('gbk')
        event.dict_['log'] = log
        self.__eventEngine.put(event)

    #----------------------------------------------------------------------
    def onRspUserLogin(self, data, error, n, last):
        """Login response."""
        event = Event(type_=EVENT_LOG)
        if error['ErrorID'] == 0:
            log = u'行情服务器登陆成功'
        else:
            log = u'登陆回报,错误代码:' + unicode(error['ErrorID']) + u',' + u'错误信息:' + error['ErrorMsg'].decode('gbk')
        event.dict_['log'] = log
        self.__eventEngine.put(event)

        ## Automatically re-subscribe previously subscribed instruments after a reconnect
        #if self.__setSubscribed:
            #for instrument in self.__setSubscribed:
                #self.subscribe(instrument[0], instrument[1])

    #----------------------------------------------------------------------
    def onRspUserLogout(self, data, error, n, last):
        """Logout response."""
        event = Event(type_=EVENT_LOG)
        if error['ErrorID'] == 0:
            log = u'行情服务器登出成功'
        else:
            log = u'登出回报,错误代码:' + unicode(error['ErrorID']) + u',' + u'错误信息:' + error['ErrorMsg'].decode('gbk')
        event.dict_['log'] = log
        self.__eventEngine.put(event)

    #----------------------------------------------------------------------
    def onRspSubMarketData(self, data, error, n, last):
        """Market data subscription response."""
        # Subscription errors are usually unimportant, so they are ignored.
        pass

    #----------------------------------------------------------------------
    def onRspUnSubMarketData(self, data, error, n, last):
        """Market data unsubscription response."""
        # Same as above.
        pass

    #----------------------------------------------------------------------
    def onRtnDepthMarketData(self, data):
        """Market data push."""
        # On each tick, fire both the generic market data event and the
        # instrument-specific event, to serve different kinds of listeners.

        # Generic market data event
        event1 = Event(type_=EVENT_MARKETDATA)
        event1.dict_['data'] = data
        self.__eventEngine.put(event1)

        # Instrument-specific market data event
        event2 = Event(type_=(EVENT_MARKETDATA_CONTRACT+data['InstrumentID']))
        event2.dict_['data'] = data
        self.__eventEngine.put(event2)

    #----------------------------------------------------------------------
    def onRspSubForQuoteRsp(self, data, error, n, last):
        """Option quote-request subscription response."""
        pass

    #----------------------------------------------------------------------
    def onRspUnSubForQuoteRsp(self, data, error, n, last):
        """Option quote-request unsubscription response."""
        pass

    #----------------------------------------------------------------------
    def onRtnForQuoteRsp(self, data):
        """Option quote-request push."""
        pass

    #----------------------------------------------------------------------
    def login(self, address, userid, password, brokerid):
        """Connect to the server (credentials are stored for auto re-login)."""
        self.__userid = userid
        self.__password = password
        self.__brokerid = brokerid

        # Register the front server address
        self.registerFront(address)

        # Initialise the connection; onFrontConnected is called on success
        self.init()

    #----------------------------------------------------------------------
    def subscribe(self, instrumentid, exchangeid):
        """Subscribe to an instrument's market data."""
        self.subscribeMarketData(instrumentid)

        instrument = (instrumentid, exchangeid)
        self.__setSubscribed.add(instrument)
########################################################################
class DemoTdApi(TdApi):
"""
Demo中的交易API封装
主动函数包括:
login 登陆
getInstrument 查询合约信息
getAccount 查询账号资金
getInvestor 查询投资者
getPosition 查询持仓
sendOrder 发单
cancelOrder 撤单
"""
#----------------------------------------------------------------------
def __init__(self, eventEngine):
"""API对象的初始化函数"""
super(DemoTdApi, self).__init__()
# 事件引擎,所有数据都推送到其中,再由事件引擎进行分发
self.__eventEngine = eventEngine
# 请求编号,由api负责管理
self.__reqid = 0
# 报单编号,由api负责管理
self.__orderref = 0
# 以下变量用于实现连接和重连后的自动登陆
self.__userid = ''
self.__password = ''
self.__brokerid = ''
# 合约字典(保存合约查询数据)
self.__dictInstrument = {}
# 初始化.con文件的保存目录为\tdconnection
self.createFtdcTraderApi(os.getcwd() + '\\tdconnection\\')
#----------------------------------------------------------------------
def onFrontConnected(self):
"""服务器连接"""
event = Event(type_=EVENT_LOG)
event.dict_['log'] = u'交易服务器连接成功'
self.__eventEngine.put(event)
# 如果用户已经填入了用户名等等,则自动尝试连接
if self.__userid:
req = {}
req['UserID'] = self.__userid
req['Password'] = self.__password
req['BrokerID'] = self.__brokerid
self.__reqid = self.__reqid + 1
self.reqUserLogin(req, self.__reqid)
#----------------------------------------------------------------------
def onFrontDisconnected(self, n):
"""服务器断开"""
event = Event(type_=EVENT_LOG)
event.dict_['log'] = u'交易服务器连接断开'
self.__eventEngine.put(event)
#----------------------------------------------------------------------
def onHeartBeatWarning(self, n):
""""""
pass
#----------------------------------------------------------------------
def onRspAuthenticate(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspUserLogin(self, data, error, n, last):
"""登陆回报"""
event = Event(type_=EVENT_LOG)
if error['ErrorID'] == 0:
log = u'交易服务器登陆成功'
else:
log = u'登陆回报,错误代码:' + unicode(error['ErrorID']) + u',' + u'错误信息:' + error['ErrorMsg'].decode('gbk')
event.dict_['log'] = log
self.__eventEngine.put(event)
self.getSettlement() # 登录完成后立即查询结算信息
#----------------------------------------------------------------------
def onRspUserLogout(self, data, error, n, last):
"""登出回报"""
event = Event(type_=EVENT_LOG)
if error['ErrorID'] == 0:
log = u'交易服务器登出成功'
else:
log = u'登出回报,错误代码:' + unicode(error['ErrorID']) + u',' + u'错误信息:' + error['ErrorMsg'].decode('gbk')
event.dict_['log'] = log
self.__eventEngine.put(event)
#----------------------------------------------------------------------
def onRspUserPasswordUpdate(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspTradingAccountPasswordUpdate(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspOrderInsert(self, data, error, n, last):
"""发单错误(柜台)"""
event = Event(type_=EVENT_LOG)
log = u' 发单错误回报,错误代码:' + unicode(error['ErrorID']) + u',' + u'错误信息:' + error['ErrorMsg'].decode('gbk')
event.dict_['log'] = log
self.__eventEngine.put(event)
#----------------------------------------------------------------------
def onRspParkedOrderInsert(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspParkedOrderAction(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspOrderAction(self, data, error, n, last):
"""撤单错误(柜台)"""
event = Event(type_=EVENT_LOG)
log = u'撤单错误回报,错误代码:' + unicode(error['ErrorID']) + u',' + u'错误信息:' + error['ErrorMsg'].decode('gbk')
event.dict_['log'] = log
self.__eventEngine.put(event)
#----------------------------------------------------------------------
def onRspQueryMaxOrderVolume(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspSettlementInfoConfirm(self, data, error, n, last):
"""确认结算信息回报"""
event = Event(type_=EVENT_LOG)
log = u'结算信息确认完成'
event.dict_['log'] = log
self.__eventEngine.put(event)
event = Event(type_=EVENT_TDLOGIN)
self.__eventEngine.put(event)
#----------------------------------------------------------------------
def onRspRemoveParkedOrder(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspRemoveParkedOrderAction(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspExecOrderInsert(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspExecOrderAction(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspForQuoteInsert(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQuoteInsert(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQuoteAction(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQryOrder(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQryTrade(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQryInvestorPosition(self, data, error, n, last):
"""持仓查询回报"""
if error['ErrorID'] == 0:
event = Event(type_=EVENT_POSITION)
event.dict_['data'] = data
self.__eventEngine.put(event)
else:
event = Event(type_=EVENT_LOG)
log = u'持仓查询回报,错误代码:' + unicode(error['ErrorID']) + u',' + u'错误信息:' + error['ErrorMsg'].decode('gbk')
event.dict_['log'] = log
self.__eventEngine.put(event)
#----------------------------------------------------------------------
def onRspQryTradingAccount(self, data, error, n, last):
"""资金账户查询回报"""
if error['ErrorID'] == 0:
event = Event(type_=EVENT_ACCOUNT)
event.dict_['data'] = data
self.__eventEngine.put(event)
else:
event = Event(type_=EVENT_LOG)
log = u'账户查询回报,错误代码:' + unicode(error['ErrorID']) + u',' + u'错误信息:' + error['ErrorMsg'].decode('gbk')
event.dict_['log'] = log
self.__eventEngine.put(event)
#----------------------------------------------------------------------
def onRspQryInvestor(self, data, error, n, last):
"""投资者查询回报"""
if error['ErrorID'] == 0:
event = Event(type_=EVENT_INVESTOR)
event.dict_['data'] = data
self.__eventEngine.put(event)
else:
event = Event(type_=EVENT_LOG)
log = u'合约投资者回报,错误代码:' + unicode(error['ErrorID']) + u',' + u'错误信息:' + error['ErrorMsg'].decode('gbk')
event.dict_['log'] = log
self.__eventEngine.put(event)
#----------------------------------------------------------------------
def onRspQryTradingCode(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQryInstrumentMarginRate(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQryInstrumentCommissionRate(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQryExchange(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQryProduct(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQryInstrument(self, data, error, n, last):
"""
合约查询回报
由于该回报的推送速度极快,因此不适合全部存入队列中处理,
选择先储存在一个本地字典中,全部收集完毕后再推送到队列中
(由于耗时过长目前使用其他进程读取)
"""
if error['ErrorID'] == 0:
event = Event(type_=EVENT_INSTRUMENT)
event.dict_['data'] = data
event.dict_['last'] = last
self.__eventEngine.put(event)
else:
event = Event(type_=EVENT_LOG)
log = u'合约投资者回报,错误代码:' + unicode(error['ErrorID']) + u',' + u'错误信息:' + error['ErrorMsg'].decode('gbk')
event.dict_['log'] = log
self.__eventEngine.put(event)
#----------------------------------------------------------------------
def onRspQryDepthMarketData(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQrySettlementInfo(self, data, error, n, last):
"""查询结算信息回报"""
if last:
event = Event(type_=EVENT_LOG)
log = u'结算信息查询完成'
event.dict_['log'] = log
self.__eventEngine.put(event)
self.confirmSettlement() # 查询完成后立即确认结算信息
#----------------------------------------------------------------------
    def onRspQryTransferBank(self, data, error, n, last):
        """CTP callback, not used by this gateway; response ignored."""
        pass
    #----------------------------------------------------------------------
    def onRspQryInvestorPositionDetail(self, data, error, n, last):
        """CTP callback, not used by this gateway; response ignored."""
        pass
    #----------------------------------------------------------------------
    def onRspQryNotice(self, data, error, n, last):
        """CTP callback, not used by this gateway; response ignored."""
        pass
    #----------------------------------------------------------------------
    def onRspQrySettlementInfoConfirm(self, data, error, n, last):
        """CTP callback, not used by this gateway; response ignored."""
        pass
    #----------------------------------------------------------------------
    def onRspQryInvestorPositionCombineDetail(self, data, error, n, last):
        """CTP callback, not used by this gateway; response ignored."""
        pass
    #----------------------------------------------------------------------
    def onRspQryCFMMCTradingAccountKey(self, data, error, n, last):
        """CTP callback, not used by this gateway; response ignored."""
        pass
    #----------------------------------------------------------------------
    def onRspQryEWarrantOffset(self, data, error, n, last):
        """CTP callback, not used by this gateway; response ignored."""
        pass
    #----------------------------------------------------------------------
    def onRspQryInvestorProductGroupMargin(self, data, error, n, last):
        """CTP callback, not used by this gateway; response ignored."""
        pass
    #----------------------------------------------------------------------
    def onRspQryExchangeMarginRate(self, data, error, n, last):
        """CTP callback, not used by this gateway; response ignored."""
        pass
    #----------------------------------------------------------------------
    def onRspQryExchangeMarginRateAdjust(self, data, error, n, last):
        """CTP callback, not used by this gateway; response ignored."""
        pass
    #----------------------------------------------------------------------
    def onRspQryExchangeRate(self, data, error, n, last):
        """CTP callback, not used by this gateway; response ignored."""
        pass
    #----------------------------------------------------------------------
    def onRspQrySecAgentACIDMap(self, data, error, n, last):
        """CTP callback, not used by this gateway; response ignored."""
        pass
    #----------------------------------------------------------------------
    def onRspQryOptionInstrTradeCost(self, data, error, n, last):
        """CTP callback, not used by this gateway; response ignored."""
        pass
    #----------------------------------------------------------------------
    def onRspQryOptionInstrCommRate(self, data, error, n, last):
        """CTP callback, not used by this gateway; response ignored."""
        pass
    #----------------------------------------------------------------------
    def onRspQryExecOrder(self, data, error, n, last):
        """CTP callback, not used by this gateway; response ignored."""
        pass
    #----------------------------------------------------------------------
    def onRspQryForQuote(self, data, error, n, last):
        """CTP callback, not used by this gateway; response ignored."""
        pass
    #----------------------------------------------------------------------
    def onRspQryQuote(self, data, error, n, last):
        """CTP callback, not used by this gateway; response ignored."""
        pass
    #----------------------------------------------------------------------
    def onRspQryTransferSerial(self, data, error, n, last):
        """CTP callback, not used by this gateway; response ignored."""
        pass
    #----------------------------------------------------------------------
    def onRspQryAccountregister(self, data, error, n, last):
        """CTP callback, not used by this gateway; response ignored."""
        pass
#----------------------------------------------------------------------
def onRspError(self, error, n, last):
"""错误回报"""
event = Event(type_=EVENT_LOG)
log = u'交易错误回报,错误代码:' + unicode(error['ErrorID']) + u',' + u'错误信息:' + error['ErrorMsg'].decode('gbk')
event.dict_['log'] = log
self.__eventEngine.put(event)
#----------------------------------------------------------------------
def onRtnOrder(self, data):
"""报单回报"""
# 更新最大报单编号
newref = data['OrderRef']
self.__orderref = max(self.__orderref, int(newref))
# 常规报单事件
event1 = Event(type_=EVENT_ORDER)
event1.dict_['data'] = data
self.__eventEngine.put(event1)
# 特定合约行情事件
event2 = Event(type_=(EVENT_ORDER_ORDERREF+data['OrderRef']))
event2.dict_['data'] = data
self.__eventEngine.put(event2)
#----------------------------------------------------------------------
def onRtnTrade(self, data):
"""成交回报"""
# 常规成交事件
event1 = Event(type_=EVENT_TRADE)
event1.dict_['data'] = data
self.__eventEngine.put(event1)
# 特定合约成交事件
event2 = Event(type_=(EVENT_TRADE_CONTRACT+data['InstrumentID']))
event2.dict_['data'] = data
self.__eventEngine.put(event2)
#----------------------------------------------------------------------
def onErrRtnOrderInsert(self, data, error):
"""发单错误回报(交易所)"""
event = Event(type_=EVENT_LOG)
log = u'发单错误回报,错误代码:' + unicode(error['ErrorID']) + u',' + u'错误信息:' + error['ErrorMsg'].decode('gbk')
event.dict_['log'] = log
self.__eventEngine.put(event)
#----------------------------------------------------------------------
def onErrRtnOrderAction(self, data, error):
"""撤单错误回报(交易所)"""
event = Event(type_=EVENT_LOG)
log = u'撤单错误回报,错误代码:' + unicode(error['ErrorID']) + u',' + u'错误信息:' + error['ErrorMsg'].decode('gbk')
event.dict_['log'] = log
self.__eventEngine.put(event)
#----------------------------------------------------------------------
    def onRtnInstrumentStatus(self, data):
        """CTP callback, not used by this gateway; notification ignored."""
        pass
    #----------------------------------------------------------------------
    def onRtnTradingNotice(self, data):
        """CTP callback, not used by this gateway; notification ignored."""
        pass
    #----------------------------------------------------------------------
    def onRtnErrorConditionalOrder(self, data):
        """CTP callback, not used by this gateway; notification ignored."""
        pass
    #----------------------------------------------------------------------
    def onRtnExecOrder(self, data):
        """CTP callback, not used by this gateway; notification ignored."""
        pass
    #----------------------------------------------------------------------
    def onErrRtnExecOrderInsert(self, data, error):
        """CTP callback, not used by this gateway; error ignored."""
        pass
    #----------------------------------------------------------------------
    def onErrRtnExecOrderAction(self, data, error):
        """CTP callback, not used by this gateway; error ignored."""
        pass
    #----------------------------------------------------------------------
    def onErrRtnForQuoteInsert(self, data, error):
        """CTP callback, not used by this gateway; error ignored."""
        pass
    #----------------------------------------------------------------------
    def onRtnQuote(self, data):
        """CTP callback, not used by this gateway; notification ignored."""
        pass
    #----------------------------------------------------------------------
    def onErrRtnQuoteInsert(self, data, error):
        """CTP callback, not used by this gateway; error ignored."""
        pass
    #----------------------------------------------------------------------
    def onErrRtnQuoteAction(self, data, error):
        """CTP callback, not used by this gateway; error ignored."""
        pass
    #----------------------------------------------------------------------
    def onRtnForQuoteRsp(self, data):
        """CTP callback, not used by this gateway; notification ignored."""
        pass
    #----------------------------------------------------------------------
    def onRspQryContractBank(self, data, error, n, last):
        """CTP callback, not used by this gateway; response ignored."""
        pass
    #----------------------------------------------------------------------
    def onRspQryParkedOrder(self, data, error, n, last):
        """CTP callback, not used by this gateway; response ignored."""
        pass
    #----------------------------------------------------------------------
    def onRspQryParkedOrderAction(self, data, error, n, last):
        """CTP callback, not used by this gateway; response ignored."""
        pass
    #----------------------------------------------------------------------
    def onRspQryTradingNotice(self, data, error, n, last):
        """CTP callback, not used by this gateway; response ignored."""
        pass
    #----------------------------------------------------------------------
    def onRspQryBrokerTradingParams(self, data, error, n, last):
        """CTP callback, not used by this gateway; response ignored."""
        pass
    #----------------------------------------------------------------------
    def onRspQryBrokerTradingAlgos(self, data, error, n, last):
        """CTP callback, not used by this gateway; response ignored."""
        pass
    #----------------------------------------------------------------------
    def onRtnFromBankToFutureByBank(self, data):
        """Bank-futures transfer callback, not used by this gateway."""
        pass
    #----------------------------------------------------------------------
    def onRtnFromFutureToBankByBank(self, data):
        """Bank-futures transfer callback, not used by this gateway."""
        pass
    #----------------------------------------------------------------------
    def onRtnRepealFromBankToFutureByBank(self, data):
        """Bank-futures transfer callback, not used by this gateway."""
        pass
    #----------------------------------------------------------------------
    def onRtnRepealFromFutureToBankByBank(self, data):
        """Bank-futures transfer callback, not used by this gateway."""
        pass
    #----------------------------------------------------------------------
    def onRtnFromBankToFutureByFuture(self, data):
        """Bank-futures transfer callback, not used by this gateway."""
        pass
    #----------------------------------------------------------------------
    def onRtnFromFutureToBankByFuture(self, data):
        """Bank-futures transfer callback, not used by this gateway."""
        pass
    #----------------------------------------------------------------------
    def onRtnRepealFromBankToFutureByFutureManual(self, data):
        """Bank-futures transfer callback, not used by this gateway."""
        pass
    #----------------------------------------------------------------------
    def onRtnRepealFromFutureToBankByFutureManual(self, data):
        """Bank-futures transfer callback, not used by this gateway."""
        pass
    #----------------------------------------------------------------------
    def onRtnQueryBankBalanceByFuture(self, data):
        """Bank balance query callback, not used by this gateway."""
        pass
    #----------------------------------------------------------------------
    def onErrRtnBankToFutureByFuture(self, data, error):
        """Bank-futures transfer error callback, not used by this gateway."""
        pass
    #----------------------------------------------------------------------
    def onErrRtnFutureToBankByFuture(self, data, error):
        """Bank-futures transfer error callback, not used by this gateway."""
        pass
    #----------------------------------------------------------------------
    def onErrRtnRepealBankToFutureByFutureManual(self, data, error):
        """Bank-futures transfer error callback, not used by this gateway."""
        pass
    #----------------------------------------------------------------------
    def onErrRtnRepealFutureToBankByFutureManual(self, data, error):
        """Bank-futures transfer error callback, not used by this gateway."""
        pass
    #----------------------------------------------------------------------
    def onErrRtnQueryBankBalanceByFuture(self, data, error):
        """Bank balance query error callback, not used by this gateway."""
        pass
    #----------------------------------------------------------------------
    def onRtnRepealFromBankToFutureByFuture(self, data):
        """Bank-futures transfer callback, not used by this gateway."""
        pass
    #----------------------------------------------------------------------
    def onRtnRepealFromFutureToBankByFuture(self, data):
        """Bank-futures transfer callback, not used by this gateway."""
        pass
    #----------------------------------------------------------------------
    def onRspFromBankToFutureByFuture(self, data, error, n, last):
        """Bank-futures transfer response, not used by this gateway."""
        pass
    #----------------------------------------------------------------------
    def onRspFromFutureToBankByFuture(self, data, error, n, last):
        """Bank-futures transfer response, not used by this gateway."""
        pass
    #----------------------------------------------------------------------
    def onRspQueryBankAccountMoneyByFuture(self, data, error, n, last):
        """Bank account balance response, not used by this gateway."""
        pass
    #----------------------------------------------------------------------
    def onRtnOpenAccountByBank(self, data):
        """Bank account management callback, not used by this gateway."""
        pass
    #----------------------------------------------------------------------
    def onRtnCancelAccountByBank(self, data):
        """Bank account management callback, not used by this gateway."""
        pass
    #----------------------------------------------------------------------
    def onRtnChangeAccountByBank(self, data):
        """Bank account management callback, not used by this gateway."""
        pass
#----------------------------------------------------------------------
    def login(self, address, userid, password, brokerid):
        """Connect to the trading front server.

        :param address: front server address (e.g. ``tcp://host:port``)
        :param userid: investor/user id used for authentication
        :param password: account password
        :param brokerid: broker id assigned by the futures company
        """
        self.__userid = userid
        self.__password = password
        self.__brokerid = brokerid
        # Data resend mode: replay private/public flow from the start of today.
        self.subscribePrivateTopic(0)
        self.subscribePublicTopic(0)
        # Register the front server address.
        self.registerFront(address)
        # Initialize the connection; onFrontConnected is called on success.
        self.init()
#----------------------------------------------------------------------
def getInstrument(self):
"""查询合约"""
self.__reqid = self.__reqid + 1
self.reqQryInstrument({}, self.__reqid)
#----------------------------------------------------------------------
def getAccount(self):
"""查询账户"""
self.__reqid = self.__reqid + 1
self.reqQryTradingAccount({}, self.__reqid)
#----------------------------------------------------------------------
def getInvestor(self):
"""查询投资者"""
self.__reqid = self.__reqid + 1
self.reqQryInvestor({}, self.__reqid)
#----------------------------------------------------------------------
def getPosition(self):
"""查询持仓"""
self.__reqid = self.__reqid + 1
req = {}
req['BrokerID'] = self.__brokerid
req['InvestorID'] = self.__userid
self.reqQryInvestorPosition(req, self.__reqid)
#----------------------------------------------------------------------
def sendOrder(self, instrumentid, exchangeid, price, pricetype, volume, direction, offset):
"""发单"""
self.__reqid = self.__reqid + 1
req = {}
req['InstrumentID'] = instrumentid
req['OrderPriceType'] = pricetype
req['LimitPrice'] = price
req['VolumeTotalOriginal'] = volume
req['Direction'] = direction
req['CombOffsetFlag'] = offset
self.__orderref = self.__orderref + 1
req['OrderRef'] = str(self.__orderref)
req['InvestorID'] = self.__userid
req['UserID'] = self.__userid
req['BrokerID'] = self.__brokerid
req['CombHedgeFlag'] = defineDict['THOST_FTDC_HF_Speculation'] # 投机单
req['ContingentCondition'] = defineDict['THOST_FTDC_CC_Immediately'] # 立即发单
req['ForceCloseReason'] = defineDict['THOST_FTDC_FCC_NotForceClose'] # 非强平
req['IsAutoSuspend'] = 0 # 非自动挂起
req['TimeCondition'] = defineDict['THOST_FTDC_TC_GFD'] # 今日有效
req['VolumeCondition'] = defineDict['THOST_FTDC_VC_AV'] # 任意成交量
req['MinVolume'] = 1 # 最小成交量为1
self.reqOrderInsert(req, self.__reqid)
# 返回订单号,便于某些算法进行动态管理
return self.__orderref
#----------------------------------------------------------------------
def cancelOrder(self, instrumentid, exchangeid, orderref, frontid, sessionid):
"""撤单"""
self.__reqid = self.__reqid + 1
req = {}
req['InstrumentID'] = instrumentid
req['ExchangeID'] = exchangeid
req['OrderRef'] = orderref
req['FrontID'] = frontid
req['SessionID'] = sessionid
req['ActionFlag'] = defineDict['THOST_FTDC_AF_Delete']
req['BrokerID'] = self.__brokerid
req['InvestorID'] = self.__userid
self.reqOrderAction(req, self.__reqid)
#----------------------------------------------------------------------
def getSettlement(self):
"""查询结算信息"""
self.__reqid = self.__reqid + 1
req = {}
req['BrokerID'] = self.__brokerid
req['InvestorID'] = self.__userid
self.reqQrySettlementInfo(req, self.__reqid)
#----------------------------------------------------------------------
def confirmSettlement(self):
"""确认结算信息"""
self.__reqid = self.__reqid + 1
req = {}
req['BrokerID'] = self.__brokerid
req['InvestorID'] = self.__userid
self.reqSettlementInfoConfirm(req, self.__reqid) | mit |
wikimedia/operations-debs-contenttranslation-hfst | test/tools/hfst-compare.py | 2 | 1489 | import hfst, sys
import hfst_commandline
# Command-line defaults.
silent=False            # suppress per-transducer result lines
harmonize=True          # harmonize alphabets before comparison
eliminate_flags=False   # remove flag diacritics before comparison
retval=0                # process exit code: 0 = all equal, 1 = a pair differed
short_getopts='sqHe'
long_getopts=['silent','quiet','do-not-harmonize','eliminate-flags']
errmsg='Usage: hfst-compare.py INFILE1 INFILE2'
options = hfst_commandline.hfst_getopt(short_getopts, long_getopts, 2, errmsg)
# Apply recognized options; hfst_getopt already rejected unknown ones.
for opt in options[0]:
    if opt[0] == '-s' or opt[0] == '--silent' or opt[0] == '-q' or opt[0] == '--quiet':
        silent = True
    elif opt[0] == '-H' or opt[0] == '--do-not-harmonize':
        harmonize = False
    elif opt[0] == '-e' or opt[0] == '--eliminate-flags':
        eliminate_flags=True
    else:
        pass # unknown options were already checked in hfst_getopt
# Open the two input streams; each entry is a (stream, filename) pair.
streams = hfst_commandline.get_two_hfst_input_streams(options)
istr1 = streams[0][0]
istr1_name = streams[0][1]
istr2 = streams[1][0]
istr2_name = streams[1][1]
# Transducers of different binary types cannot be compared.
if (istr1.get_type() != istr2.get_type()):
    raise RuntimeError('Error: transducer types differ in ' + istr1_name + ' and ' + istr2_name)
# Compare the streams pairwise until either one is exhausted.
while((not istr1.is_eof()) and (not istr2.is_eof())):
    tr1 = istr1.read()
    tr2 = istr2.read()
    if eliminate_flags:
        tr1.eliminate_flags()
        tr2.eliminate_flags()
    if (tr1.compare(tr2, harmonize)):
        if not silent:
            print(tr1.get_name() + ' == ' + tr2.get_name())
    else:
        if not silent:
            print(tr1.get_name() + ' != ' + tr2.get_name())
        retval=1
istr1.close()
istr2.close()
sys.exit(retval)
| gpl-3.0 |
Beit-Hatfutsot/mojp-dbs-pipelines | tests/clearmash/test_add_entity_ids.py | 1 | 5414 | from datapackage_pipelines_mojp.clearmash.processors.add_entity_ids import Processor as AddEntityIdsProcessor
from tests.clearmash.mock_clearmash_api import MockClearmashApi
from tests.common import get_mock_settings, assert_conforms_to_schema
from datapackage_pipelines_mojp.common.processors.dump_to_sql import Processor as DumpToSqlProcessor
from tests.common import get_test_db_session
from sqlalchemy import Column, Text
import json
class MockDumpToSqlProcessor(DumpToSqlProcessor):
    """Test double for DumpToSqlProcessor that writes to the sqlite test DB.

    sqlite has no JSONB type, so JSONB columns are downgraded to TEXT and
    their values serialized with ``json.dumps`` before being written.
    """

    def __init__(self, *args, **kwargs):
        # Names of columns that were originally JSONB and therefore need
        # JSON serialization on write.
        self._jsonb_columns = []
        super(MockDumpToSqlProcessor, self).__init__(*args, **kwargs)

    def _get_new_db_session(self):
        """Return the shared test DB session, creating it on first use.

        BUGFIX: the original checked ``hasattr(self, "__test_db_session")``.
        Attribute *strings* are not name-mangled (and the assigned name was
        also misspelled ``__test_db_sssion``), so the check could never
        succeed and a fresh session was created on every call.  Use a plain
        single-underscore attribute so the cache actually works.
        """
        if not hasattr(self, "_test_db_session"):
            self._test_db_session = get_test_db_session()
        return self._test_db_session

    def _descriptor_to_columns_and_constraints(self, *args):
        # Delegate to the real implementation, then adapt the columns for sqlite.
        columns, constraints, indexes = super(MockDumpToSqlProcessor, self)._descriptor_to_columns_and_constraints(*args)
        columns = [self._filter_sqlalchemy_column(column) for column in columns]
        return columns, constraints, indexes

    def _filter_sqlalchemy_column(self, column):
        # change JSONB to TEXT because sqlite doesn't support jsonb
        if str(column.type) == "JSONB":
            self._jsonb_columns.append(column.name)
            column = Column(column.name, Text())
        return column

    def _filter_sqlalchemy_column_value(self, k, v):
        # JSONB-backed columns are stored as their JSON text representation.
        if k in self._jsonb_columns:
            return json.dumps(v)
        else:
            return v

    def _filter_sqlalchemy_row(self, row):
        return {k: self._filter_sqlalchemy_column_value(k, v) for k, v in row.items()}

    def db_commit(self):
        # Return the filtered rows instead of committing; tests inspect them.
        return [self._filter_sqlalchemy_row(row) for row in self._rows_buffer]

    def db_connect(self, **kwargs):
        # No real connection is made in tests.
        pass

    def _db_delete_session(self):
        # The shared test session is kept alive for the whole test run.
        pass
class MockAddEntityIdsProcessor(AddEntityIdsProcessor):
    """Test double for AddEntityIdsProcessor using the mocked ClearMash API
    and the sqlite test database."""

    def _get_clearmash_api_class(self):
        # Use the canned API responses instead of hitting ClearMash.
        return MockClearmashApi

    def _get_new_db_session(self):
        """Return the shared test DB session, creating it on first use.

        BUGFIX: the original checked ``hasattr(self, "__test_db_session")``.
        Attribute strings are not name-mangled (and the assigned name was
        misspelled ``__test_db_sssion``), so the check was always false and
        a new session was created on every call.
        """
        if not hasattr(self, "_test_db_session"):
            self._test_db_session = get_test_db_session()
        return self._test_db_session

    def _get_folders_processor(self, parameters, schema, settings):
        """Build the nested folders dump processor backed by the test DB."""
        processor = MockDumpToSqlProcessor({"resource": "_",
                                            "table": "clearmash-folders",
                                            "commit-every": 0},
                                           {"name": "_", "resources": [{"name": "_", "schema": schema}]},
                                           [], settings)
        # Share our session with the nested processor.  (The original wrote
        # ``processor.__test_db_sssion``, which name-mangles to an attribute
        # of *this* class that the target class never reads.)
        processor._test_db_session = self._get_new_db_session()
        processor._filter_resource_init(schema)
        return processor
# Allow running this mock as a standalone datapackage-pipelines processor
# (used when the test pipeline spec invokes this module directly).
if __name__ == "__main__":
    MockAddEntityIdsProcessor.main()
def test_clearmash_add_entity_ids():
    """End-to-end check of the add-entity-ids processor against mocked data.

    Runs the processor with the mock ClearMash API and asserts the produced
    resource's size, schema conformance, the full first row, and spot-checks
    of collection/item_id pairs across the output.
    """
    settings = get_mock_settings(OVERRIDE_CLEARMASH_COLLECTIONS="",
                                 CLEARMASH_MAX_RETRIES=0,
                                 CLEARMASH_RETRY_SLEEP_SECONDS=0)
    parameters = {"add-resource": "entity-ids", "folders-table": "clearmash-folders"}
    datapackage = {"resources": []}
    resources = []
    # spew() yields (datapackage, resources iterator, stats).
    datapackage, resources, stats = MockAddEntityIdsProcessor(parameters, datapackage, resources, settings).spew()
    resources = list(resources)
    assert len(resources) == 1
    resource = list(resources[0])
    assert len(resource) == 50
    assert_conforms_to_schema(AddEntityIdsProcessor._get_schema(), resource[0])
    # Full first row, including the raw ClearMash document metadata.
    assert resource[0] == {'collection': 'familyNames', 'folder_id': 45,
                           'item_id': 115306,
                           'name': 'בן עמרה',
                           'metadata': {'CommunityId': 6,
                                        'CreatorPersonId': 2,
                                        'FileType': 0,
                                        'Id': 115306,
                                        'IsBookmarked': False,
                                        'IsFolder': False,
                                        'IsLiked': False,
                                        'IsPublished': True,
                                        'IsReadOnly': False,
                                        'IsSearchable': True,
                                        'LikesCount': 0,
                                        'LockedByPersonId': 0,
                                        'LockedByPersonName': '',
                                        'ModifiedByPersonId': 2,
                                        'ModifiedByPersonName': 'Admin ',
                                        'Name': 'בן עמרה',
                                        'ParentFolderId': -1,
                                        'PermissionType': 5,
                                        'SizeInBytes': 0,
                                        'UserCanPublish': True}}
    # Spot-check one row from each collection bucket in the output.
    assert {k: resource[10][k] for k in ["collection", "item_id"]} == {'collection': 'places', 'item_id': 115325}
    assert {k: resource[20][k] for k in ["collection", "item_id"]} == {'collection': 'movies', 'item_id': 115414}
    assert {k: resource[30][k] for k in ["collection", "item_id"]} == {'collection': 'personalities', 'item_id': 115318}
    assert {k: resource[40][k] for k in ["collection", "item_id"]} == {'collection': 'photoUnits', 'item_id': 115301}
| mit |
Galarzaa90/NabBot | cogs/serverlog.py | 1 | 22075 | # Copyright 2019 Allan Galarza
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime as dt
import logging
from enum import Enum
from typing import Any, List, Optional
import discord
from discord.ext import commands
from nabbot import NabBot
from .utils import get_region_string, get_user_avatar, CogUtils
from .utils.database import DbChar
from .utils.tibia import NabChar, get_voc_abb_and_emoji
log = logging.getLogger("nabbot")
# Embed colours for each kind of server-log entry, so an event's type can be
# recognized at a glance in the log channel.
COLOUR_CHAR_REGISTERED = discord.Colour.dark_teal()
COLOUR_CHAR_UNREGISTERED = discord.Colour.dark_magenta()
COLOUR_CHAR_RENAME = discord.Colour.blurple()
COLOUR_CHAR_TRANSFERRED = discord.Colour.greyple()
COLOUR_MEMBER_JOINED = discord.Colour.green()
COLOUR_MEMBER_JOINED_BOT = discord.Colour.dark_green()
COLOUR_MEMBER_UPDATE = discord.Colour.blue()
COLOUR_MEMBER_KICK = discord.Colour.red()
COLOUR_MEMBER_REMOVE = discord.Colour(0xffff00)  # yellow
COLOUR_MEMBER_BAN = discord.Colour.dark_red()
COLOUR_MEMBER_UNBAN = discord.Colour.orange()
COLOUR_EMOJI_UPDATE = discord.Colour.dark_orange()
COLOUR_GUILD_UPDATE = discord.Colour.purple()
COLOUR_WATCHLIST_DELETE = discord.Colour.dark_gold()
COLOUR_AUTOROLE_DELETE = discord.Colour.dark_gold()
COLOUR_JOINABLEROLE_DELETE = discord.Colour.dark_gold()
class ChangeType(Enum):
    """Kinds of character changes recorded in the character history."""
    OWNER = "owner"
    GUILD = "guild"
    WORLD = "world"
    NAME = "name"
class ServerLog(commands.Cog, CogUtils):
    def __init__(self, bot: NabBot):
        # Reference to the bot, used by every handler for lookups and logging.
        self.bot = bot

    def cog_unload(self):
        # Called by discord.py when the cog is removed; only logs the event.
        log.info(f"{self.tag} Unloading cog")
# region Custom Events
    @commands.Cog.listener()
    async def on_characters_registered(self, user: discord.User, added: List[NabChar], updated: List[DbChar],
                                       author: discord.User = None):
        """Called when a user registers new characters.

        Records ownership changes in the character history and announces the
        characters in the server log of every guild tracking their world.

        :param user: The user the characters now belong to.
        :param added: Characters registered for the first time.
        :param updated: Characters reassigned from another user.
        :param author: The staff member who performed the registration, if any.
        """
        user_guilds = self.bot.get_user_guilds(user.id)
        embed = discord.Embed(colour=COLOUR_CHAR_REGISTERED)
        embed.set_author(name=f"{user.name}#{user.discriminator}", icon_url=get_user_avatar(user))
        # History: new characters go from "no owner" (0) to this user.
        for char in added:
            await self.add_character_history(char.id, ChangeType.OWNER, 0, user.id, author)
        # History: reassigned characters go from their previous owner to this user.
        for char in updated:
            await self.add_character_history(char.id, ChangeType.OWNER, char.user_id, user.id, author)
        if author:
            embed.set_footer(text=f"{author.name}#{author.discriminator}", icon_url=get_user_avatar(author))
        for guild in user_guilds:
            # Only announce characters belonging to the world this guild tracks.
            world = self.bot.tracked_worlds.get(guild.id)
            _added = [c for c in added if c.world == world]
            _updated = [c for c in updated if c.world == world]
            if not _added and not _updated:
                continue
            description = f"{user.mention} registered the following characters:"
            for char in _added:
                tibia_guild = char.guild_name or "No guild"
                voc = get_voc_abb_and_emoji(char.vocation)
                description += f"\n‣ {char.name} - Level {char.level} {voc} - **{tibia_guild}**"
            for char in _updated:
                voc = get_voc_abb_and_emoji(char.vocation)
                tibia_guild = char.guild or "No guild"
                # DbChar stores level as negative for scheduled deletion; show magnitude.
                description += f"\n‣ {char.name} - Level {abs(char.level)} {voc} - **{tibia_guild}** (Reassigned)"
            embed.description = description
            await self.bot.send_log_message(guild, embed=embed)
    @commands.Cog.listener()
    async def on_character_unregistered(self, user: discord.User, char: DbChar, author: discord.User = None):
        """Called when a user unregisters a character.

        Records the ownership change and announces the removal in the server
        log of every guild tracking the character's world.

        :param user: The user the character belonged to.
        :param char: The character that was unregistered.
        :param author: The staff member who performed the action, if any.
        """
        user_guilds = self.bot.get_user_guilds(user.id)
        embed = discord.Embed(colour=COLOUR_CHAR_UNREGISTERED)
        embed.set_author(name=f"{user.name}#{user.discriminator}", icon_url=get_user_avatar(user))
        voc = get_voc_abb_and_emoji(char.vocation)
        tibia_guild = char.guild or "No guild"
        # History: ownership goes from this user to "no owner" (0).
        await self.add_character_history(char.id, ChangeType.OWNER, user.id, 0)
        if author is not None:
            embed.set_footer(text=f"{author.name}#{author.discriminator}", icon_url=get_user_avatar(author))
        for guild in user_guilds:
            world = self.bot.tracked_worlds.get(guild.id)
            if char.world != world:
                continue
            embed.description = f"{user.mention} unregistered:" \
                                f"\n‣ {char.name} - Level {abs(char.level)} {voc} - **{tibia_guild}**"
            await self.bot.send_log_message(guild, embed=embed)
@commands.Cog.listener()
async def on_character_rename(self, char: NabChar, old_name: str):
"""Called when a character is renamed.
Announces it in the server log of the relevant servers."""
user_id = char.owner_id
new_name = char.name
user_guilds = self.bot.get_user_guilds(user_id)
await self.add_character_history(char.id, ChangeType.NAME, old_name, char.name)
for guild in user_guilds:
if self.bot.tracked_worlds.get(guild.id) != char.world:
continue
member = guild.get_member(user_id)
if member is None:
continue
embed = discord.Embed(colour=COLOUR_CHAR_RENAME,
description=f"A character of {member.mention} changed name.\n"
f"‣ **{old_name}** -> **{new_name}**")
embed.set_author(name=f"{member.name}#{member.discriminator}", icon_url=get_user_avatar(member))
await self.bot.send_log_message(guild, embed=embed)
@commands.Cog.listener()
async def on_character_transferred(self, char: NabChar, old_world: str):
"""Called when a character switches world.
Announces it in the server log of the relevant servers, i.e. servers tracking the former or new world."""
user_id = char.owner_id
user_guilds = self.bot.get_user_guilds(user_id)
voc = get_voc_abb_and_emoji(char.vocation)
await self.add_character_history(char.id, ChangeType.WORLD, old_world, char.world)
for guild in user_guilds:
tracked_world = self.bot.tracked_worlds.get(guild.id)
if not(char.world == tracked_world or old_world == tracked_world):
continue
member = guild.get_member(user_id)
if member is None:
continue
embed = discord.Embed(colour=COLOUR_CHAR_TRANSFERRED,
description=f"A character of {member.mention} transferred:\n"
f"‣ **{char.name}** - Level {char.level} {voc} - "
f"{old_world} -> {char.world}")
embed.set_author(name=f"{member.name}#{member.discriminator}", icon_url=get_user_avatar(member))
await self.bot.send_log_message(guild, embed=embed)
    @commands.Cog.listener()
    async def on_character_guild_change(self, char: NabChar, old_guild: str):
        """Called when a character changes Tibia guild.

        Only records the change in the character's history; no announcement."""
        await self.add_character_history(char.id, ChangeType.GUILD, old_guild, char.guild_name)
@commands.Cog.listener()
async def on_role_auto_deleted(self, role: discord.Role):
embed = discord.Embed(title="Automatic role deleted", colour=COLOUR_AUTOROLE_DELETE,
description=f"Automatic role **{role.name}** deleted.")
entry = await self.get_audit_entry(role.guild, discord.AuditLogAction.role_delete, role)
if entry:
embed.set_footer(text=f"{entry.user.name}#{entry.user.discriminator}", icon_url=get_user_avatar(entry.user))
await self.bot.send_log_message(role.guild, embed=embed)
@commands.Cog.listener()
async def on_role_joinable_deleted(self, role: discord.Role):
embed = discord.Embed(title="Group deleted", colour=COLOUR_JOINABLEROLE_DELETE,
description=f"Joinable role **{role.name}** deleted.")
entry = await self.get_audit_entry(role.guild, discord.AuditLogAction.role_delete, role)
if entry and entry.user.id != self.bot.user.id:
embed.set_footer(text=f"{entry.user.name}#{entry.user.discriminator}", icon_url=get_user_avatar(entry.user))
await self.bot.send_log_message(role.guild, embed=embed)
    @commands.Cog.listener()
    async def on_watchlist_deleted(self, channel: discord.TextChannel, count: int):
        """Called when a watchlist channel is deleted.

        Announces it in the server log, including how many watchlist entries
        were removed. If the bot has permission to see the audit log, the
        user who deleted the channel is shown as well.

        :param channel: The channel that was deleted.
        :param count: The number of watchlist entries that were deleted with it.
        """
        embed = discord.Embed(title="Watchlist channel deleted", colour=COLOUR_WATCHLIST_DELETE,
                              description=f"Channel `#{channel.name}` was deleted. **{count}** entries were deleted.")
        entry = await self.get_audit_entry(channel.guild, discord.AuditLogAction.channel_delete, channel)
        if entry:
            embed.set_footer(text=f"{entry.user.name}#{entry.user.discriminator}", icon_url=get_user_avatar(entry.user))
        await self.bot.send_log_message(channel.guild, embed=embed)
# endregion
# region Discord Events
    @commands.Cog.listener()
    async def on_guild_emojis_update(self, guild: discord.Guild, before: List[discord.Emoji],
                                     after: List[discord.Emoji]):
        """Called every time an emoji is created, deleted or renamed.

        The event only provides the emoji lists before and after the change,
        so the kind of change is inferred by comparing list lengths."""
        def emoji_repr(_emoji: discord.Emoji):
            # Render the emoji the way users type it, e.g. ":name:".
            fix = ":" if _emoji.require_colons else ""
            return f"{fix}{_emoji.name}{fix}"
        embed = discord.Embed(colour=COLOUR_EMOJI_UPDATE)
        emoji: Optional[discord.Emoji] = None
        # Emoji deleted: the old list is longer; find the one now missing.
        if len(before) > len(after):
            emoji = discord.utils.find(lambda e: e not in after, before)
            if emoji is None:
                return
            embed.set_author(name=f"{emoji_repr(emoji)} (ID: {emoji.id})", icon_url=emoji.url)
            embed.description = f"Emoji deleted."
            action = discord.AuditLogAction.emoji_delete
        # Emoji added: the new list is longer; find the newcomer.
        elif len(after) > len(before):
            emoji = discord.utils.find(lambda e: e not in before, after)
            if emoji is None:
                return
            embed.set_author(name=f"{emoji_repr(emoji)} (ID: {emoji.id})", icon_url=emoji.url)
            embed.description = f"Emoji added."
            action = discord.AuditLogAction.emoji_create
        else:
            # Same length: look for an emoji whose name changed.
            old_name = ""
            for new_emoji in after:
                for old_emoji in before:
                    if new_emoji == old_emoji and new_emoji.name != old_emoji.name:
                        old_name = old_emoji.name
                        emoji = new_emoji
                        break
            if emoji is None:
                return
            embed.set_author(name=f"{emoji_repr(emoji)} (ID: {emoji.id})", icon_url=emoji.url)
            embed.description = f"Emoji renamed from `{old_name}` to `{emoji.name}`"
            action = discord.AuditLogAction.emoji_update
        if emoji:
            # Attribute the change via the audit log when visible.
            entry = await self.get_audit_entry(guild, action, emoji)
            if entry:
                embed.set_footer(text="{0.name}#{0.discriminator}".format(entry.user),
                                 icon_url=get_user_avatar(entry.user))
            await self.bot.send_log_message(guild, embed=embed)
    @commands.Cog.listener()
    async def on_guild_update(self, before: discord.Guild, after: discord.Guild):
        """Called every time a guild is updated.

        Announces name, region, icon and ownership changes in the server log.
        Only the first detected change is reported per event."""
        embed = discord.Embed(colour=COLOUR_GUILD_UPDATE)
        embed.set_author(name=after.name, icon_url=after.icon_url)
        changes = True
        if before.name != after.name:
            embed.description = f"Name changed from **{before.name}** to **{after.name}**"
        elif before.region != after.region:
            embed.description = "Region changed from **{0}** to **{1}**".format(get_region_string(before.region),
                                                                                get_region_string(after.region))
        elif before.icon_url != after.icon_url:
            embed.description = "Icon changed"
            embed.set_thumbnail(url=after.icon_url)
        elif before.owner_id != after.owner_id:
            embed.description = f"Ownership transferred to {after.owner.mention}"
        else:
            # None of the properties we care about changed; announce nothing.
            changes = False
        if changes:
            # Attribute the change via the audit log when visible.
            entry = await self.get_audit_entry(after, discord.AuditLogAction.guild_update)
            if entry:
                icon_url = get_user_avatar(entry.user)
                embed.set_footer(text=f"{entry.user.name}#{entry.user.discriminator}", icon_url=icon_url)
            await self.bot.send_log_message(after, embed=embed)
@commands.Cog.listener()
async def on_member_ban(self, guild: discord.Guild, user: discord.User):
"""Called when a member is banned from a guild."""
embed = discord.Embed(description="Banned", colour=COLOUR_MEMBER_BAN)
embed.set_author(name="{0.name}#{0.discriminator}".format(user), icon_url=get_user_avatar(user))
# If bot can see audit log, we can get more details of the ban
entry = await self.get_audit_entry(guild, discord.AuditLogAction.ban, user)
if entry:
embed.set_footer(text="{0.name}#{0.discriminator}".format(entry.user),
icon_url=get_user_avatar(entry.user))
if entry.reason:
embed.description += f"\n**Reason:** {entry.reason}"
await self.bot.send_log_message(guild, embed=embed)
    @commands.Cog.listener()
    async def on_member_join(self, member: discord.Member):
        """Called when a member joins a guild (server) the bot is in.

        Announces the join in the server log; if the guild tracks a world,
        any characters already registered to the user are listed too."""
        embed = discord.Embed(description=f"{member.mention} joined.", colour=COLOUR_MEMBER_JOINED,
                              timestamp=member.created_at)
        embed.set_author(name=f"{member.name}#{member.discriminator} (ID: {member.id})",
                         icon_url=get_user_avatar(member))
        # The embed timestamp is the account creation date, labeled by this footer.
        embed.set_footer(text="Discord user since")
        if member.bot:
            embed.colour = COLOUR_MEMBER_JOINED_BOT
            embed.description = f"Bot {member.mention} added."
            return await self.bot.send_log_message(member.guild, embed=embed)
        world = self.bot.tracked_worlds.get(member.guild.id)
        # If server is not tracking worlds, we don't check the database
        if world is None:
            return await self.bot.send_log_message(member.guild, embed=embed)
        # Check if user already has characters registered and announce them on log_channel
        # This could be because he rejoined the server or is in another server tracking the same worlds
        rows = await self.bot.pool.fetch("""SELECT name, vocation, abs(level) as level, guild FROM "character"
                                            WHERE user_id = $1 AND world = $2 ORDER BY level DESC""", member.id, world)
        if rows:
            # Let other cogs refresh their caches for this user.
            self.bot.dispatch("character_change", member.id)
            characters = ""
            for c in rows:
                voc = get_voc_abb_and_emoji(c["vocation"])
                guild = c["guild"] or "No guild"
                characters += f"\n\u2023 {c['name']} - Level {c['level']} {voc} - **{guild}**"
            embed.add_field(name="Registered characters", value=characters)
        await self.bot.send_log_message(member.guild, embed=embed)
@commands.Cog.listener()
async def on_member_remove(self, member: discord.Member):
"""Called when a member leaves or is kicked from a guild."""
bot_member: discord.Member = member.guild.me
embed = discord.Embed(description="Left the server or was kicked", colour=COLOUR_MEMBER_REMOVE)
embed.set_author(name=f"{member.name}#{member.discriminator} (ID: {member.id})",
icon_url=get_user_avatar(member))
tracked_world = self.bot.tracked_worlds.get(member.guild.id)
rows = await self.bot.pool.fetch("""SELECT name, vocation, abs(level) as level, guild FROM "character"
WHERE user_id = $1 AND world = $2""", member.id, tracked_world)
registered_chars = "\nRegistered characters:" if rows else ""
for char in rows:
voc = get_voc_abb_and_emoji(char["vocation"])
tibia_guild = dict(char).get("guild", "No guild")
registered_chars += f"\n‣ {char['name']} - Level {char['level']} {voc} - **{tibia_guild}** (Reassigned)"
# If bot can see audit log, he can see if it was a kick or member left on it's own
if bot_member.guild_permissions.view_audit_log:
entry = await self.get_audit_entry(member.guild, discord.AuditLogAction.kick, member)
if entry:
embed.description = "Kicked"
embed.set_footer(text=f"{entry.user.name}#{entry.user.discriminator}",
icon_url=get_user_avatar(entry.user))
embed.colour = COLOUR_MEMBER_KICK
if entry.reason:
embed.description += f"\n**Reason:** {entry.reason}"
embed.description += registered_chars
await self.bot.send_log_message(member.guild, embed=embed)
return
embed.description = "Left the server"
await self.bot.send_log_message(member.guild, embed=embed)
return
# Otherwise, we are not certain
await self.bot.send_log_message(member.guild, embed=embed)
    @commands.Cog.listener()
    async def on_member_update(self, before: discord.Member, after: discord.Member):
        """Called every time a member is updated.

        Only nickname changes are announced in the server log; when the audit
        log shows another user made the change, they are credited in the footer."""
        if before.nick != after.nick:
            embed = discord.Embed(description=f"{after.mention}: ", colour=COLOUR_MEMBER_UPDATE)
            embed.set_author(name=f"{after.name}#{after.discriminator} (ID: {after.id})",
                             icon_url=get_user_avatar(after))
            if before.nick is None:
                embed.description += f"Nickname set to **{after.nick}**"
            elif after.nick is None:
                embed.description += f"Nickname **{before.nick}** deleted"
            else:
                embed.description += f"Nickname changed from **{before.nick}** to **{after.nick}**"
            entry = await self.get_audit_entry(after.guild, discord.AuditLogAction.member_update, after)
            # Only credit the change if it was made by someone other than the member.
            if entry and entry.user.id != after.id:
                icon_url = get_user_avatar(entry.user)
                embed.set_footer(text=f"{entry.user.name}#{entry.user.discriminator}", icon_url=icon_url)
            await self.bot.send_log_message(after.guild, embed=embed)
@commands.Cog.listener()
async def on_member_unban(self, guild: discord.Guild, user: discord.User):
"""Called when a member is unbanned from a guild"""
embed = discord.Embed(description="Unbanned", colour=COLOUR_MEMBER_UNBAN)
embed.set_author(name="{0.name}#{0.discriminator} (ID {0.id})".format(user), icon_url=get_user_avatar(user))
entry = await self.get_audit_entry(guild, discord.AuditLogAction.unban, user)
if entry:
embed.set_footer(text="{0.name}#{0.discriminator}".format(entry.user),
icon_url=get_user_avatar(entry.user))
await self.bot.send_log_message(guild, embed=embed)
# endregion
@staticmethod
async def get_audit_entry(guild: discord.Guild, action: discord.AuditLogAction,
target: Any = None) -> Optional[discord.AuditLogEntry]:
"""Gets an audit log entry of the specified action type.
The type of the action depends on the action.
:param guild: The guild where the audit log will be checked.
:param action: The action to filter.
:param target: The target to filter.
:return: The first matching audit log entry if found.
"""
if not guild.me.guild_permissions.view_audit_log:
return
now = dt.datetime.utcnow()
after = now - dt.timedelta(0, 5)
async for entry in guild.audit_logs(limit=10, oldest_first=False, action=action, after=after):
if abs((entry.created_at - now)) >= dt.timedelta(seconds=5):
break
if target is not None and entry.target.id == target.id:
return entry
    async def add_character_history(self, char_id: int, change_type: ChangeType, before, after, author=None):
        """Adds a character history entry to the database.

        :param char_id: The affected character's id.
        :param change_type: The type of change.
        :param before: The previous value.
        :param after: The new value.
        :param author: The user that caused this change, or None when unknown.
        """
        # Only the author's id is stored; the column accepts NULL for no author.
        author_id = author.id if author else None
        await self.bot.pool.execute("""INSERT INTO character_history(character_id, change_type, before, after, user_id)
                                    values($1, $2, $3, $4, $5)""",
                                    char_id, change_type.value, before, after, author_id)
def setup(bot):
    """Entry point used by discord.py to load this extension's cog."""
    bot.add_cog(ServerLog(bot))
| apache-2.0 |
midori1/midorinoblog | site-packages/django/middleware/http.py | 50 | 1672 | from django.utils.http import http_date, parse_http_date_safe
class ConditionalGetMiddleware(object):
    """
    Handles conditional GET operations. If the response has an ETag or
    Last-Modified header, and the request has If-None-Match or
    If-Modified-Since, the response is replaced by an HttpNotModified.

    Also sets the Date and Content-Length response-headers.
    """
    def process_response(self, request, response):
        # Every response gets a Date header; non-streaming responses also get
        # an explicit Content-Length if one was not already set upstream.
        response['Date'] = http_date()
        if not response.streaming and not response.has_header('Content-Length'):
            response['Content-Length'] = str(len(response.content))

        if self._etag_matches(request, response) or self._not_modified(request, response):
            # Setting the status is enough here. The response handling path
            # automatically removes content for this status code (in
            # http.conditional_content_removal()).
            response.status_code = 304
        return response

    def _etag_matches(self, request, response):
        # True when the request's If-None-Match equals the response's ETag.
        if not response.has_header('ETag'):
            return False
        return request.META.get('HTTP_IF_NONE_MATCH') == response['ETag']

    def _not_modified(self, request, response):
        # True when Last-Modified is not newer than If-Modified-Since.
        if not response.has_header('Last-Modified'):
            return False
        if_modified_since = request.META.get('HTTP_IF_MODIFIED_SINCE')
        if if_modified_since is None:
            return False
        if_modified_since = parse_http_date_safe(if_modified_since)
        if if_modified_since is None:
            return False
        last_modified = parse_http_date_safe(response['Last-Modified'])
        return last_modified is not None and last_modified <= if_modified_since
| apache-2.0 |
jhancock93/autorest | src/generator/AutoRest.Python.Tests/Expected/AcceptanceTests/BodyString/autorestswaggerbatservice/auto_rest_swagger_bat_service.py | 5 | 2332 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.service_client import ServiceClient
from msrest import Configuration, Serializer, Deserializer
from .version import VERSION
from .operations.string_operations import StringOperations
from .operations.enum_operations import EnumOperations
from . import models
class AutoRestSwaggerBATServiceConfiguration(Configuration):
    """Configuration for AutoRestSwaggerBATService
    Note that all parameters used to create this instance are saved as instance
    attributes.

    :param str base_url: Service URL
    """
    # NOTE: this class is generated by AutoRest; manual edits will be lost on
    # regeneration.
    def __init__(
            self, base_url=None):
        if not base_url:
            # Default to the local test server used by the AutoRest BAT suite.
            base_url = 'http://localhost'
        super(AutoRestSwaggerBATServiceConfiguration, self).__init__(base_url)
        # Append this client's name/version to the outgoing User-Agent header.
        self.add_user_agent('autorestswaggerbatservice/{}'.format(VERSION))
class AutoRestSwaggerBATService(object):
    """Test Infrastructure for AutoRest Swagger BAT

    :ivar config: Configuration for client.
    :vartype config: AutoRestSwaggerBATServiceConfiguration
    :ivar string: String operations
    :vartype string: .operations.StringOperations
    :ivar enum: Enum operations
    :vartype enum: .operations.EnumOperations
    :param str base_url: Service URL
    """
    # NOTE: this class is generated by AutoRest; manual edits will be lost on
    # regeneration.
    def __init__(
            self, base_url=None):
        self.config = AutoRestSwaggerBATServiceConfiguration(base_url)
        self._client = ServiceClient(None, self.config)
        # Collect every model class from the generated models module so the
        # serializer/deserializer can resolve type names.
        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
        self.api_version = '1.0.0'
        self._serialize = Serializer(client_models)
        self._deserialize = Deserializer(client_models)
        # Operation groups share the single underlying service client.
        self.string = StringOperations(
            self._client, self.config, self._serialize, self._deserialize)
        self.enum = EnumOperations(
            self._client, self.config, self._serialize, self._deserialize)
EtiCui/Msc-UdeS | dataAnalysis/endtoend_distance.py | 1 | 3264 | #!/usr/bin/python
"""
These function can be used to calculate the average end to end distances of a backbone from a lammmps output.
Usage:
# dump_dataframe must be in pythonpath or working directory
from endtoend_distance import rf
rf,rf_std = rf(first_frame=-1000, last_frame=-1, trajectory_step=10,atoms_per_polymer=184, number_of_chains=100)
Requirement:
numpy
pandas
dump_dataframe.py
scipy
Limitations:
Coordinates must be unwrapped (ex:xu,yu,zu)
Each dump must be a file
TODO:
Function to read a trajectory from a single file
"""
from dump_dataframe import read_dump
import numpy as np
import pandas as pd
from scipy.spatial.distance import pdist
from glob import glob
def endtoend(filename, atoms_per_polymer, number_of_chains):
    """
    Function to calculate the end to end distances of each polymer chains from a dump.

    Args:
    ----
    filename(string): Filename of the dump
    atoms_per_polymer(int): The number of particles/atoms in a single chains
    number_of_chains(int): Number of chains in the system

    Returns:
    ----
    endtoend_dists(array): Numpy array with the end-to-end distance for each chains
    """
    # Read the dump; coordinates must be unwrapped (xu, yu, zu columns).
    dump = read_dump(filename, wrap=False)
    # Select only the useful columns.
    rf_df = dump["atom_df"][["xu", "yu", "zu"]]
    endtoend_dists = np.zeros(number_of_chains)
    for i in range(number_of_chains):
        # First and last backbone atom labels of chain i (atom ids are 1-based).
        first_atom = 1 + atoms_per_polymer * i
        last_atom = atoms_per_polymer + atoms_per_polymer * i
        # pdist on two points yields the single pairwise distance between them.
        endtoend_dists[i] = pdist(rf_df.loc[[first_atom, last_atom]])
    return endtoend_dists
def rf(first_frame=-1000, last_frame=-1, trajectory_step=10, atoms_per_polymer=184, number_of_chains=100):
    """
    Function to calculate the Rf of a lammps trajectory.

    Args:
    ----
    first_frame(int): The first frame desired in the trajectory
    last_frame(int): The frame to stop
    trajectory_step(int): calculate only for each # of files
    atoms_per_polymer(int): The number of atoms in the polymer chain
    number_of_chains(int): The number of chains in the system

    Returns:
    ----
    Rfmean(float): The average end to end distances in the trajectory
    Rfstd(float): The standard deviation of the Rf
    """
    # List of all the dumps in the trajectory.
    complete_trajectory = glob("*dump*")
    # Sort by the timestep embedded in each filename. "".join(filter(...))
    # works on both Python 2 and 3; int(filter(...)) only worked on Python 2,
    # where filter() on a str returned a str instead of a lazy iterator.
    complete_trajectory.sort(key=lambda f: int("".join(filter(str.isdigit, f))))
    # Consider only the desired frames.
    desired_trajectory = complete_trajectory[first_frame:last_frame:trajectory_step]
    # One row per frame, one column per chain. Renamed from `rf` so the local
    # no longer shadows this function's own name.
    distances = np.zeros((len(desired_trajectory), number_of_chains))
    for frame_index, dump_file in enumerate(desired_trajectory):
        # End-to-end distance of every chain in this frame.
        distances[frame_index] = endtoend(dump_file, atoms_per_polymer, number_of_chains)
    # Return the mean distance with its standard deviation.
    return distances.mean(), distances.std()
| mit |
timothycrosley/WebBot | instant_templates/create_webbot_appengine/WebElements/HTML5.py | 3 | 2964 | '''
HTML5.py
Contains complex elements that take advantage of features unique to modern HTML5 browsers,
and therefore will only work on more recent systems
Copyright (C) 2013 Timothy Edmund Crosley
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
'''
from . import Base, Display, Factory, Layout
from .MethodUtils import CallBack
from .MultiplePythonSupport import *
Factory = Factory.Factory("HTML5")
class FileUploader(Layout.Vertical):
    """
        An HTML5 browser only file-uploader - allows dragging and dropping files to upload them. :: IN DEVELOPMENT ::
    """
    # NOTE(review): 'preview' is declared in __slots__ but only ever used as a
    # local variable in _create, never stored on self — confirm intended.
    __slots__ = ('dropArea', 'dropLabel', 'preview', 'dropIndicator', 'files', 'statusBar')

    def _create(self, id, name=None, parent=None, **kwargs):
        # Build the widget tree: a drop area containing a (hidden) status bar
        # with throbber + label, and a file strip holding a hidden template
        # entry that the client-side script clones per dropped file.
        Layout.Vertical._create(self, id, name, parent, **kwargs)
        self.addClass("WDropArea")
        self.addClass("WEmpty")
        # Status bar: shown while files are being processed client-side.
        self.statusBar = self.addChildElement(Layout.Horizontal(id + "StatusBar"))
        self.statusBar.addClass("WStatusBar")
        self.statusBar.hide()
        self.dropIndicator = self.statusBar.addChildElement(Display.Image())
        self.dropIndicator.setProperty('src', 'images/throbber.gif')
        self.dropIndicator.addClass("WDropIndicator")
        self.dropLabel = self.statusBar.addChildElement(Display.Label(id + "DropLabel"))
        self.dropLabel.setText("Drop Files Here")
        self.dropLabel.addClass("WDropLabel")
        # Container for one entry per uploaded file.
        self.files = self.addChildElement(Layout.Horizontal(id + "Files"))
        self.files.addClass("WFiles")
        # Hidden template entry: thumbnail image plus file name label.
        baseFile = self.files.addChildElement(Layout.Vertical(id + "File"))
        baseFile.addClass("WFile")
        imageContainer = baseFile.addChildElement(Layout.Box())
        imageContainer.addClass("WImageContainer")
        preview = imageContainer.addChildElement(Display.Image())
        preview.addClass("WThumbnail")
        # NOTE: rebinds the `name` parameter as the file-name label element.
        name = baseFile.addChildElement(Display.Label())
        name.addClass("WFileName")
        baseFile.hide()
        # Hook up the client-side behavior once the element is rendered.
        self.addScript(CallBack(self, 'jsConnections'))

    def jsConnections(self):
        """
        Adds the necessary javascript to set up the file uploader client-side.
        """
        return "WebElements.buildFileOpener('%s');" % self.fullId()
# Register FileUploader with this module's element factory.
Factory.addProduct(FileUploader)
| gpl-2.0 |
changev/RackHD | test/deploy/rackhd_ha_resource_install.py | 12 | 11253 | """
Copyright (c) 2017 Dell Inc. or its subsidiaries. All Rights Reserved.
This script prepares the RackHD HA cluster resources
This script perform the following functions:
- configure mongo and rabbitmq resource
- configure mongo and rabbitmq virtual ips
- colocate mongo and rabbitmq resource with virtual ip
- create mongo replica set
- create rabbitmq mirrored queue policies
usage:
python run_tests.py -stack <stack ID> -test deploy/rackhd_ha_resource_install.py -numvms <num>
"""
from jinja2 import Environment, FileSystemLoader
import os
import time
import unittest
import fit_path # NOQA: unused import
import fit_common
nodelist = []  # list of active nodes in cluster, filled by setUp
numvms = int(fit_common.fitargs()['numvms'])  # total VMs in the stack (from -numvms)
err = []  # accumulated per-node failure messages
numrs = numvms - 1  # number of mongo resource
vip_dict = {'mongo': [], 'rabbit': [], 'rackhd': []}  # virtual IPs created per service
class rackhd_ha_resource_install(unittest.TestCase):
longMessage = True
def setUp(self):
# collect active nodes in cluster
for vmnum in range(1, numvms + 1):
command = "crm_mon -X | grep 'node{}.*online=.true' -q".format(vmnum)
status = fit_common.remote_shell(command, vmnum=vmnum)['exitcode']
if status != 0:
err.append('node{} is offline'.format(vmnum))
else:
nodelist.append(vmnum)
def configure_virtual_ip_resource(self, vmnum, ip, rsc_ip):
# check interface for virtual ip
if "172.31.128" not in ip:
nic = "ens160"
else:
nic = "ens192"
command = ("crm configure primitive {} ocf:heartbeat:IPaddr2 " +
"params ip='{}' nic='{}' " +
"op monitor interval='10s' meta is-managed='true'").format(rsc_ip, ip, nic)
rc = fit_common.remote_shell(command, vmnum=vmnum)['exitcode']
return rc == 0
def get_southbound_network(self):
endpoints = fit_common.fitrackhd()['httpEndpoints']
southbound = self.find_southbound(endpoints)
if southbound and 'address' in southbound:
address = southbound["address"]
addrsplit = address.split('.')
return("{}.{}.{}".format(addrsplit[0], addrsplit[1], addrsplit[2]))
return None
def find_southbound(self, httpEndpoints):
for i in httpEndpoints:
if i["routers"] == "southbound-api-router":
return i
return None
def create_mongo_config(self, ip_list):
template_folder = './config_templates'
env = Environment(loader=FileSystemLoader(template_folder))
template = env.get_template("mongo_init.bash")
config = {'_id': 'mongo_rs', 'members': []}
for idx, val in enumerate(ip_list):
config['members'].append({'_id': idx, 'host': '{}:27017'.format(val)})
rendered = template.render(mongo_list=config, mongo_addr=ip_list[0])
return rendered
def test01_install_mongo_resource(self):
# create resource on first active node
vmnum = nodelist[0]
sb_net = self.get_southbound_network()
self.assertIsNotNone(sb_net, "Could not find southbound address")
for mongo in range(1, numrs + 1):
# start mongo container as pacemaker resource
rsc = 'docker_mongo_{}'.format(mongo)
command = ("crm configure primitive {} ocf:heartbeat:docker " +
"params allow_pull=true image='registry.hwimo.lab.emc.com/mongo' " +
"run_opts=\\\'--privileged=true --net='host' -d -p 27017:27017\\\' " +
"run_cmd=\\\'--replSet mongo_rs --logpath /var/log/mongodb/mongod.log\\\' " +
"meta is-managed='true'").format(rsc)
self.assertEqual(fit_common.remote_shell(command, vmnum=vmnum)['exitcode'], 0, "{} resource failure.".format(rsc))
# create mongo virtual ip resource
ip = '{}.12{}'.format(sb_net, mongo)
vip_dict['mongo'].append(ip)
rsc_ip = 'mongo_addr_{}'.format(mongo)
self.assertTrue(self.configure_virtual_ip_resource(vmnum, ip, rsc_ip), "{} resource failure.".format(rsc_ip))
# colocate mongo and virtual IPs
mongo_cls = 'mongo{}'.format(mongo)
command = "crm configure colocation {} inf: {} {}".format(mongo_cls, rsc, rsc_ip)
self.assertEqual(fit_common.remote_shell(command, vmnum=vmnum)['exitcode'], 0, "colocation failure")
# create mongo replica config
mongo_rep = open('mongo_replica_init.bash', 'w')
mongo_rep.write(self.create_mongo_config(vip_dict['mongo']))
mongo_rep.close()
# copy file to ora
fit_common.scp_file_to_host('mongo_replica_init.bash', vmnum)
os.remove('mongo_replica_init.bash')
fit_common.remote_shell("chmod 777 mongo_replica_init.bash", vmnum=vmnum)['exitcode']
# run script to initiate replica set
self.assertEqual(fit_common.remote_shell("./mongo_replica_init.bash", vmnum=vmnum)['exitcode'],
0, "Mongo replica initiation failure")
def create_amqp_config_file(self):
# AMQP config file
result = ""
for rsnum in range(1, numrs + 1):
result += " 'rabbit@rabbit{}',".format(rsnum)
rabbit_list = result.rstrip(",")
rabbitmq_config = open('rabbitmq.config', 'w')
rabbitmq_config.write("[ { rabbit, [{ loopback_users, [ ] },{cluster_nodes, {[%s], disc}} ] } ]." % rabbit_list)
rabbitmq_config.close()
def create_rabbitmq_hostname_config(self, ip_list):
# create rabbitmq hosts
hosts_conf = open('hosts-conf', 'w')
for rsnum in range(1, numrs + 1):
line = '{}\trabbit{}\n'.format(ip_list[rsnum - 1], rsnum)
hosts_conf.write(line)
hosts_conf.close()
def get_rabbitmq_cluster_policy(self):
# create file for policies
template_folder = './config_templates'
env = Environment(loader=FileSystemLoader(template_folder))
template = env.get_template("rabbitmq_policy.bash")
rendered = template.render()
rabbitmq_policy = open('rabbitmq.bash', 'w')
rabbitmq_policy.write(rendered)
rabbitmq_policy.close()
def test02_install_rabbitmq_resource(self):
# install rabbitmq config on cluster nodes
self.create_amqp_config_file()
for vmnum in range(1, numvms + 1):
# copy file to ORA
fit_common.scp_file_to_host('rabbitmq.config', vmnum)
self.assertEqual(fit_common.remote_shell('mkdir -p /docker;cp rabbitmq.config /docker/', vmnum=vmnum)['exitcode'],
0, "rabbitMQ Config failure.")
os.remove('rabbitmq.config')
# create resource on first active node
vmnum = nodelist[0]
sb_net = self.get_southbound_network()
self.assertIsNotNone(sb_net, "Could not find southbound address")
rabbit_rsc_list = []
# set rabbitmq resource
for rabbit in range(1, numrs + 1):
rsc = 'docker_rabbit_{}'.format(rabbit)
rabbit_rsc_list.append(rsc)
command = ("crm configure primitive {} ocf:heartbeat:docker " +
"params allow_pull=true image='registry.hwimo.lab.emc.com/rabbitmq:management' " +
"run_opts=\\\'--privileged=true --net='host' -d " +
"-v /docker/rabbitmq.config:/etc/rabbitmq/rabbitmq.config " +
"-p 8080:15672 -p 4369:4369 -p 25672:25672 -p 5672:5672 -p 35197:35197 " +
"-e RABBITMQ_NODENAME=rabbit@rabbit{} "
"-e RABBITMQ_ERLANG_COOKIE=secret_cookie_example\\\'").format(rsc, rabbit)
self.assertEqual(fit_common.remote_shell(command, vmnum=vmnum)['exitcode'], 0, "{} resource failure".format(rsc))
# configure virtual ip for rabbitmq resource
ip = '{}.13{}'.format(sb_net, rabbit)
vip_dict['rabbit'].append(ip)
rsc_ip = 'rabbit_addr_{}'.format(rabbit)
self.assertTrue(self.configure_virtual_ip_resource(vmnum, ip, rsc_ip), "{} resource failure.".format(rsc_ip))
# colocate rabbitmq resource and virtual ip
rabbit_cls = 'rabbit{}'.format(rabbit)
command = "crm configure colocation {} inf: {} {}".format(rabbit_cls, rsc, rsc_ip)
self.assertEqual(fit_common.remote_shell(command, vmnum=vmnum)['exitcode'],
0, "{} and {} colocation failure".format(rsc, rsc_ip))
# create rabbitmq hostname on each node
self.create_rabbitmq_hostname_config(vip_dict['rabbit'])
# copy file to ORA
for vmnum in range(1, numvms + 1):
fit_common.scp_file_to_host('hosts-conf', vmnum)
# Clean out the previous entries to be idempotent
command = "grep -v rabbit /etc/hosts > hosts"
self.assertEqual(fit_common.remote_shell(command, vmnum=vmnum)['exitcode'],
0, "Hosts Config failure; Cleaning out previous entries")
# Add the new entries
self.assertEqual(fit_common.remote_shell('cat hosts-conf >> hosts', vmnum=vmnum)['exitcode'],
0, "Hosts Config failure; Adding new entries")
# Move the new file into place
self.assertEqual(fit_common.remote_shell('mv hosts /etc/hosts', vmnum=vmnum)['exitcode'],
0, "Hosts Config failure; Moving new file into place")
os.remove('hosts-conf')
# anti colocation between rabbit resources
for rsc in range(len(rabbit_rsc_list)):
for r_rsc in range(rsc + 1, len(rabbit_rsc_list)):
command = "crm configure colocation rabbit_anti_{0}{1} -inf: {2} {3}" \
.format(rsc + 1, r_rsc + 1, rabbit_rsc_list[rsc], rabbit_rsc_list[r_rsc])
self.assertEqual(fit_common.remote_shell(command, vmnum=vmnum)['exitcode'],
0, "{} and {} anti colocation failure".format(rabbit_rsc_list[rsc], rabbit_rsc_list[r_rsc]))
# restart rabbitmq resource
for rsc in rabbit_rsc_list:
command = "crm resource restart {}".format(rsc)
fit_common.remote_shell(command, vmnum=vmnum)['exitcode']
time.sleep(5)
# collect hostname for first rabbitmq resource
command = "crm_resource -W -r {} | sed \\\'s/.*node//g\\\'".format(rabbit_rsc_list[0])
rc = fit_common.remote_shell(command, vmnum=vmnum)
# clean out login stuff
splitnode = rc['stdout'].split('\n')
for item in splitnode:
if "assword" not in item and item.split(" ")[0]:
node = int(item)
self.get_rabbitmq_cluster_policy()
# copy file to ORA
fit_common.scp_file_to_host('rabbitmq.bash', vmnum=node)
fit_common.remote_shell("chmod 777 rabbitmq.bash", vmnum=node)
self.assertEqual(fit_common.remote_shell("./rabbitmq.bash", vmnum=node)['exitcode'],
0, "Rabbitmq mirrored queue policy failure.")
if __name__ == '__main__':
    # Allow running this deployment script directly via unittest's runner.
    unittest.main()
| apache-2.0 |
bukalov/phantomjs | src/breakpad/src/tools/gyp/pylib/gyp/input.py | 137 | 84791 | #!/usr/bin/python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from compiler.ast import Const
from compiler.ast import Dict
from compiler.ast import Discard
from compiler.ast import List
from compiler.ast import Module
from compiler.ast import Node
from compiler.ast import Stmt
import compiler
import copy
import gyp.common
import optparse
import os.path
import re
import shlex
import subprocess
import sys
# A list of types that are treated as linkable.
linkable_types = ['executable', 'shared_library', 'loadable_module']

# A list of sections that contain links to other targets.
dependency_sections = ['dependencies', 'export_dependent_settings']

# base_path_sections is a list of sections defined by GYP that contain
# pathnames.  The generators can provide more keys, the two lists are merged
# into path_sections, but you should call IsPathSection instead of using either
# list directly.
base_path_sections = [
  'destination',
  'files',
  'include_dirs',
  'inputs',
  'libraries',
  'outputs',
  'sources',
]
# Populated with base_path_sections plus generator-provided sections; query it
# through IsPathSection rather than reading it directly.
path_sections = []
def IsPathSection(section):
  # A section holds paths if it is in the merged path_sections list or its
  # name carries one of the conventional path-like suffixes.
  path_suffixes = ('_dir', '_dirs', '_file', '_files', '_path', '_paths')
  return section in path_sections or section.endswith(path_suffixes)
# base_non_configuration_keys is a list of key names that belong in the target
# itself and should not be propagated into its configurations.  It is merged
# with a list that can come from the generator to
# create non_configuration_keys.
base_non_configuration_keys = [
  # Sections that must exist inside targets and not configurations.
  'actions',
  'configurations',
  'copies',
  'default_configuration',
  'dependencies',
  'dependencies_original',
  'link_languages',
  'libraries',
  'postbuilds',
  'product_dir',
  'product_extension',
  'product_name',
  'rules',
  'run_as',
  'sources',
  'suppress_wildcard',
  'target_name',
  'test',
  'toolset',
  'toolsets',
  'type',
  'variants',

  # Sections that can be found inside targets or configurations, but that
  # should not be propagated from targets into their configurations.
  'variables',
]
# Merged from base_non_configuration_keys plus generator-provided keys.
non_configuration_keys = []

# Controls how the generator want the build file paths.
absolute_build_file_paths = False

# Controls whether or not the generator supports multiple toolsets.
multiple_toolsets = False
def GetIncludedBuildFiles(build_file_path, aux_data, included=None):
  """Return a list of all build files included into build_file_path.

  The returned list will contain build_file_path as well as all other files
  that it included, either directly or indirectly.  Note that the list may
  contain files that were included into a conditional section that evaluated
  to false and was not merged into build_file_path's dict.

  aux_data is a dict containing a key for each build file or included build
  file.  Those keys provide access to dicts whose "included" keys contain
  lists of all other files included by the build file.

  included should be left at its default None value by external callers.  It
  is used for recursion.

  The returned list will not contain any duplicate entries.  Each build file
  in the list will be relative to the current directory.
  """
  # `is None` instead of `== None`: identity test is the correct idiom and
  # avoids invoking __eq__ on arbitrary objects.
  if included is None:
    included = []

  if build_file_path in included:
    # Already visited; stop so include cycles cannot recurse forever.
    return included

  included.append(build_file_path)

  for included_build_file in aux_data[build_file_path].get('included', []):
    GetIncludedBuildFiles(included_build_file, aux_data, included)

  return included
def CheckedEval(file_contents):
  """Return the eval of a gyp file.

  The gyp file is restricted to dictionaries and lists only, and
  repeated keys are not allowed.

  Note that this is slower than eval() is.
  """
  # Uses the Python 2 `compiler` module (removed in Python 3) to parse the
  # file and validate its AST instead of trusting eval().
  ast = compiler.parse(file_contents)
  assert isinstance(ast, Module)
  c1 = ast.getChildren()
  # A Module's children are (docstring, Stmt); gyp files have no docstring.
  assert c1[0] is None
  assert isinstance(c1[1], Stmt)
  c2 = c1[1].getChildren()
  # The file must consist of a single bare expression (a Discard node).
  assert isinstance(c2[0], Discard)
  c3 = c2[0].getChildren()
  assert len(c3) == 1
  # Recursively convert/validate the expression node into plain dicts/lists.
  return CheckNode(c3[0],0)
def CheckNode(node, level):
  """Convert an AST node into plain dict/list/constant values.

  Only Dict, List and Const nodes are allowed; repeated dictionary keys
  raise KeyError with the nesting level.  (Python 2 `compiler` AST; the
  locals deliberately shadow the `dict`/`list` builtins — legacy code.)
  """
  if isinstance(node, Dict):
    c = node.getChildren()
    dict = {}
    # Children alternate key, value, key, value, ...
    for n in range(0, len(c), 2):
      assert isinstance(c[n], Const)
      key = c[n].getChildren()[0]
      if key in dict:
        raise KeyError, "Key '" + key + "' repeated at level " + \
              repr(level)
      dict[key] = CheckNode(c[n + 1], level + 1)
    return dict
  elif isinstance(node, List):
    c = node.getChildren()
    list = []
    for child in c:
      list.append(CheckNode(child, level + 1))
    return list
  elif isinstance(node, Const):
    # Leaf value (string/number); returned as-is.
    return node.getChildren()[0]
  else:
    raise TypeError, "Unknown AST node " + repr(node)
def LoadOneBuildFile(build_file_path, data, aux_data, variables, includes,
                     is_target, check):
  """Load a single build file, merge its includes, and cache it in `data`.

  Returns the (possibly cached) dict for build_file_path.  When `check` is
  true the file is validated with CheckedEval; otherwise it is eval()'d with
  builtins disabled.  (Python 2 `except X, e` syntax throughout.)
  """
  # Cache hit: the file was already loaded on an earlier pass.
  if build_file_path in data:
    return data[build_file_path]

  if os.path.exists(build_file_path):
    build_file_contents = open(build_file_path).read()
  else:
    raise Exception("%s not found (cwd: %s)" % (build_file_path, os.getcwd()))

  build_file_data = None
  try:
    if check:
      build_file_data = CheckedEval(build_file_contents)
    else:
      # Empty __builtins__ keeps the eval'd gyp file from calling anything.
      build_file_data = eval(build_file_contents, {'__builtins__': None},
                             None)
  except SyntaxError, e:
    # Tag the error with the offending file so the report is useful.
    e.filename = build_file_path
    raise
  except Exception, e:
    gyp.common.ExceptionAppend(e, 'while reading ' + build_file_path)
    raise

  # Cache before processing includes so recursive loads see this entry.
  data[build_file_path] = build_file_data
  aux_data[build_file_path] = {}

  # Scan for includes and merge them in.
  try:
    if is_target:
      LoadBuildFileIncludesIntoDict(build_file_data, build_file_path, data,
                                    aux_data, variables, includes, check)
    else:
      # Non-target files (e.g. .gypi) do not receive command-line includes.
      LoadBuildFileIncludesIntoDict(build_file_data, build_file_path, data,
                                    aux_data, variables, None, check)
  except Exception, e:
    gyp.common.ExceptionAppend(e,
                               'while reading includes of ' + build_file_path)
    raise

  return build_file_data
def LoadBuildFileIncludesIntoDict(subdict, subdict_path, data, aux_data,
                                  variables, includes, check):
  """Merge explicit `includes` plus the dict's own 'includes' into subdict.

  Records each merged file in aux_data[subdict_path]['included'] and then
  recurses into nested dicts and lists.  (Python 2: uses iteritems().)
  """
  includes_list = []
  # NOTE(review): '!= None' is kept as-is; 'is not None' is the usual idiom.
  if includes != None:
    includes_list.extend(includes)
  if 'includes' in subdict:
    for include in subdict['includes']:
      # "include" is specified relative to subdict_path, so compute the real
      # path to include by appending the provided "include" to the directory
      # in which subdict_path resides.
      relative_include = \
          os.path.normpath(os.path.join(os.path.dirname(subdict_path), include))
      includes_list.append(relative_include)
    # Unhook the includes list, it's no longer needed.
    del subdict['includes']

  # Merge in the included files.
  for include in includes_list:
    if not 'included' in aux_data[subdict_path]:
      aux_data[subdict_path]['included'] = []
    aux_data[subdict_path]['included'].append(include)

    gyp.DebugOutput(gyp.DEBUG_INCLUDES, "Loading Included File: '%s'" % include)

    # Load the included file (cached in `data`) and merge it into subdict.
    MergeDicts(subdict,
               LoadOneBuildFile(include, data, aux_data, variables, None,
                                False, check),
               subdict_path, include)

  # Recurse into subdictionaries.
  # Exact-class checks (not isinstance) are deliberate here: only plain
  # dicts/lists produced by eval'ing gyp files are descended into.
  for k, v in subdict.iteritems():
    if v.__class__ == dict:
      LoadBuildFileIncludesIntoDict(v, subdict_path, data, aux_data, variables,
                                    None, check)
    elif v.__class__ == list:
      LoadBuildFileIncludesIntoList(v, subdict_path, data, aux_data, variables,
                                    check)
# This recurses into lists so that it can look for dicts.
def LoadBuildFileIncludesIntoList(sublist, sublist_path, data, aux_data,
                                  variables, check):
  # Walk the list so that any dicts it holds (directly or inside nested
  # lists) also get their 'includes' sections merged.  Exact-class checks
  # mirror the dict variant: only plain dicts/lists are descended into.
  for entry in sublist:
    if entry.__class__ == dict:
      LoadBuildFileIncludesIntoDict(entry, sublist_path, data, aux_data,
                                    variables, None, check)
    elif entry.__class__ == list:
      LoadBuildFileIncludesIntoList(entry, sublist_path, data, aux_data,
                                    variables, check)
# Processes toolsets in all the targets. This recurses into condition entries
# since they can contain toolsets as well.
def ProcessToolsetsInDict(data):
  """Expand each target into one copy per toolset it lists.

  When multiple_toolsets is off, every target gets the single 'target'
  toolset.  Recurses into condition entries since they can contain targets
  as well.  Note: a target with an explicit empty 'toolsets' list produces
  no entries at all (the len > 0 guard drops it).
  """
  if 'targets' in data:
    target_list = data['targets']
    new_target_list = []
    for target in target_list:
      # `global` here only re-declares read access; the flag is set elsewhere.
      global multiple_toolsets
      if multiple_toolsets:
        toolsets = target.get('toolsets', ['target'])
      else:
        toolsets = ['target']
      if len(toolsets) > 0:
        # Optimization: only do copies if more than one toolset is specified.
        for build in toolsets[1:]:
          new_target = copy.deepcopy(target)
          new_target['toolset'] = build
          new_target_list.append(new_target)
        # The original dict is reused for the first toolset.
        target['toolset'] = toolsets[0]
        new_target_list.append(target)
    data['targets'] = new_target_list
  if 'conditions' in data:
    for condition in data['conditions']:
      if isinstance(condition, list):
        # condition[0] is the expression; the remaining entries are dicts
        # that may themselves contain targets with toolsets.
        for condition_dict in condition[1:]:
          ProcessToolsetsInDict(condition_dict)
# TODO(mark): I don't love this name.  It just means that it's going to load
# a build file that contains targets and is expected to provide a targets dict
# that contains the targets...
def LoadTargetBuildFile(build_file_path, data, aux_data, variables, includes,
                        depth, check):
  """Loads |build_file_path| as a targets-bearing build file into |data|.

  Performs includes, "pre"/"early" variable and condition processing, merges
  target_defaults into every target, and recursively loads any other build
  files referenced by the targets' dependencies.  Returns |data|, which is
  also updated in place.  Raises KeyError if the build file illegally defines
  an 'included_files' key, and Exception if target_defaults is present
  without any targets.
  """
  global absolute_build_file_paths

  # If depth is set, predefine the DEPTH variable to be a relative path from
  # this build file's directory to the directory identified by depth.
  if depth:
    d = gyp.common.RelativePath(depth, os.path.dirname(build_file_path))
    if d == '':
      variables['DEPTH'] = '.'
    else:
      variables['DEPTH'] = d

  # If the generator needs absolute paths, then do so.
  if absolute_build_file_paths:
    build_file_path = os.path.abspath(build_file_path)

  if build_file_path in data['target_build_files']:
    # Already loaded.
    return
  data['target_build_files'].add(build_file_path)

  gyp.DebugOutput(gyp.DEBUG_INCLUDES,
                  "Loading Target Build File '%s'" % build_file_path)

  build_file_data = LoadOneBuildFile(build_file_path, data, aux_data, variables,
                                     includes, True, check)

  # Store DEPTH for later use by generators.
  build_file_data['_DEPTH'] = depth

  # Set up the included_files key indicating which .gyp files contributed to
  # this target dict.
  if 'included_files' in build_file_data:
    raise KeyError, build_file_path + ' must not contain included_files key'

  included = GetIncludedBuildFiles(build_file_path, aux_data)
  build_file_data['included_files'] = []
  for included_file in included:
    # included_file is relative to the current directory, but it needs to
    # be made relative to build_file_path's directory.
    included_relative = \
        gyp.common.RelativePath(included_file,
                                os.path.dirname(build_file_path))
    build_file_data['included_files'].append(included_relative)

  ProcessToolsetsInDict(build_file_data)

  # Apply "pre"/"early" variable expansions and condition evaluations.
  ProcessVariablesAndConditionsInDict(build_file_data, False, variables,
                                      build_file_path)

  # Look at each project's target_defaults dict, and merge settings into
  # targets.
  if 'target_defaults' in build_file_data:
    index = 0
    if 'targets' in build_file_data:
      while index < len(build_file_data['targets']):
        # This procedure needs to give the impression that target_defaults is
        # used as defaults, and the individual targets inherit from that.
        # The individual targets need to be merged into the defaults.  Make
        # a deep copy of the defaults for each target, merge the target dict
        # as found in the input file into that copy, and then hook up the
        # copy with the target-specific data merged into it as the replacement
        # target dict.
        old_target_dict = build_file_data['targets'][index]
        new_target_dict = copy.deepcopy(build_file_data['target_defaults'])
        MergeDicts(new_target_dict, old_target_dict,
                   build_file_path, build_file_path)
        build_file_data['targets'][index] = new_target_dict
        index = index + 1
    else:
      raise Exception, \
            "Unable to find targets in build file %s" % build_file_path

    # No longer needed.
    del build_file_data['target_defaults']

  # Look for dependencies.  This means that dependency resolution occurs
  # after "pre" conditionals and variable expansion, but before "post" -
  # in other words, you can't put a "dependencies" section inside a "post"
  # conditional within a target.
  if 'targets' in build_file_data:
    for target_dict in build_file_data['targets']:
      if 'dependencies' not in target_dict:
        continue
      for dependency in target_dict['dependencies']:
        # Resolve the dependency to a build file path and load it too, so
        # that the full transitive closure of build files ends up in |data|.
        other_build_file = \
            gyp.common.ResolveTarget(build_file_path, dependency, None)[0]
        try:
          LoadTargetBuildFile(other_build_file, data, aux_data, variables,
                              includes, depth, check)
        except Exception, e:
          gyp.common.ExceptionAppend(
            e, 'while loading dependencies of %s' % build_file_path)
          raise

  return data
# Look for the bracket that matches the first bracket seen in a
# string, and return the start and end as a tuple.  For example, if
# the input is something like "<(foo <(bar)) blah", then it would
# return (1, 13), indicating the entire string except for the leading
# "<" and trailing " blah".
def FindEnclosingBracketGroup(input):
  """Returns (start, end) of the group closing the first bracket in |input|.

  |end| is one past the matching close bracket.  Returns (-1, -1) when the
  brackets are unbalanced or mismatched, or when no bracket is ever closed.
  """
  # Map each closing bracket to its required opening counterpart.
  closer_to_opener = {'}': '{',
                      ']': '[',
                      ')': '('}
  openers = ('{', '[', '(')
  stack = []
  start = -1
  for index, char in enumerate(input):
    if char in openers:
      stack.append(char)
      if start == -1:
        start = index
    elif char in closer_to_opener:
      # A close with an empty stack or a mismatched opener is an error.
      if not stack or stack.pop() != closer_to_opener[char]:
        return (-1, -1)
      if not stack:
        # The bracket opened at |start| has just been closed.
        return (start, index + 1)
  return (-1, -1)
# Matches "0" or a nonzero integer with no leading zeros and optional sign.
canonical_int_re = re.compile('^(0|-?[1-9][0-9]*)$')


def IsStrCanonicalInt(string):
  """Returns True if |string| is in its canonical integer form.

  The canonical form is such that str(int(string)) == string.
  """
  return isinstance(string, str) and canonical_int_re.match(string) is not None
# Regular expressions matching GYP expansion syntax.  The "early" flavor is
# introduced by '<' and handled in the first (pre-dependency) pass; the "late"
# flavor is introduced by '>' and handled in the final pass.  Named groups:
# 'replace' is the entire expansion, 'type' is the operator (e.g. '<', '<!',
# '<@', '<!@'), 'is_array' captures a '[' when the content is a Python list
# literal, and 'content' is the variable name or command line.  Note that
# 'content' is non-greedy, so nested parens are re-matched later via
# FindEnclosingBracketGroup in ExpandVariables.
early_variable_re = re.compile('(?P<replace>(?P<type><!?@?)'
                               '\((?P<is_array>\s*\[?)'
                               '(?P<content>.*?)(\]?)\))')
late_variable_re = re.compile('(?P<replace>(?P<type>>!?@?)'
                              '\((?P<is_array>\s*\[?)'
                              '(?P<content>.*?)(\]?)\))')

# Global cache of results from running commands so they don't have to be run
# more than once.  Keyed by str(command); populated and read by
# ExpandVariables.
cached_command_results = {}
def ExpandVariables(input, is_late, variables, build_file):
  """Expands GYP variable and command references found in |input|.

  |input| may be a string, list, or other value; only strings (recursively,
  within lists) are expanded.  When |is_late| is False the '<' forms are
  processed, otherwise the '>' forms.  '!' variants run a shell command and
  substitute its output (cached in cached_command_results); '@' variants
  expand into list context.  Strings that end up as canonical integers are
  converted to int.  Raises KeyError for undefined variables and TypeError
  when a variable expands to an unsupported type.
  """
  # Look for the pattern that gets expanded into variables
  if not is_late:
    variable_re = early_variable_re
  else:
    variable_re = late_variable_re

  input_str = str(input)
  # Get the entire list of matches as a list of MatchObject instances.
  # (using findall here would return strings, and we want
  # MatchObjects).
  matches = [match for match in variable_re.finditer(input_str)]

  output = input_str
  if matches:
    # Reverse the list of matches so that replacements are done right-to-left.
    # That ensures that earlier replacements won't mess up the string in a
    # way that causes later calls to find the earlier substituted text instead
    # of what's intended for replacement.
    matches.reverse()
    for match_group in matches:
      match = match_group.groupdict()
      gyp.DebugOutput(gyp.DEBUG_VARIABLES,
                      "Matches: %s" % repr(match))
      # match['replace'] is the substring to look for, match['type']
      # is the character code for the replacement type (< > <! >! <@
      # >@ <!@ >!@), match['is_array'] contains a '[' for command
      # arrays, and match['content'] is the name of the variable (< >)
      # or command to run (<! >!).

      # run_command is true if a ! variant is used.
      run_command = '!' in match['type']

      # Capture these now so we can adjust them later.
      replace_start = match_group.start('replace')
      replace_end = match_group.end('replace')

      # Find the ending paren, and re-evaluate the contained string.
      (c_start, c_end) = FindEnclosingBracketGroup(input_str[replace_start:])

      # Adjust the replacement range to match the entire command
      # found by FindEnclosingBracketGroup (since the variable_re
      # probably doesn't match the entire command if it contained
      # nested variables).
      replace_end = replace_start + c_end

      # Find the "real" replacement, matching the appropriate closing
      # paren, and adjust the replacement start and end.
      replacement = input_str[replace_start:replace_end]

      # Figure out what the contents of the variable parens are.
      contents_start = replace_start + c_start + 1
      contents_end = replace_end - 1
      contents = input_str[contents_start:contents_end]

      # Recurse to expand variables in the contents
      contents = ExpandVariables(contents, is_late, variables, build_file)

      # Strip off leading/trailing whitespace so that variable matches are
      # simpler below (and because they are rarely needed).
      contents = contents.strip()

      # expand_to_list is true if an @ variant is used.  In that case,
      # the expansion should result in a list.  Note that the caller
      # is to be expecting a list in return, and not all callers do
      # because not all are working in list context.  Also, for list
      # expansions, there can be no other text besides the variable
      # expansion in the input string.
      expand_to_list = '@' in match['type'] and input_str == replacement

      if run_command:
        # Run the command in the build file's directory.
        build_file_dir = os.path.dirname(build_file)
        if build_file_dir == '':
          # If build_file is just a leaf filename indicating a file in the
          # current directory, build_file_dir might be an empty string.  Set
          # it to None to signal to subprocess.Popen that it should run the
          # command in the current directory.
          build_file_dir = None

        use_shell = True
        if match['is_array']:
          # A '[' was captured: the content is a Python list literal of
          # argv-style strings, so bypass the shell entirely.
          contents = eval(contents)
          use_shell = False

        # Check for a cached value to avoid executing commands more than once.
        # TODO(http://code.google.com/p/gyp/issues/detail?id=112): It is
        # possible that the command being invoked depends on the current
        # directory.  For that case the syntax needs to be extended so that the
        # directory is also used in cache_key (it becomes a tuple).
        # TODO(http://code.google.com/p/gyp/issues/detail?id=111): In theory,
        # someone could author a set of GYP files where each time the command
        # is invoked it produces different output by design.  When the need
        # arises, the syntax should be extended to support no caching off a
        # command's output so it is run every time.
        cache_key = str(contents)
        cached_value = cached_command_results.get(cache_key, None)
        if cached_value is None:
          gyp.DebugOutput(gyp.DEBUG_VARIABLES,
                          "Executing command '%s' in directory '%s'" %
                          (contents,build_file_dir))

          p = subprocess.Popen(contents, shell=use_shell,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE,
                               stdin=subprocess.PIPE,
                               cwd=build_file_dir)

          (p_stdout, p_stderr) = p.communicate('')

          if p.wait() != 0 or p_stderr:
            sys.stderr.write(p_stderr)
            # Simulate check_call behavior, since check_call only exists
            # in python 2.5 and later.
            raise Exception("Call to '%s' returned exit status %d." %
                            (contents, p.returncode))
          replacement = p_stdout.rstrip()

          cached_command_results[cache_key] = replacement
        else:
          gyp.DebugOutput(gyp.DEBUG_VARIABLES,
                          "Had cache value for command '%s' in directory '%s'" %
                          (contents,build_file_dir))
          replacement = cached_value

      else:
        if not contents in variables:
          raise KeyError, 'Undefined variable ' + contents + \
                          ' in ' + build_file
        replacement = variables[contents]

      if isinstance(replacement, list):
        for item in replacement:
          if not isinstance(item, str) and not isinstance(item, int):
            raise TypeError, 'Variable ' + contents + \
                             ' must expand to a string or list of strings; ' + \
                             'list contains a ' + \
                             item.__class__.__name__
        # Run through the list and handle variable expansions in it.  Since
        # the list is guaranteed not to contain dicts, this won't do anything
        # with conditions sections.
        ProcessVariablesAndConditionsInList(replacement, is_late, variables,
                                            build_file)
      elif not isinstance(replacement, str) and \
           not isinstance(replacement, int):
            raise TypeError, 'Variable ' + contents + \
                             ' must expand to a string or list of strings; ' + \
                             'found a ' + replacement.__class__.__name__

      if expand_to_list:
        # Expanding in list context.  It's guaranteed that there's only one
        # replacement to do in |input_str| and that it's this replacement.  See
        # above.
        if isinstance(replacement, list):
          # If it's already a list, make a copy.
          output = replacement[:]
        else:
          # Split it the same way sh would split arguments.
          output = shlex.split(str(replacement))
      else:
        # Expanding in string context.
        encoded_replacement = ''
        if isinstance(replacement, list):
          # When expanding a list into string context, turn the list items
          # into a string in a way that will work with a subprocess call.
          #
          # TODO(mark): This isn't completely correct.  This should
          # call a generator-provided function that observes the
          # proper list-to-argument quoting rules on a specific
          # platform instead of just calling the POSIX encoding
          # routine.
          encoded_replacement = gyp.common.EncodePOSIXShellList(replacement)
        else:
          encoded_replacement = replacement

        output = output[:replace_start] + str(encoded_replacement) + \
                 output[replace_end:]
      # Prepare for the next match iteration.
      input_str = output

    # Look for more matches now that we've replaced some, to deal with
    # expanding local variables (variables defined in the same
    # variables block as this one).
    gyp.DebugOutput(gyp.DEBUG_VARIABLES,
                    "Found output %s, recursing." % repr(output))
    if isinstance(output, list):
      new_output = []
      for item in output:
        new_output.append(ExpandVariables(item, is_late, variables, build_file))
      output = new_output
    else:
      output = ExpandVariables(output, is_late, variables, build_file)

  # Convert all strings that are canonically-represented integers into
  # integers.
  if isinstance(output, list):
    for index in xrange(0, len(output)):
      if IsStrCanonicalInt(output[index]):
        output[index] = int(output[index])
  elif IsStrCanonicalInt(output):
    output = int(output)

  gyp.DebugOutput(gyp.DEBUG_VARIABLES,
                  "Expanding %s to %s" % (repr(input), repr(output)))

  return output
def ProcessConditionsInDict(the_dict, is_late, variables, build_file):
  """Evaluates a 'conditions' or 'target_conditions' section in |the_dict|.

  Which key is used depends on |is_late|: 'conditions' when False,
  'target_conditions' when True.

  Each item in a conditions list consists of cond_expr, a string expression
  evaluated as the condition, and true_dict, a dict that will be merged into
  the_dict if cond_expr evaluates to true.  Optionally, a third item,
  false_dict, may be present.  false_dict is merged into the_dict if
  cond_expr evaluates to false.

  Any dict merged into the_dict will be recursively processed for nested
  conditionals and other expansions, also according to is_late, immediately
  prior to being merged.  The conditions section is removed from the_dict.

  Raises TypeError for non-list condition entries, IndexError for entries of
  the wrong length, ValueError when a condition expression expands to a
  non-str/int, and re-raises SyntaxError/NameError from evaluation.
  """
  if not is_late:
    conditions_key = 'conditions'
  else:
    conditions_key = 'target_conditions'

  if not conditions_key in the_dict:
    return

  conditions_list = the_dict[conditions_key]
  # Unhook the conditions list, it's no longer needed.
  del the_dict[conditions_key]

  for condition in conditions_list:
    if not isinstance(condition, list):
      raise TypeError(conditions_key + ' must be a list')
    if len(condition) != 2 and len(condition) != 3:
      # It's possible that condition[0] won't work in which case this
      # attempt will raise its own IndexError.  That's probably fine.
      # Bug fix: len(condition) must be str()ed before concatenation;
      # previously this raised a TypeError instead of the IndexError.
      raise IndexError(conditions_key + ' ' + condition[0] +
                       ' must be length 2 or 3, not ' + str(len(condition)))

    [cond_expr, true_dict] = condition[0:2]
    false_dict = None
    if len(condition) == 3:
      false_dict = condition[2]

    # Do expansions on the condition itself.  Since the condition can
    # naturally contain variable references without needing to resort to GYP
    # expansion syntax, this is of dubious value for variables, but someone
    # might want to use a command expansion directly inside a condition.
    cond_expr_expanded = ExpandVariables(cond_expr, is_late, variables,
                                         build_file)
    if not isinstance(cond_expr_expanded, str) and \
       not isinstance(cond_expr_expanded, int):
      # Bug fix: this message previously referenced an undefined name
      # ('expanded'), which would have raised a NameError here.
      raise ValueError(
          'Variable expansion in this context permits str and int ' +
          'only, found ' + cond_expr_expanded.__class__.__name__)

    try:
      ast_code = compile(cond_expr_expanded, '<string>', 'eval')

      # Evaluate with builtins disabled; only names in |variables| resolve.
      if eval(ast_code, {'__builtins__': None}, variables):
        merge_dict = true_dict
      else:
        merge_dict = false_dict
    except SyntaxError as e:
      syntax_error = SyntaxError('%s while evaluating condition \'%s\' in %s '
                                 'at character %d.' %
                                 (str(e.args[0]), e.text, build_file, e.offset),
                                 e.filename, e.lineno, e.offset, e.text)
      raise syntax_error
    except NameError as e:
      gyp.common.ExceptionAppend(e, 'while evaluating condition \'%s\' in %s' %
                                 (cond_expr_expanded, build_file))
      raise

    if merge_dict != None:
      # Expand variables and nested conditionals in the merge_dict before
      # merging it.
      ProcessVariablesAndConditionsInDict(merge_dict, is_late,
                                          variables, build_file)

      MergeDicts(the_dict, merge_dict, build_file, build_file)
def LoadAutomaticVariablesFromDict(variables, the_dict):
  """Adds "automatic" variables from |the_dict| into |variables|.

  Any keys in |the_dict| with string, int, or list values become automatic
  variables.  The variable name is the key name with a '_' character
  prepended.  Other value types (e.g. dicts) are skipped.
  """
  # items() instead of Python 2-only iteritems(); behavior is identical and
  # the code remains valid under Python 3.
  for key, value in the_dict.items():
    if isinstance(value, str) or isinstance(value, int) or \
       isinstance(value, list):
      variables['_' + key] = value
def LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key):
  """Loads |the_dict|'s "variables" sub-dict, if present, into |variables|.

  The variable name is the key name in the "variables" dict.  Variables whose
  key ends with a '%' character are defaults: they are set only if they are
  unset in |variables|.  the_dict_key is the name of the key that accesses
  the_dict in the_dict's parent dict.  If the_dict's parent is not a dict
  (it could be a list or it could be parentless because it is a root dict),
  the_dict_key will be None.
  """
  # items() instead of Python 2-only iteritems(); identical behavior.
  for key, value in the_dict.get('variables', {}).items():
    if not isinstance(value, str) and not isinstance(value, int) and \
       not isinstance(value, list):
      continue

    if key.endswith('%'):
      variable_name = key[:-1]
      if variable_name in variables:
        # If the variable is already set, don't set it.
        continue
      # Bug fix: compare with '==', not 'is'.  The old identity test only
      # worked because CPython interns short string literals.
      if the_dict_key == 'variables' and variable_name in the_dict:
        # If the variable is set without a % in the_dict, and the_dict is a
        # variables dict (making |variables| a variables sub-dict of a
        # variables dict), use the_dict's definition.
        value = the_dict[variable_name]
    else:
      variable_name = key

    variables[variable_name] = value
def ProcessVariablesAndConditionsInDict(the_dict, is_late, variables_in,
                                        build_file, the_dict_key=None):
  """Handle all variable and command expansion and conditional evaluation.

  This function is the public entry point for all variable expansions and
  conditional evaluations.  The variables_in dictionary will not be modified
  by this function.

  Processing order matters and is deliberate: the "variables" dict first,
  then string values, then conditions, then recursion into children; the
  working variables dict is rebuilt after each phase because each phase may
  change what the automatics and "variables" entries resolve to.
  """

  # Make a copy of the variables_in dict that can be modified during the
  # loading of automatics and the loading of the variables dict.
  variables = variables_in.copy()
  LoadAutomaticVariablesFromDict(variables, the_dict)

  if 'variables' in the_dict:
    # Make sure all the local variables are added to the variables
    # list before we process them so that you can reference one
    # variable from another.  They will be fully expanded by recursion
    # in ExpandVariables.
    for key, value in the_dict['variables'].iteritems():
      variables[key] = value

    # Handle the associated variables dict first, so that any variable
    # references within can be resolved prior to using them as variables.
    # Pass a copy of the variables dict to avoid having it be tainted.
    # Otherwise, it would have extra automatics added for everything that
    # should just be an ordinary variable in this scope.
    ProcessVariablesAndConditionsInDict(the_dict['variables'], is_late,
                                        variables, build_file, 'variables')

  LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)

  for key, value in the_dict.iteritems():
    # Skip "variables", which was already processed if present.
    if key != 'variables' and isinstance(value, str):
      expanded = ExpandVariables(value, is_late, variables, build_file)
      if not isinstance(expanded, str) and not isinstance(expanded, int):
        raise ValueError, \
              'Variable expansion in this context permits str and int ' + \
              'only, found ' + expanded.__class__.__name__ + ' for ' + key
      the_dict[key] = expanded

  # Variable expansion may have resulted in changes to automatics.  Reload.
  # TODO(mark): Optimization: only reload if no changes were made.
  variables = variables_in.copy()
  LoadAutomaticVariablesFromDict(variables, the_dict)
  LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)

  # Process conditions in this dict.  This is done after variable expansion
  # so that conditions may take advantage of expanded variables.  For example,
  # if the_dict contains:
  #   {'type':       '<(library_type)',
  #    'conditions': [['_type=="static_library"', { ... }]]},
  # _type, as used in the condition, will only be set to the value of
  # library_type if variable expansion is performed before condition
  # processing.  However, condition processing should occur prior to recursion
  # so that variables (both automatic and "variables" dict type) may be
  # adjusted by conditions sections, merged into the_dict, and have the
  # intended impact on contained dicts.
  #
  # This arrangement means that a "conditions" section containing a
  # "variables" section will only have those variables effective in subdicts,
  # not in the_dict.  The workaround is to put a "conditions" section within
  # a "variables" section.  For example:
  #   {'conditions': [['os=="mac"', {'variables': {'define': 'IS_MAC'}}]],
  #    'defines':    ['<(define)'],
  #    'my_subdict': {'defines': ['<(define)']}},
  # will not result in "IS_MAC" being appended to the "defines" list in the
  # current scope but would result in it being appended to the "defines" list
  # within "my_subdict".  By comparison:
  #   {'variables': {'conditions': [['os=="mac"', {'define': 'IS_MAC'}]]},
  #    'defines':    ['<(define)'],
  #    'my_subdict': {'defines': ['<(define)']}},
  # will append "IS_MAC" to both "defines" lists.

  # Evaluate conditions sections, allowing variable expansions within them
  # as well as nested conditionals.  This will process a 'conditions' or
  # 'target_conditions' section, perform appropriate merging and recursive
  # conditional and variable processing, and then remove the conditions
  # section from the_dict if it is present.
  ProcessConditionsInDict(the_dict, is_late, variables, build_file)

  # Conditional processing may have resulted in changes to automatics or the
  # variables dict.  Reload.
  variables = variables_in.copy()
  LoadAutomaticVariablesFromDict(variables, the_dict)
  LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)

  # Recurse into child dicts, or process child lists which may result in
  # further recursion into descendant dicts.
  for key, value in the_dict.iteritems():
    # Skip "variables" and string values, which were already processed if
    # present.
    if key == 'variables' or isinstance(value, str):
      continue
    if isinstance(value, dict):
      # Pass a copy of the variables dict so that subdicts can't influence
      # parents.
      ProcessVariablesAndConditionsInDict(value, is_late, variables,
                                          build_file, key)
    elif isinstance(value, list):
      # The list itself can't influence the variables dict, and
      # ProcessVariablesAndConditionsInList will make copies of the variables
      # dict if it needs to pass it to something that can influence it.  No
      # copy is necessary here.
      ProcessVariablesAndConditionsInList(value, is_late, variables,
                                          build_file)
    elif not isinstance(value, int):
      raise TypeError, 'Unknown type ' + value.__class__.__name__ + \
                       ' for ' + key
def ProcessVariablesAndConditionsInList(the_list, is_late, variables,
                                        build_file):
  """Expands variables and evaluates conditions in each item of |the_list|.

  Dict items are processed with ProcessVariablesAndConditionsInDict (with a
  copy of |variables| so a subdict can't influence its siblings), list items
  are recursed into, and string items are expanded in place; a string that
  expands to a list is spliced into |the_list| at its position.  Raises
  ValueError when an expansion yields an unsupported type and TypeError for
  unsupported item types.
  """
  # Iterate using an index so that new values can be assigned into the_list.
  index = 0
  while index < len(the_list):
    item = the_list[index]
    if isinstance(item, dict):
      # Make a copy of the variables dict so that it won't influence anything
      # outside of its own scope.
      ProcessVariablesAndConditionsInDict(item, is_late, variables, build_file)
    elif isinstance(item, list):
      ProcessVariablesAndConditionsInList(item, is_late, variables, build_file)
    elif isinstance(item, str):
      expanded = ExpandVariables(item, is_late, variables, build_file)
      if isinstance(expanded, str) or isinstance(expanded, int):
        the_list[index] = expanded
      elif isinstance(expanded, list):
        del the_list[index]
        for expanded_item in expanded:
          the_list.insert(index, expanded_item)
          index = index + 1

        # index now identifies the next item to examine.  Continue right now
        # without falling into the index increment below.
        continue
      else:
        # Bug fix: the int |index| must be str()ed before concatenation;
        # previously this raised a TypeError instead of the ValueError.
        raise ValueError(
            'Variable expansion in this context permits strings and ' +
            'lists only, found ' + expanded.__class__.__name__ + ' at ' +
            str(index))
    elif not isinstance(item, int):
      # Bug fix: str(index), as above.
      raise TypeError('Unknown type ' + item.__class__.__name__ +
                      ' at index ' + str(index))
    index = index + 1
def BuildTargetsDict(data):
  """Builds a dict mapping fully-qualified target names to their target dicts.

  |data| is a dict mapping loaded build files by pathname relative to the
  current directory.  Values in |data| are build file contents.  For each
  |data| value with a "targets" key, the value of the "targets" key is taken
  as a list containing target dicts.  Each target's fully-qualified name is
  constructed from the pathname of the build file (|data| key) and its
  "target_name" property.  These fully-qualified names are used as the keys
  in the returned dict.  These keys provide access to the target dicts,
  the dicts in the "targets" lists.

  Raises KeyError if two targets resolve to the same fully-qualified name.
  """

  targets = {}
  for build_file in data['target_build_files']:
    for target in data[build_file].get('targets', []):
      target_name = gyp.common.QualifiedTarget(build_file,
                                               target['target_name'],
                                               target['toolset'])
      if target_name in targets:
        # Call-style raise (instead of the Python 2-only statement form)
        # keeps this valid under Python 3 as well.
        raise KeyError('Duplicate target definitions for ' + target_name)
      targets[target_name] = target

  return targets
def QualifyDependencies(targets):
  """Make dependency links fully-qualified relative to the current directory.

  |targets| is a dict mapping fully-qualified target names to their target
  dicts.  For each target in this dict, keys known to contain dependency
  links are examined, and any dependencies referenced will be rewritten
  so that they are fully-qualified and relative to the current directory.
  All rewritten dependencies are suitable for use as keys to |targets| or a
  similar dict.

  Raises KeyError if a non-"dependencies" section references a target that
  the "dependencies" list does not also reference.
  """

  # Hoisted out of the nested loops: the global declaration only needs to
  # appear once per function.
  global multiple_toolsets

  # items()/range() instead of Python 2-only iteritems()/xrange(); behavior
  # is identical and the code stays valid under Python 3.
  for target, target_dict in targets.items():
    target_build_file = gyp.common.BuildFile(target)
    toolset = target_dict['toolset']
    for dependency_key in dependency_sections:
      dependencies = target_dict.get(dependency_key, [])
      for index in range(0, len(dependencies)):
        dep_file, dep_target, dep_toolset = gyp.common.ResolveTarget(
            target_build_file, dependencies[index], toolset)
        if not multiple_toolsets:
          # Ignore toolset specification in the dependency if it is specified.
          dep_toolset = toolset
        dependency = gyp.common.QualifiedTarget(dep_file,
                                                dep_target,
                                                dep_toolset)
        dependencies[index] = dependency

        # Make sure anything appearing in a list other than "dependencies"
        # also appears in the "dependencies" list.
        if dependency_key != 'dependencies' and \
           dependency not in target_dict['dependencies']:
          raise KeyError('Found ' + dependency + ' in ' + dependency_key +
                         ' of ' + target + ', but not in dependencies')
def ExpandWildcardDependencies(targets, data):
  """Expands dependencies specified as build_file:*.

  For each target in |targets|, examines sections containing links to other
  targets.  If any such section contains a link of the form build_file:*, it
  is taken as a wildcard link, and is expanded to list each target in
  build_file.  The |data| dict provides access to build file dicts.

  Any target that does not wish to be included by wildcard can provide an
  optional "suppress_wildcard" key in its target dict.  When present and
  true, a wildcard dependency link will not include such targets.

  All dependency names, including the keys to |targets| and the values in each
  dependency list, must be qualified when this function is called.

  Raises KeyError if a wildcard refers to the target's own build file.
  """

  for target, target_dict in targets.iteritems():
    toolset = target_dict['toolset']
    target_build_file = gyp.common.BuildFile(target)
    for dependency_key in dependency_sections:
      dependencies = target_dict.get(dependency_key, [])

      # Loop this way instead of "for dependency in" or "for index in xrange"
      # because the dependencies list will be modified within the loop body.
      index = 0
      while index < len(dependencies):
        (dependency_build_file, dependency_target, dependency_toolset) = \
            gyp.common.ParseQualifiedTarget(dependencies[index])
        if dependency_target != '*' and dependency_toolset != '*':
          # Not a wildcard.  Keep it moving.
          index = index + 1
          continue

        if dependency_build_file == target_build_file:
          # It's an error for a target to depend on all other targets in
          # the same file, because a target cannot depend on itself.
          raise KeyError, 'Found wildcard in ' + dependency_key + ' of ' + \
                          target + ' referring to same build file'

        # Take the wildcard out and adjust the index so that the next
        # dependency in the list will be processed the next time through the
        # loop.
        del dependencies[index]
        index = index - 1

        # Loop through the targets in the other build file, adding them to
        # this target's list of dependencies in place of the removed
        # wildcard.
        dependency_target_dicts = data[dependency_build_file]['targets']
        for dependency_target_dict in dependency_target_dicts:
          # int() so that string values like '1'/'0' (after variable
          # expansion) behave as flags.
          if int(dependency_target_dict.get('suppress_wildcard', False)):
            continue
          dependency_target_name = dependency_target_dict['target_name']
          if (dependency_target != '*' and
              dependency_target != dependency_target_name):
            continue
          dependency_target_toolset = dependency_target_dict['toolset']
          if (dependency_toolset != '*' and
              dependency_toolset != dependency_target_toolset):
            continue
          dependency = gyp.common.QualifiedTarget(dependency_build_file,
                                                  dependency_target_name,
                                                  dependency_target_toolset)
          # Insert after the current position, keeping the other build
          # file's target order.
          index = index + 1
          dependencies.insert(index, dependency)

        index = index + 1
class DependencyGraphNode(object):
  """A node in the target dependency graph.

  Attributes:
    ref: A reference to an object that this DependencyGraphNode represents.
    dependencies: List of DependencyGraphNodes on which this one depends.
    dependents: List of DependencyGraphNodes that depend on this one.
  """

  class CircularException(Exception):
    """Raised when a cycle is detected in the dependency graph."""
    pass
def __init__(self, ref):
self.ref = ref
self.dependencies = []
self.dependents = []
  def FlattenToList(self):
    """Topologically sorts the graph rooted at self, returning a list of refs.

    Assumes self is the synthetic root node whose dependents are the nodes
    without dependencies.  On a cyclic graph the nodes in the cycle are
    silently omitted from the result.
    """
    # flat_list is the sorted list of dependencies - actually, the list items
    # are the "ref" attributes of DependencyGraphNodes.  Every target will
    # appear in flat_list after all of its dependencies, and before all of its
    # dependents.
    flat_list = []

    # in_degree_zeros is the list of DependencyGraphNodes that have no
    # dependencies not in flat_list.  Initially, it is a copy of the children
    # of this node, because when the graph was built, nodes with no
    # dependencies were made implicit dependents of the root node.
    in_degree_zeros = self.dependents[:]

    while in_degree_zeros:
      # Nodes in in_degree_zeros have no dependencies not in flat_list, so
      # they can be appended to flat_list.  Take these nodes out of
      # in_degree_zeros as work progresses, so that the next node to process
      # from the list can always be accessed at a consistent position.
      node = in_degree_zeros.pop(0)
      flat_list.append(node.ref)

      # Look at dependents of the node just added to flat_list.  Some of them
      # may now belong in in_degree_zeros.
      for node_dependent in node.dependents:
        is_in_degree_zero = True
        for node_dependent_dependency in node_dependent.dependencies:
          if not node_dependent_dependency.ref in flat_list:
            # The dependent has one or more dependencies not in flat_list.
            # There will be more chances to add it to flat_list when examining
            # it again as a dependent of those other dependencies, provided
            # that there are no cycles.
            is_in_degree_zero = False
            break

        if is_in_degree_zero:
          # All of the dependent's dependencies are already in flat_list.  Add
          # it to in_degree_zeros where it will be processed in a future
          # iteration of the outer loop.
          in_degree_zeros.append(node_dependent)

    return flat_list
def DirectDependencies(self, dependencies=None):
"""Returns a list of just direct dependencies."""
if dependencies == None:
dependencies = []
for dependency in self.dependencies:
# Check for None, corresponding to the root node.
if dependency.ref != None and dependency.ref not in dependencies:
dependencies.append(dependency.ref)
return dependencies
  def _AddImportedDependencies(self, targets, dependencies=None):
    """Given a list of direct dependencies, adds indirect dependencies that
    other dependencies have declared to export their settings.

    This method does not operate on self.  Rather, it operates on the list
    of dependencies in the |dependencies| argument.  For each dependency in
    that list, if any declares that it exports the settings of one of its
    own dependencies, those dependencies whose settings are "passed through"
    are added to the list.  As new items are added to the list, they too will
    be processed, so it is possible to import settings through multiple levels
    of dependencies.

    This method is not terribly useful on its own, it depends on being
    "primed" with a list of direct dependencies such as one provided by
    DirectDependencies.  DirectAndImportedDependencies is intended to be the
    public entry point.
    """

    if dependencies == None:
      dependencies = []

    # while-with-index rather than "for": items appended during the loop must
    # also be examined.
    index = 0
    while index < len(dependencies):
      dependency = dependencies[index]
      dependency_dict = targets[dependency]
      # Add any dependencies whose settings should be imported to the list
      # if not already present.  Newly-added items will be checked for
      # their own imports when the list iteration reaches them.
      # Rather than simply appending new items, insert them after the
      # dependency that exported them.  This is done to more closely match
      # the depth-first method used by DeepDependencies.
      add_index = 1
      for imported_dependency in \
          dependency_dict.get('export_dependent_settings', []):
        if imported_dependency not in dependencies:
          dependencies.insert(index + add_index, imported_dependency)
          add_index = add_index + 1
      index = index + 1

    return dependencies
def DirectAndImportedDependencies(self, targets, dependencies=None):
"""Returns a list of a target's direct dependencies and all indirect
dependencies that a dependency has advertised settings should be exported
through the dependency for.
"""
dependencies = self.DirectDependencies(dependencies)
return self._AddImportedDependencies(targets, dependencies)
def DeepDependencies(self, dependencies=None):
"""Returns a list of all of a target's dependencies, recursively."""
if dependencies == None:
dependencies = []
for dependency in self.dependencies:
# Check for None, corresponding to the root node.
if dependency.ref != None and dependency.ref not in dependencies:
dependencies.append(dependency.ref)
dependency.DeepDependencies(dependencies)
return dependencies
  def LinkDependencies(self, targets, dependencies=None, initial=True):
    """Returns a list of dependency targets that are linked into this target.

    This function has a split personality, depending on the setting of
    |initial|.  Outside callers should always leave |initial| at its default
    setting.

    When adding a target to the list of dependencies, this function will
    recurse into itself with |initial| set to False, to collect depenedencies
    that are linked into the linkable target for which the list is being built.

    Arguments:
      targets: dict mapping qualified target names to their target dicts,
          needed to look up each target's 'type'.
      dependencies: list to extend in place and return.
      initial: True only on the outermost call; internal recursion passes
          False.
    """
    if dependencies == None:
      dependencies = []

    # Check for None, corresponding to the root node.
    if self.ref == None:
      return dependencies

    # It's kind of sucky that |targets| has to be passed into this function,
    # but that's presently the easiest way to access the target dicts so that
    # this function can find target types.

    if not 'target_name' in targets[self.ref]:
      raise Exception("Missing 'target_name' field in target.")

    try:
      target_type = targets[self.ref]['type']
    # NOTE(review): |e| is unused; the message is built from target_name.
    except KeyError, e:
      raise Exception("Missing 'type' field in target %s" %
                      targets[self.ref]['target_name'])

    # linkable_types is a module-level list of target types that produce
    # linkable output.
    is_linkable = target_type in linkable_types

    if initial and not is_linkable:
      # If this is the first target being examined and it's not linkable,
      # return an empty list of link dependencies, because the link
      # dependencies are intended to apply to the target itself (initial is
      # True) and this target won't be linked.
      return dependencies

    # Executables and loadable modules are already fully and finally linked.
    # Nothing else can be a link dependency of them, there can only be
    # dependencies in the sense that a dependent target might run an
    # executable or load the loadable_module.

    if not initial and target_type in ('executable', 'loadable_module'):
      return dependencies

    # The target is linkable, add it to the list of link dependencies.
    if self.ref not in dependencies:
      if target_type != 'none':
        # Special case: "none" type targets don't produce any linkable products
        # and shouldn't be exposed as link dependencies, although dependencies
        # of "none" type targets may still be link dependencies.
        dependencies.append(self.ref)
      if initial or not is_linkable:
        # If this is a subsequent target and it's linkable, don't look any
        # further for linkable dependencies, as they'll already be linked into
        # this target linkable.  Always look at dependencies of the initial
        # target, and always look at dependencies of non-linkables.
        for dependency in self.dependencies:
          dependency.LinkDependencies(targets, dependencies, False)

    return dependencies
def BuildDependencyList(targets):
  """Builds the dependency graph for |targets| and flattens it.

  Arguments:
    targets: dict mapping qualified target names to target dicts.
  Returns:
    [dependency_nodes, flat_list], where dependency_nodes maps each
    qualified target name to its DependencyGraphNode and flat_list is a
    dependency-ordered list of all target names.
  Raises:
    DependencyGraphNode.CircularException: if the graph contains a cycle.
  """
  # Create a DependencyGraphNode for each target.  Put it into a dict for easy
  # access.
  dependency_nodes = {}
  for target, spec in targets.iteritems():
    if not target in dependency_nodes:
      dependency_nodes[target] = DependencyGraphNode(target)

  # Set up the dependency links.  Targets that have no dependencies are treated
  # as dependent on root_node.
  root_node = DependencyGraphNode(None)
  for target, spec in targets.iteritems():
    target_node = dependency_nodes[target]
    # NOTE(review): target_build_file is computed but unused here.
    target_build_file = gyp.common.BuildFile(target)
    if not 'dependencies' in spec or len(spec['dependencies']) == 0:
      target_node.dependencies = [root_node]
      root_node.dependents.append(target_node)
    else:
      dependencies = spec['dependencies']
      for index in xrange(0, len(dependencies)):
        try:
          dependency = dependencies[index]
          dependency_node = dependency_nodes[dependency]
          target_node.dependencies.append(dependency_node)
          dependency_node.dependents.append(target_node)
        except KeyError, e:
          # An unknown dependency name: annotate the error with the target
          # being processed before propagating it.
          gyp.common.ExceptionAppend(e,
                                     'while trying to load target %s' % target)
          raise

  flat_list = root_node.FlattenToList()

  # If there's anything left unvisited, there must be a circular dependency
  # (cycle).  If you need to figure out what's wrong, look for elements of
  # targets that are not in flat_list.
  if len(flat_list) != len(targets):
    raise DependencyGraphNode.CircularException, \
        'Some targets not reachable, cycle in dependency graph detected'

  return [dependency_nodes, flat_list]
def DoDependentSettings(key, flat_list, targets, dependency_nodes):
  """Publishes one class of dependent settings to the targets they affect.

  |key| selects which settings dict is merged and which dependency walk is
  used: 'all_dependent_settings', 'direct_dependent_settings', or
  'link_settings'.
  """
  for target in flat_list:
    target_dict = targets[target]
    build_file = gyp.common.BuildFile(target)
    node = dependency_nodes[target]

    # Pick the dependency traversal that matches the settings class.
    if key == 'all_dependent_settings':
      dependencies = node.DeepDependencies()
    elif key == 'direct_dependent_settings':
      dependencies = node.DirectAndImportedDependencies(targets)
    elif key == 'link_settings':
      dependencies = node.LinkDependencies(targets)
    else:
      raise KeyError("DoDependentSettings doesn't know how to determine " +
                     'dependencies for ' + key)

    for dependency in dependencies:
      dependency_dict = targets[dependency]
      if key not in dependency_dict:
        continue
      MergeDicts(target_dict, dependency_dict[key],
                 build_file, gyp.common.BuildFile(dependency))
def AdjustStaticLibraryDependencies(flat_list, targets, dependency_nodes):
  """Normalizes 'dependencies' lists with respect to static libraries.

  Static library targets drop dependencies on other (non-hard) static
  libraries; linkable targets gain explicit dependencies on every computed
  link dependency so their link steps are complete.
  """
  # Recompute target "dependencies" properties.  For each static library
  # target, remove "dependencies" entries referring to other static libraries,
  # unless the dependency has the "hard_dependency" attribute set.  For each
  # linkable target, add a "dependencies" entry referring to all of the
  # target's computed list of link dependencies (including static libraries
  # if no such entry is already present.
  for target in flat_list:
    target_dict = targets[target]
    target_type = target_dict['type']

    if target_type == 'static_library':
      if not 'dependencies' in target_dict:
        continue

      # Preserve the pre-adjustment list for generators that need it.
      target_dict['dependencies_original'] = target_dict.get(
          'dependencies', [])[:]

      index = 0
      while index < len(target_dict['dependencies']):
        dependency = target_dict['dependencies'][index]
        dependency_dict = targets[dependency]
        if dependency_dict['type'] == 'static_library' and \
           (not 'hard_dependency' in dependency_dict or \
            not dependency_dict['hard_dependency']):
          # A static library should not depend on another static library unless
          # the dependency relationship is "hard," which should only be done
          # when a dependent relies on some side effect other than just the
          # build product, like a rule or action output.  Take the dependency
          # out of the list, and don't increment index because the next
          # dependency to analyze will shift into the index formerly occupied
          # by the one being removed.
          del target_dict['dependencies'][index]
        else:
          index = index + 1

      # If the dependencies list is empty, it's not needed, so unhook it.
      if len(target_dict['dependencies']) == 0:
        del target_dict['dependencies']

    elif target_type in linkable_types:
      # Get a list of dependency targets that should be linked into this
      # target.  Add them to the dependencies list if they're not already
      # present.

      link_dependencies = dependency_nodes[target].LinkDependencies(targets)
      for dependency in link_dependencies:
        # A target never depends on itself.
        if dependency == target:
          continue
        if not 'dependencies' in target_dict:
          target_dict['dependencies'] = []
        if not dependency in target_dict['dependencies']:
          target_dict['dependencies'].append(dependency)
# Initialize this here to speed up MakePathRelative.
# Matches items that must pass through unmodified: absolute paths, build
# variables ($), flags (-), expansion markers (< >), optionally behind a
# leading quote character.
exception_re = re.compile(r'''["']?[-/$<>]''')


def MakePathRelative(to_file, fro_file, item):
  """Rebases relative path |item| from |fro_file|'s directory to
  |to_file|'s directory; special-prefixed items pass through unchanged."""
  # If item is a relative path, it's relative to the build file dict that it's
  # coming from.  Fix it up to make it relative to the build file dict that
  # it's going into.
  # Exception: any |item| that begins with these special characters is
  # returned without modification.
  #   /   Used when a path is already absolute (shortcut optimization;
  #       such paths would be returned as absolute anyway)
  #   $   Used for build environment variables
  #   -   Used for some build environment flags (such as -lapr-1 in a
  #       "libraries" section)
  #   <   Used for our own variable and command expansions (see ExpandVariables)
  #   >   Used for our own variable and command expansions (see ExpandVariables)
  #
  #   "/' Used when a value is quoted.  If these are present, then we
  #       check the second character instead.
  #
  if to_file == fro_file or exception_re.match(item):
    return item
  else:
    # TODO(dglazkov) The backslash/forward-slash replacement at the end is a
    # temporary measure. This should really be addressed by keeping all paths
    # in POSIX until actual project generation.
    return os.path.normpath(os.path.join(
        gyp.common.RelativePath(os.path.dirname(fro_file),
                                os.path.dirname(to_file)),
        item)).replace('\\', '/')
def MergeLists(to, fro, to_file, fro_file, is_paths=False, append=True):
  """Merges list |fro| into list |to|, copying dicts/sublists and fixing
  paths.

  Arguments:
    to: destination list, modified in place.
    fro: source list; container items are deep-merged, not referenced.
    to_file: build file the destination belongs to.
    fro_file: build file the source came from; used to rebase paths.
    is_paths: when True, string/int items are rebased via MakePathRelative.
    append: True appends to |to|; False prepends (preserving |fro| order).
  Raises:
    TypeError: for items that are not str/int/dict/list.
  """
  prepend_index = 0

  for item in fro:
    singleton = False
    if isinstance(item, str) or isinstance(item, int):
      # The cheap and easy case.
      if is_paths:
        to_item = MakePathRelative(to_file, fro_file, item)
      else:
        to_item = item

      if not isinstance(item, str) or not item.startswith('-'):
        # Any string that doesn't begin with a "-" is a singleton - it can
        # only appear once in a list, to be enforced by the list merge append
        # or prepend.
        singleton = True
    elif isinstance(item, dict):
      # Make a copy of the dictionary, continuing to look for paths to fix.
      # The other intelligent aspects of merge processing won't apply because
      # item is being merged into an empty dict.
      to_item = {}
      MergeDicts(to_item, item, to_file, fro_file)
    elif isinstance(item, list):
      # Recurse, making a copy of the list.  If the list contains any
      # descendant dicts, path fixing will occur.  Note that here, custom
      # values for is_paths and append are dropped; those are only to be
      # applied to |to| and |fro|, not sublists of |fro|.  append shouldn't
      # matter anyway because the new |to_item| list is empty.
      to_item = []
      MergeLists(to_item, item, to_file, fro_file)
    else:
      raise TypeError, \
          'Attempt to merge list item of unsupported type ' + \
          item.__class__.__name__

    if append:
      # If appending a singleton that's already in the list, don't append.
      # This ensures that the earliest occurrence of the item will stay put.
      if not singleton or not to_item in to:
        to.append(to_item)
    else:
      # If prepending a singleton that's already in the list, remove the
      # existing instance and proceed with the prepend.  This ensures that the
      # item appears at the earliest possible position in the list.
      while singleton and to_item in to:
        to.remove(to_item)

      # Don't just insert everything at index 0.  That would prepend the new
      # items to the list in reverse order, which would be an unwelcome
      # surprise.
      to.insert(prepend_index, to_item)
      prepend_index = prepend_index + 1
def MergeDicts(to, fro, to_file, fro_file):
  """Merges dict |fro| into dict |to|, fixing up paths and applying the
  list-merge policies encoded in key suffixes (=, +, ?).

  Arguments:
    to: destination dict, modified in place.
    fro: source dict; its container values are copied as they are merged.
    to_file: build file path that |to| belongs to.
    fro_file: build file path that |fro| came from; used to rebase paths.
  Raises:
    TypeError: when a value's type conflicts with the existing value in |to|,
        or a value is not a str/int/dict/list.
    KeyError: when incompatible list policies appear together in |fro|.
  """
  # I wanted to name the parameter "from" but it's a Python keyword...
  for k, v in fro.iteritems():
    # It would be nice to do "if not k in to: to[k] = v" but that wouldn't give
    # copy semantics.  Something else may want to merge from the |fro| dict
    # later, and having the same dict ref pointed to twice in the tree isn't
    # what anyone wants considering that the dicts may subsequently be
    # modified.
    if k in to:
      bad_merge = False
      if isinstance(v, str) or isinstance(v, int):
        # Scalars may overwrite each other regardless of str/int mix.
        if not (isinstance(to[k], str) or isinstance(to[k], int)):
          bad_merge = True
      elif v.__class__ != to[k].__class__:
        bad_merge = True

      if bad_merge:
        raise TypeError, \
            'Attempt to merge dict value of type ' + v.__class__.__name__ + \
            ' into incompatible type ' + to[k].__class__.__name__ + \
            ' for key ' + k
    if isinstance(v, str) or isinstance(v, int):
      # Overwrite the existing value, if any.  Cheap and easy.
      is_path = IsPathSection(k)
      if is_path:
        to[k] = MakePathRelative(to_file, fro_file, v)
      else:
        to[k] = v
    elif isinstance(v, dict):
      # Recurse, guaranteeing copies will be made of objects that require it.
      if not k in to:
        to[k] = {}
      MergeDicts(to[k], v, to_file, fro_file)
    elif isinstance(v, list):
      # Lists in dicts can be merged with different policies, depending on
      # how the key in the "from" dict (k, the from-key) is written.
      #
      # If the from-key has          ...the to-list will have this action
      # this character appended:...  applied when receiving the from-list:
      #                           =  replace
      #                           +  prepend
      #                           ?  set, only if to-list does not yet exist
      #                      (none)  append
      #
      # This logic is list-specific, but since it relies on the associated
      # dict key, it's checked in this dict-oriented function.
      ext = k[-1]
      append = True
      if ext == '=':
        list_base = k[:-1]
        lists_incompatible = [list_base, list_base + '?']
        # Replace: discard whatever was there before.
        to[list_base] = []
      elif ext == '+':
        list_base = k[:-1]
        lists_incompatible = [list_base + '=', list_base + '?']
        append = False
      elif ext == '?':
        list_base = k[:-1]
        lists_incompatible = [list_base, list_base + '=', list_base + '+']
      else:
        list_base = k
        lists_incompatible = [list_base + '=', list_base + '?']

      # Some combinations of merge policies appearing together are meaningless.
      # It's stupid to replace and append simultaneously, for example.  Append
      # and prepend are the only policies that can coexist.
      for list_incompatible in lists_incompatible:
        if list_incompatible in fro:
          raise KeyError, 'Incompatible list policies ' + k + ' and ' + \
                          list_incompatible

      if list_base in to:
        if ext == '?':
          # If the key ends in "?", the list will only be merged if it doesn't
          # already exist.
          continue
        if not isinstance(to[list_base], list):
          # This may not have been checked above if merging in a list with an
          # extension character.
          raise TypeError, \
              'Attempt to merge dict value of type ' + v.__class__.__name__ + \
              ' into incompatible type ' + to[list_base].__class__.__name__ + \
              ' for key ' + list_base + '(' + k + ')'
      else:
        to[list_base] = []

      # Call MergeLists, which will make copies of objects that require it.
      # MergeLists can recurse back into MergeDicts, although this will be
      # to make copies of dicts (with paths fixed), there will be no
      # subsequent dict "merging" once entering a list because lists are
      # always replaced, appended to, or prepended to.
      is_paths = IsPathSection(list_base)
      MergeLists(to[list_base], v, to_file, fro_file, is_paths, append)
    else:
      raise TypeError, \
          'Attempt to merge dict value of unsupported type ' + \
          v.__class__.__name__ + ' for key ' + k
def MergeConfigWithInheritance(new_configuration_dict, build_file,
                               target_dict, configuration, visited):
  """Recursively merges |configuration| (parents first, via 'inherit_from')
  into |new_configuration_dict|.

  |visited| carries the inheritance chain already processed so that
  inheritance cycles terminate instead of recursing forever.
  """
  # Skip if previously visted.
  if configuration in visited:
    return

  configuration_dict = target_dict['configurations'][configuration]

  # Merge every parent before this configuration so that child settings
  # override inherited ones.
  for parent in configuration_dict.get('inherit_from', []):
    MergeConfigWithInheritance(new_configuration_dict, build_file,
                               target_dict, parent,
                               visited + [configuration])

  # Now layer this configuration's own settings on top.
  MergeDicts(new_configuration_dict, configuration_dict,
             build_file, build_file)

  # 'abstract' is inheritance plumbing only; never expose it in the result.
  new_configuration_dict.pop('abstract', None)
def SetUpConfigurations(target, target_dict):
  """Moves configurable settings of |target_dict| into per-configuration
  dicts, resolving 'inherit_from' chains and dropping abstract
  configurations.

  Arguments:
    target: qualified target name (used to find the build file).
    target_dict: the target's spec dict, modified in place.
  """
  global non_configuration_keys
  # key_suffixes is a list of key suffixes that might appear on key names.
  # These suffixes are handled in conditional evaluations (for =, +, and ?)
  # and rules/exclude processing (for ! and /).  Keys with these suffixes
  # should be treated the same as keys without.
  key_suffixes = ['=', '+', '?', '!', '/']

  build_file = gyp.common.BuildFile(target)

  # Provide a single configuration by default if none exists.
  # TODO(mark): Signal an error if default_configurations exists but
  # configurations does not.
  if not 'configurations' in target_dict:
    target_dict['configurations'] = {'Default': {}}
  if not 'default_configuration' in target_dict:
    concrete = [i for i in target_dict['configurations'].keys()
                if not target_dict['configurations'][i].get('abstract')]
    # First concrete configuration in sorted order becomes the default.
    target_dict['default_configuration'] = sorted(concrete)[0]

  for configuration in target_dict['configurations'].keys():
    old_configuration_dict = target_dict['configurations'][configuration]
    # Skip abstract configurations (saves work only).
    if old_configuration_dict.get('abstract'):
      continue
    # Configurations inherit (most) settings from the enclosing target scope.
    # Get the inheritance relationship right by making a copy of the target
    # dict.
    new_configuration_dict = copy.deepcopy(target_dict)

    # Take out the bits that don't belong in a "configurations" section.
    # Since configuration setup is done before conditional, exclude, and rules
    # processing, be careful with handling of the suffix characters used in
    # those phases.
    delete_keys = []
    for key in new_configuration_dict:
      key_ext = key[-1:]
      if key_ext in key_suffixes:
        key_base = key[:-1]
      else:
        key_base = key
      if key_base in non_configuration_keys:
        delete_keys.append(key)
    for key in delete_keys:
      del new_configuration_dict[key]

    # Merge in configuration (with all its parents first).
    MergeConfigWithInheritance(new_configuration_dict, build_file,
                               target_dict, configuration, [])

    # Put the new result back into the target dict as a configuration.
    target_dict['configurations'][configuration] = new_configuration_dict

  # Now drop all the abstract ones.
  for configuration in target_dict['configurations'].keys():
    old_configuration_dict = target_dict['configurations'][configuration]
    if old_configuration_dict.get('abstract'):
      del target_dict['configurations'][configuration]

  # Now that all of the target's configurations have been built, go through
  # the target dict's keys and remove everything that's been moved into a
  # "configurations" section.
  delete_keys = []
  for key in target_dict:
    key_ext = key[-1:]
    if key_ext in key_suffixes:
      key_base = key[:-1]
    else:
      key_base = key
    if not key_base in non_configuration_keys:
      delete_keys.append(key)
  for key in delete_keys:
    del target_dict[key]
def ProcessListFiltersInDict(name, the_dict):
  """Process regular expression and exclusion-based filters on lists.

  An exclusion list is in a dict key named with a trailing "!", like
  "sources!".  Every item in such a list is removed from the associated
  main list, which in this example, would be "sources".  Removed items are
  placed into a "sources_excluded" list in the dict.

  Regular expression (regex) filters are contained in dict keys named with a
  trailing "/", such as "sources/" to operate on the "sources" list.  Regex
  filters in a dict take the form:
    'sources/': [ ['exclude', '_(linux|mac|win)\\.cc$'] ],
                  ['include', '_mac\\.cc$'] ],
  The first filter says to exclude all files ending in _linux.cc, _mac.cc, and
  _win.cc.  The second filter then includes all files ending in _mac.cc that
  are now or were once in the "sources" list.  Items matching an "exclude"
  filter are subject to the same processing as would occur if they were listed
  by name in an exclusion list (ending in "!").  Items matching an "include"
  filter are brought back into the main list if previously excluded by an
  exclusion list or exclusion regex filter.  Subsequent matching "exclude"
  patterns can still cause items to be excluded after matching an "include".

  Arguments:
    name: name of |the_dict| (used only in error messages).
    the_dict: dict to process, modified in place; recursion covers all
        nested dicts and lists.
  """
  # Look through the dictionary for any lists whose keys end in "!" or "/".
  # These are lists that will be treated as exclude lists and regular
  # expression-based exclude/include lists.  Collect the lists that are
  # needed first, looking for the lists that they operate on, and assemble
  # then into |lists|.  This is done in a separate loop up front, because
  # the _included and _excluded keys need to be added to the_dict, and that
  # can't be done while iterating through it.
  lists = []
  del_lists = []
  for key, value in the_dict.iteritems():
    operation = key[-1]
    if operation != '!' and operation != '/':
      continue

    if not isinstance(value, list):
      raise ValueError, name + ' key ' + key + ' must be list, not ' + \
                        value.__class__.__name__

    list_key = key[:-1]
    if list_key not in the_dict:
      # This happens when there's a list like "sources!" but no corresponding
      # "sources" list.  Since there's nothing for it to operate on, queue up
      # the "sources!" list for deletion now.
      del_lists.append(key)
      continue

    if not isinstance(the_dict[list_key], list):
      raise ValueError, name + ' key ' + list_key + \
                        ' must be list, not ' + \
                        value.__class__.__name__ + ' when applying ' + \
                        {'!': 'exclusion', '/': 'regex'}[operation]

    if not list_key in lists:
      lists.append(list_key)

  # Delete the lists that are known to be unneeded at this point.
  for del_list in del_lists:
    del the_dict[del_list]

  for list_key in lists:
    the_list = the_dict[list_key]

    # Initialize the list_actions list, which is parallel to the_list.  Each
    # item in list_actions identifies whether the corresponding item in
    # the_list should be excluded, unconditionally preserved (included), or
    # whether no exclusion or inclusion has been applied.  Items for which
    # no exclusion or inclusion has been applied (yet) have value -1, items
    # excluded have value 0, and items included have value 1.  Includes and
    # excludes override previous actions.  All items in list_actions are
    # initialized to -1 because no excludes or includes have been processed
    # yet.
    list_actions = list((-1,) * len(the_list))

    exclude_key = list_key + '!'
    if exclude_key in the_dict:
      for exclude_item in the_dict[exclude_key]:
        for index in xrange(0, len(the_list)):
          if exclude_item == the_list[index]:
            # This item matches the exclude_item, so set its action to 0
            # (exclude).
            list_actions[index] = 0

      # The "whatever!" list is no longer needed, dump it.
      del the_dict[exclude_key]

    regex_key = list_key + '/'
    if regex_key in the_dict:
      for regex_item in the_dict[regex_key]:
        # Each regex_item is an [action, pattern] pair.
        [action, pattern] = regex_item
        pattern_re = re.compile(pattern)

        for index in xrange(0, len(the_list)):
          list_item = the_list[index]
          if pattern_re.search(list_item):
            # Regular expression match.

            if action == 'exclude':
              # This item matches an exclude regex, so set its value to 0
              # (exclude).
              list_actions[index] = 0
            elif action == 'include':
              # This item matches an include regex, so set its value to 1
              # (include).
              list_actions[index] = 1
            else:
              # This is an action that doesn't make any sense.
              raise ValueError, 'Unrecognized action ' + action + ' in ' + \
                                name + ' key ' + key

      # The "whatever/" list is no longer needed, dump it.
      del the_dict[regex_key]

    # Add excluded items to the excluded list.
    #
    # Note that exclude_key ("sources!") is different from excluded_key
    # ("sources_excluded").  The exclude_key list is input and it was already
    # processed and deleted; the excluded_key list is output and it's about
    # to be created.
    excluded_key = list_key + '_excluded'
    if excluded_key in the_dict:
      raise KeyError, \
          name + ' key ' + excluded_key + ' must not be present prior ' + \
          ' to applying exclusion/regex filters for ' + list_key

    excluded_list = []

    # Go backwards through the list_actions list so that as items are deleted,
    # the indices of items that haven't been seen yet don't shift.  That means
    # that things need to be prepended to excluded_list to maintain them in the
    # same order that they existed in the_list.
    for index in xrange(len(list_actions) - 1, -1, -1):
      if list_actions[index] == 0:
        # Dump anything with action 0 (exclude).  Keep anything with action 1
        # (include) or -1 (no include or exclude seen for the item).
        excluded_list.insert(0, the_list[index])
        del the_list[index]

    # If anything was excluded, put the excluded list into the_dict at
    # excluded_key.
    if len(excluded_list) > 0:
      the_dict[excluded_key] = excluded_list

  # Now recurse into subdicts and lists that may contain dicts.
  for key, value in the_dict.iteritems():
    if isinstance(value, dict):
      ProcessListFiltersInDict(key, value)
    elif isinstance(value, list):
      ProcessListFiltersInList(key, value)
def ProcessListFiltersInList(name, the_list):
  """Applies exclusion/regex list filtering to every dict nested anywhere
  inside |the_list|, recursing through sublists."""
  for element in the_list:
    if isinstance(element, dict):
      ProcessListFiltersInDict(name, element)
    elif isinstance(element, list):
      ProcessListFiltersInList(name, element)
def ValidateRulesInTarget(target, target_dict, extra_sources_for_rules):
  """Ensures that the rules sections in target_dict are valid and consistent,
  and determines which sources they apply to.

  Arguments:
    target: string, name of target.
    target_dict: dict, target spec containing "rules" and "sources" lists.
    extra_sources_for_rules: a list of keys to scan for rule matches in
        addition to 'sources'.
  Raises:
    KeyError: on duplicate rule names, duplicate extensions, or a
        pre-existing 'rule_sources' entry.
  """
  # Dicts to map between values found in rules' 'rule_name' and 'extension'
  # keys and the rule dicts themselves.
  rule_names = {}
  rule_extensions = {}

  rules = target_dict.get('rules', [])
  for rule in rules:
    # Make sure that there's no conflict among rule names and extensions.
    rule_name = rule['rule_name']
    if rule_name in rule_names:
      raise KeyError, 'rule %s exists in duplicate, target %s' % \
                      (rule_name, target)
    rule_names[rule_name] = rule

    rule_extension = rule['extension']
    if rule_extension in rule_extensions:
      raise KeyError, ('extension %s associated with multiple rules, ' +
                       'target %s rules %s and %s') % \
                      (rule_extension, target,
                       rule_extensions[rule_extension]['rule_name'],
                       rule_name)
    rule_extensions[rule_extension] = rule

    # Make sure rule_sources isn't already there.  It's going to be
    # created below if needed.
    if 'rule_sources' in rule:
      raise KeyError, \
            'rule_sources must not exist in input, target %s rule %s' % \
            (target, rule_name)
    extension = rule['extension']

    # Collect every source whose extension (without the leading dot)
    # matches this rule's extension.
    rule_sources = []
    source_keys = ['sources']
    source_keys.extend(extra_sources_for_rules)
    for source_key in source_keys:
      for source in target_dict.get(source_key, []):
        (source_root, source_extension) = os.path.splitext(source)
        if source_extension.startswith('.'):
          source_extension = source_extension[1:]
        if source_extension == extension:
          rule_sources.append(source)

    if len(rule_sources) > 0:
      rule['rule_sources'] = rule_sources
def ValidateActionsInTarget(target, target_dict, build_file):
  """Validates the actions in a target.

  Every action must carry a non-empty 'action_name' so generators and
  error messages can refer to it.

  Arguments:
    target: string, qualified target name (unused; kept for signature
        parity with the other Validate* helpers).
    target_dict: dict, target spec with an optional 'actions' list.
    build_file: path of the originating build file (unused here).
  Raises:
    Exception: if any action lacks an 'action_name'.
  """
  target_name = target_dict.get('target_name')
  for action in target_dict.get('actions', []):
    # Note: the original also read action['inputs'] into an unused local;
    # that dead assignment has been removed.
    if not action.get('action_name'):
      raise Exception("Anonymous action in target %s.  "
                      "An action must have an 'action_name' field." %
                      target_name)
def ValidateRunAsInTarget(target, target_dict, build_file):
target_name = target_dict.get('target_name')
run_as = target_dict.get('run_as')
if not run_as:
return
if not isinstance(run_as, dict):
raise Exception("The 'run_as' in target %s from file %s should be a "
"dictionary." %
(target_name, build_file))
action = run_as.get('action')
if not action:
raise Exception("The 'run_as' in target %s from file %s must have an "
"'action' section." %
(target_name, build_file))
if not isinstance(action, list):
raise Exception("The 'action' for 'run_as' in target %s from file %s "
"must be a list." %
(target_name, build_file))
working_directory = run_as.get('working_directory')
if working_directory and not isinstance(working_directory, str):
raise Exception("The 'working_directory' for 'run_as' in target %s "
"in file %s should be a string." %
(target_name, build_file))
environment = run_as.get('environment')
if environment and not isinstance(environment, dict):
raise Exception("The 'environment' for 'run_as' in target %s "
"in file %s should be a dictionary." %
(target_name, build_file))
def TurnIntIntoStrInDict(the_dict):
  """Given dict the_dict, recursively converts all integers into strings.

  Both integer values and integer keys are converted, in place, at every
  nesting level (through sub-dicts and sub-lists).
  """
  # Snapshot the items so keys can be re-inserted under their string form
  # while walking.
  for key, value in list(the_dict.items()):
    if isinstance(value, int):
      value = str(value)
      the_dict[key] = value
    elif isinstance(value, dict):
      TurnIntIntoStrInDict(value)
    elif isinstance(value, list):
      TurnIntIntoStrInList(value)

    if isinstance(key, int):
      # Re-key the (possibly converted) value under the stringified key.
      the_dict[str(key)] = value
      del the_dict[key]


def TurnIntIntoStrInList(the_list):
  """Given list the_list, recursively converts all integers into strings."""
  for index, item in enumerate(the_list):
    if isinstance(item, int):
      the_list[index] = str(item)
    elif isinstance(item, dict):
      TurnIntIntoStrInDict(item)
    elif isinstance(item, list):
      TurnIntIntoStrInList(item)
def Load(build_files, variables, includes, depth, generator_input_info, check):
  """Loads and fully processes the given build files.

  This is the main entry point of the input processor: it loads every
  build file, qualifies and expands dependencies, publishes dependent
  settings, sets up configurations, applies list filters, and validates
  rules/actions/run_as sections.

  Returns:
    [flat_list, targets, data]: the dependency-ordered target list, the
    dict of target specs, and the raw per-build-file data dict.
  """
  # Set up path_sections and non_configuration_keys with the default data plus
  # the generator-specifc data.
  global path_sections
  path_sections = base_path_sections[:]
  path_sections.extend(generator_input_info['path_sections'])

  global non_configuration_keys
  non_configuration_keys = base_non_configuration_keys[:]
  non_configuration_keys.extend(generator_input_info['non_configuration_keys'])

  # TODO(mark) handle variants if the generator doesn't want them directly.
  # NOTE(review): generator_handles_variants is currently unused below.
  generator_handles_variants = \
      generator_input_info['generator_handles_variants']

  global absolute_build_file_paths
  absolute_build_file_paths = \
      generator_input_info['generator_wants_absolute_build_file_paths']

  global multiple_toolsets
  multiple_toolsets = generator_input_info[
      'generator_supports_multiple_toolsets']

  # A generator can have other lists (in addition to sources) be processed
  # for rules.
  extra_sources_for_rules = generator_input_info['extra_sources_for_rules']

  # Load build files.  This loads every target-containing build file into
  # the |data| dictionary such that the keys to |data| are build file names,
  # and the values are the entire build file contents after "early" or "pre"
  # processing has been done and includes have been resolved.
  # NOTE: data contains both "target" files (.gyp) and "includes" (.gypi), as
  # well as meta-data (e.g. 'included_files' key). 'target_build_files' keeps
  # track of the keys corresponding to "target" files.
  data = {'target_build_files': set()}
  aux_data = {}
  for build_file in build_files:
    # Normalize paths everywhere.  This is important because paths will be
    # used as keys to the data dict and for references between input files.
    build_file = os.path.normpath(build_file)
    try:
      LoadTargetBuildFile(build_file, data, aux_data, variables, includes,
                          depth, check)
    except Exception, e:
      gyp.common.ExceptionAppend(e, 'while trying to load %s' % build_file)
      raise

  # Build a dict to access each target's subdict by qualified name.
  targets = BuildTargetsDict(data)

  # Fully qualify all dependency links.
  QualifyDependencies(targets)

  # Expand dependencies specified as build_file:*.
  ExpandWildcardDependencies(targets, data)

  [dependency_nodes, flat_list] = BuildDependencyList(targets)

  # Handle dependent settings of various types.
  for settings_type in ['all_dependent_settings',
                        'direct_dependent_settings',
                        'link_settings']:
    DoDependentSettings(settings_type, flat_list, targets, dependency_nodes)

    # Take out the dependent settings now that they've been published to all
    # of the targets that require them.
    for target in flat_list:
      if settings_type in targets[target]:
        del targets[target][settings_type]

  # Make sure static libraries don't declare dependencies on other static
  # libraries, but that linkables depend on all unlinked static libraries
  # that they need so that their link steps will be correct.
  AdjustStaticLibraryDependencies(flat_list, targets, dependency_nodes)

  # Apply "post"/"late"/"target" variable expansions and condition evaluations.
  for target in flat_list:
    target_dict = targets[target]
    build_file = gyp.common.BuildFile(target)
    ProcessVariablesAndConditionsInDict(target_dict, True, variables,
                                        build_file)

  # Move everything that can go into a "configurations" section into one.
  for target in flat_list:
    target_dict = targets[target]
    SetUpConfigurations(target, target_dict)

  # Apply exclude (!) and regex (/) list filters.
  for target in flat_list:
    target_dict = targets[target]
    ProcessListFiltersInDict(target, target_dict)

  # Make sure that the rules make sense, and build up rule_sources lists as
  # needed.  Not all generators will need to use the rule_sources lists, but
  # some may, and it seems best to build the list in a common spot.
  # Also validate actions and run_as elements in targets.
  for target in flat_list:
    target_dict = targets[target]
    build_file = gyp.common.BuildFile(target)
    ValidateRulesInTarget(target, target_dict, extra_sources_for_rules)
    ValidateRunAsInTarget(target, target_dict, build_file)
    ValidateActionsInTarget(target, target_dict, build_file)

  # Generators might not expect ints.  Turn them into strs.
  TurnIntIntoStrInDict(data)

  # TODO(mark): Return |data| for now because the generator needs a list of
  # build files that came in.  In the future, maybe it should just accept
  # a list, and not the whole data dict.
  return [flat_list, targets, data]
| bsd-3-clause |
40023255/w16b_test | static/Brython3.1.3-20150514-095342/Lib/unittest/test/testmock/testwith.py | 739 | 5806 | import unittest
from warnings import catch_warnings
from unittest.test.testmock.support import is_instance
from unittest.mock import MagicMock, Mock, patch, sentinel, mock_open, call
something = sentinel.Something
something_else = sentinel.SomethingElse
class WithTest(unittest.TestCase):
    """Tests for patch(), patch.object() and patch.dict() used as context
    managers (``with`` statements)."""
    def test_with_statement(self):
        # Patch is active inside the with-block and reverted on exit.
        with patch('%s.something' % __name__, sentinel.Something2):
            self.assertEqual(something, sentinel.Something2, "unpatched")
        self.assertEqual(something, sentinel.Something)
    def test_with_statement_exception(self):
        # The patch must be undone even when the body raises, and the
        # exception must propagate (patch() must not swallow it).
        try:
            with patch('%s.something' % __name__, sentinel.Something2):
                self.assertEqual(something, sentinel.Something2, "unpatched")
                raise Exception('pow')
        except Exception:
            pass
        else:
            self.fail("patch swallowed exception")
        self.assertEqual(something, sentinel.Something)
    def test_with_statement_as(self):
        # Without an explicit replacement, patch() supplies a MagicMock.
        with patch('%s.something' % __name__) as mock_something:
            self.assertEqual(something, mock_something, "unpatched")
            self.assertTrue(is_instance(mock_something, MagicMock),
                            "patching wrong type")
        self.assertEqual(something, sentinel.Something)
    def test_patch_object_with_statement(self):
        # patch.object() replaces an attribute on an object and restores it.
        class Foo(object):
            something = 'foo'
        original = Foo.something
        with patch.object(Foo, 'something'):
            self.assertNotEqual(Foo.something, original, "unpatched")
        self.assertEqual(Foo.something, original)
    def test_with_statement_nested(self):
        # Two patches combined in a single with statement are both applied
        # and both reverted on exit.
        with catch_warnings(record=True):
            with patch('%s.something' % __name__) as mock_something, patch('%s.something_else' % __name__) as mock_something_else:
                self.assertEqual(something, mock_something, "unpatched")
                self.assertEqual(something_else, mock_something_else,
                                 "unpatched")
        self.assertEqual(something, sentinel.Something)
        self.assertEqual(something_else, sentinel.SomethingElse)
    def test_with_statement_specified(self):
        # An explicit replacement object is what __enter__ returns.
        with patch('%s.something' % __name__, sentinel.Patched) as mock_something:
            self.assertEqual(something, mock_something, "unpatched")
            self.assertEqual(mock_something, sentinel.Patched, "wrong patch")
        self.assertEqual(something, sentinel.Something)
    def testContextManagerMocking(self):
        # A plain Mock can act as a context manager when __enter__/__exit__
        # are attached explicitly.
        mock = Mock()
        mock.__enter__ = Mock()
        mock.__exit__ = Mock()
        mock.__exit__.return_value = False
        with mock as m:
            self.assertEqual(m, mock.__enter__.return_value)
        mock.__enter__.assert_called_with()
        mock.__exit__.assert_called_with(None, None, None)
    def test_context_manager_with_magic_mock(self):
        # MagicMock supports the context-manager protocol out of the box and
        # does not suppress exceptions raised inside the body.
        mock = MagicMock()
        with self.assertRaises(TypeError):
            with mock:
                'foo' + 3
        mock.__enter__.assert_called_with()
        self.assertTrue(mock.__exit__.called)
    def test_with_statement_same_attribute(self):
        # Nested patches of the same attribute unwind in LIFO order.
        with patch('%s.something' % __name__, sentinel.Patched) as mock_something:
            self.assertEqual(something, mock_something, "unpatched")
            with patch('%s.something' % __name__) as mock_again:
                self.assertEqual(something, mock_again, "unpatched")
            self.assertEqual(something, mock_something,
                             "restored with wrong instance")
        self.assertEqual(something, sentinel.Something, "not restored")
    def test_with_statement_imbricated(self):
        # Interleaved patches of two attributes are each restored on exit.
        with patch('%s.something' % __name__) as mock_something:
            self.assertEqual(something, mock_something, "unpatched")
            with patch('%s.something_else' % __name__) as mock_something_else:
                self.assertEqual(something_else, mock_something_else,
                                 "unpatched")
        self.assertEqual(something, sentinel.Something)
        self.assertEqual(something_else, sentinel.SomethingElse)
    def test_dict_context_manager(self):
        # patch.dict restores the dict both on normal exit and on exception.
        foo = {}
        with patch.dict(foo, {'a': 'b'}):
            self.assertEqual(foo, {'a': 'b'})
        self.assertEqual(foo, {})
        with self.assertRaises(NameError):
            with patch.dict(foo, {'a': 'b'}):
                self.assertEqual(foo, {'a': 'b'})
                raise NameError('Konrad')
        self.assertEqual(foo, {})
class TestMockOpen(unittest.TestCase):
    """Tests for the mock_open() helper."""

    def test_mock_open(self):
        # Patching builtin open with a mock_open() records the call.
        opener = mock_open()
        with patch('%s.open' % __name__, opener, create=True) as patched:
            self.assertIs(patched, opener)
            open('foo')
        opener.assert_called_once_with('foo')

    def test_mock_open_context_manager(self):
        # The mocked file works as a context manager and records the
        # enter/read/exit sequence in order.
        opener = mock_open()
        with patch('%s.open' % __name__, opener, create=True):
            with open('foo') as f:
                f.read()
        self.assertEqual(
            opener.mock_calls,
            [call('foo'), call().__enter__(), call().read(),
             call().__exit__(None, None, None)])
        self.assertIs(f, opener.return_value)

    def test_explicit_mock(self):
        # mock_open() can configure an existing MagicMock in place.
        explicit = MagicMock()
        mock_open(explicit)
        with patch('%s.open' % __name__, explicit, create=True) as patched:
            self.assertIs(patched, explicit)
            open('foo')
        explicit.assert_called_once_with('foo')

    def test_read_data(self):
        # read_data seeds the value returned by handle.read().
        opener = mock_open(read_data='foo')
        with patch('%s.open' % __name__, opener, create=True):
            handle = open('bar')
            contents = handle.read()
        self.assertEqual(contents, 'foo')
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| agpl-3.0 |
maciek263/django2 | myvenv/Lib/site-packages/django/core/exceptions.py | 486 | 5276 | """
Global Django exception and warning classes.
"""
from django.utils import six
from django.utils.encoding import force_text
class FieldDoesNotExist(Exception):
    """Raised when the model field being looked up does not exist."""
class DjangoRuntimeWarning(RuntimeWarning):
    """Warning category for runtime issues detected by Django."""
class AppRegistryNotReady(Exception):
    """Raised when the django.apps registry is used before it is populated."""
class ObjectDoesNotExist(Exception):
    """Raised when the requested object cannot be found."""
    # Template variable resolution treats this as a silent failure
    # instead of rendering an error.
    silent_variable_failure = True
class MultipleObjectsReturned(Exception):
    """Raised when a query expecting one object finds more than one."""
class SuspiciousOperation(Exception):
    """Base class raised when the user did something suspicious."""
class SuspiciousMultipartForm(SuspiciousOperation):
    """Raised for a suspect MIME request in multipart form data."""
class SuspiciousFileOperation(SuspiciousOperation):
    """Raised when a suspicious filesystem operation was attempted."""
class DisallowedHost(SuspiciousOperation):
    """Raised when the HTTP_HOST header contains an invalid value."""
class DisallowedRedirect(SuspiciousOperation):
    """Raised for a redirect to a URL scheme that is not in the allowed list."""
class PermissionDenied(Exception):
    """Raised when the user did not have permission to do that."""
class ViewDoesNotExist(Exception):
    """Raised when the requested view cannot be found."""
class MiddlewareNotUsed(Exception):
    """Raised when a middleware is not used in this server configuration."""
class ImproperlyConfigured(Exception):
    """Raised when Django is somehow improperly configured."""
class FieldError(Exception):
    """Raised for some kind of problem with a model field."""
# Key used in error dicts for errors that are not tied to a particular field.
NON_FIELD_ERRORS = '__all__'
class ValidationError(Exception):
    """An error while validating data."""
    def __init__(self, message, code=None, params=None):
        """
        The `message` argument can be a single error, a list of errors, or a
        dictionary that maps field names to lists of errors. What we define as
        an "error" can be either a simple string or an instance of
        ValidationError with its message attribute set, and what we define as
        list or dictionary can be an actual `list` or `dict` or an instance
        of ValidationError with its `error_list` or `error_dict` attribute set.
        """
        # PY2 can't pickle naive exception: http://bugs.python.org/issue1692335.
        super(ValidationError, self).__init__(message, code, params)
        # Unwrap a ValidationError argument into its underlying dict/list/
        # scalar form so the normalization below sees plain data.
        if isinstance(message, ValidationError):
            if hasattr(message, 'error_dict'):
                message = message.error_dict
            # PY2 has a `message` property which is always there so we can't
            # duck-type on it. It was introduced in Python 2.5 and already
            # deprecated in Python 2.6.
            elif not hasattr(message, 'message' if six.PY3 else 'code'):
                message = message.error_list
            else:
                message, code, params = message.message, message.code, message.params
        if isinstance(message, dict):
            # Field -> list-of-errors mapping; every value is normalized to
            # a list of ValidationError instances.
            self.error_dict = {}
            for field, messages in message.items():
                if not isinstance(messages, ValidationError):
                    messages = ValidationError(messages)
                self.error_dict[field] = messages.error_list
        elif isinstance(message, list):
            self.error_list = []
            for message in message:
                # Normalize plain strings to instances of ValidationError.
                if not isinstance(message, ValidationError):
                    message = ValidationError(message)
                if hasattr(message, 'error_dict'):
                    self.error_list.extend(sum(message.error_dict.values(), []))
                else:
                    self.error_list.extend(message.error_list)
        else:
            # Single message: this instance is its own one-element error list.
            self.message = message
            self.code = code
            self.params = params
            self.error_list = [self]
    @property
    def message_dict(self):
        """Return a dict mapping field names to lists of rendered messages.

        Raises AttributeError when this error does not hold an `error_dict`.
        """
        # Trigger an AttributeError if this ValidationError
        # doesn't have an error_dict.
        getattr(self, 'error_dict')
        return dict(self)
    @property
    def messages(self):
        """Return every rendered message as one flat list."""
        if hasattr(self, 'error_dict'):
            return sum(dict(self).values(), [])
        return list(self)
    def update_error_dict(self, error_dict):
        """Merge this error's messages into `error_dict` in place, return it.

        Errors not bound to a field are filed under NON_FIELD_ERRORS.
        """
        if hasattr(self, 'error_dict'):
            for field, error_list in self.error_dict.items():
                error_dict.setdefault(field, []).extend(error_list)
        else:
            error_dict.setdefault(NON_FIELD_ERRORS, []).extend(self.error_list)
        return error_dict
    def __iter__(self):
        # Yields (field, messages) pairs for dict-style errors; otherwise
        # yields the rendered, params-interpolated message strings.
        if hasattr(self, 'error_dict'):
            for field, errors in self.error_dict.items():
                yield field, list(ValidationError(errors))
        else:
            for error in self.error_list:
                message = error.message
                if error.params:
                    # %-interpolate params into the message template.
                    message %= error.params
                yield force_text(message)
    def __str__(self):
        if hasattr(self, 'error_dict'):
            return repr(dict(self))
        return repr(list(self))
    def __repr__(self):
        return 'ValidationError(%s)' % self
| mit |
guardian/alerta | tests/test_isa_18_2.py | 1 | 36984 | import json
import unittest
from alerta.app import alarm_model, create_app, db, plugins
from alerta.plugins import PluginBase
class AlertTestCase(unittest.TestCase):
def setUp(self):
test_config = {
'TESTING': True,
'ALARM_MODEL': 'ISA_18_2',
'AUTH_REQUIRED': False,
'PLUGINS': [],
'ALERT_TIMEOUT': 120,
'HISTORY_LIMIT': 5
}
self.app = create_app(test_config, environment='development')
self.client = self.app.test_client()
self.fault_alarm = {
'severity': 'Critical',
'origin': 'LIC_101',
'value': 'ERROR',
'resource': 'LIC_101',
'event': 'FAILED_ALM',
'group': 'PROCESS',
'text': 'Shutdown/Interlocked',
'type': 'FAULT',
'environment': 'Production',
'service': ['REACTORS'],
'correlate': ['FAILED_ALM', 'HI_HI_ALM', 'HI_ALM', 'LO_ALM', 'LO_LO_ALM', 'ADVISE_ALM', 'RST_ALM']
}
self.critical_alarm = {
'severity': 'Critical',
'origin': 'PID1',
'value': '19',
'resource': 'LIC_101',
'event': 'HI_HI_ALM',
'group': 'PROCESS',
'text': 'High High Alarm Limit 15',
'type': 'ALARM',
'environment': 'Production',
'service': ['REACTORS'],
'correlate': ['FAILED_ALM', 'HI_HI_ALM', 'HI_ALM', 'LO_ALM', 'LO_LO_ALM', 'ADVISE_ALM', 'RST_ALM']
}
self.high_alarm = {
'severity': 'High',
'origin': 'PID1',
'value': '13',
'resource': 'LIC_101',
'event': 'HI_ALM',
'group': 'PROCESS',
'text': 'High Alarm Limit 10',
'type': 'ALARM',
'environment': 'Production',
'service': ['REACTORS'],
'correlate': ['FAILED_ALM', 'HI_HI_ALM', 'HI_ALM', 'LO_ALM', 'LO_LO_ALM', 'ADVISE_ALM', 'RST_ALM']
}
self.medium_alarm = {
'severity': 'Medium',
'origin': 'PID1',
'value': '6',
'resource': 'LIC_101',
'event': 'LO_ALM',
'group': 'PROCESS',
'text': 'Low Alarm Limit 5',
'type': 'ALARM',
'environment': 'Production',
'service': ['REACTORS'],
'correlate': ['FAILED_ALM', 'HI_HI_ALM', 'HI_ALM', 'LO_ALM', 'LO_LO_ALM', 'ADVISE_ALM', 'RST_ALM']
}
self.low_alarm = {
'severity': 'Low',
'origin': 'PID1',
'value': '1',
'resource': 'LIC_101',
'event': 'LO_LO_ALM',
'group': 'PROCESS',
'text': 'Low Low Alarm Limit 0',
'type': 'ALARM',
'environment': 'Production',
'service': ['REACTORS'],
'correlate': ['FAILED_ALM', 'HI_HI_ALM', 'HI_ALM', 'LO_ALM', 'LO_LO_ALM', 'ADVISE_ALM', 'RST_ALM']
}
self.advisory_alarm = {
'severity': 'Advisory',
'origin': 'PID1',
'value': '1',
'resource': 'LIC_101',
'event': 'ADVISE_ALM',
'group': 'PROCESS',
'text': 'Low Low Alarm Limit 0',
'type': 'ALARM',
'environment': 'Production',
'service': ['REACTORS'],
'correlate': ['FAILED_ALM', 'HI_HI_ALM', 'HI_ALM', 'LO_ALM', 'LO_LO_ALM', 'ADVISE_ALM', 'RST_ALM']
}
self.ok_alarm = {
'severity': 'OK',
'origin': 'PID1',
'value': '0',
'resource': 'LIC_101',
'event': 'RST_ALM',
'group': 'PROCESS',
'text': 'OK Alarm Limit 0',
'type': 'ALARM',
'environment': 'Production',
'service': ['REACTORS'],
'correlate': ['FAILED_ALM', 'HI_HI_ALM', 'HI_ALM', 'LO_ALM', 'LO_LO_ALM', 'ADVISE_ALM', 'RST_ALM']
}
    def tearDown(self):
        # Wipe the alert database between tests so cases stay independent.
        db.destroy()
def test_ack_active_alarm(self):
# Create OK alarm
response = self.client.post('/alert', data=json.dumps(self.ok_alarm), content_type='application/json')
self.assertEqual(response.status_code, 201)
data = json.loads(response.data.decode('utf-8'))
alert_id = data['id']
# Alarm Occurs, Normal (A) -> Unack (B)
response = self.client.post('/alert', data=json.dumps(self.high_alarm), content_type='application/json')
self.assertEqual(response.status_code, 201)
data = json.loads(response.data.decode('utf-8'))
self.assertIn(alert_id, data['alert']['id'])
self.assertEqual(data['alert']['resource'], 'LIC_101')
self.assertEqual(data['alert']['event'], 'HI_ALM')
self.assertEqual(data['alert']['environment'], 'Production')
self.assertEqual(data['alert']['severity'], 'High')
self.assertEqual(data['alert']['status'], 'UNACK')
self.assertEqual(data['alert']['service'], ['REACTORS'])
self.assertEqual(data['alert']['group'], 'PROCESS')
self.assertEqual(data['alert']['value'], '13')
self.assertEqual(data['alert']['text'], 'High Alarm Limit 10')
self.assertEqual(data['alert']['tags'], [])
self.assertEqual(data['alert']['attributes'], {'ip': '127.0.0.1'})
self.assertEqual(data['alert']['origin'], 'PID1')
self.assertEqual(data['alert']['type'], 'ALARM')
self.assertEqual(data['alert']['duplicateCount'], 0)
self.assertEqual(data['alert']['repeat'], False)
self.assertEqual(data['alert']['previousSeverity'], alarm_model.DEFAULT_PREVIOUS_SEVERITY)
self.assertEqual(data['alert']['trendIndication'], 'moreSevere')
# Operator Ack, Unack (B) -> Ack (C)
data = {
'action': 'ack',
'text': 'operator ack'
}
response = self.client.put('/alert/' + alert_id + '/action',
data=json.dumps(data), content_type='application/json')
self.assertEqual(response.status_code, 200)
response = self.client.get('/alert/' + alert_id)
self.assertEqual(response.status_code, 200)
data = json.loads(response.data.decode('utf-8'))
self.assertEqual(data['alert']['severity'], 'High')
self.assertEqual(data['alert']['status'], 'ACKED')
# Re-Alarm, Ack (C) -> Unack (B)
response = self.client.post('/alert', data=json.dumps(self.critical_alarm), content_type='application/json')
self.assertEqual(response.status_code, 201)
data = json.loads(response.data.decode('utf-8'))
self.assertIn(alert_id, data['alert']['id'])
self.assertEqual(data['alert']['resource'], 'LIC_101')
self.assertEqual(data['alert']['event'], 'HI_HI_ALM')
self.assertEqual(data['alert']['environment'], 'Production')
self.assertEqual(data['alert']['severity'], 'Critical')
self.assertEqual(data['alert']['status'], 'UNACK')
self.assertEqual(data['alert']['service'], ['REACTORS'])
self.assertEqual(data['alert']['group'], 'PROCESS')
self.assertEqual(data['alert']['value'], '19')
self.assertEqual(data['alert']['text'], 'High High Alarm Limit 15')
self.assertEqual(data['alert']['tags'], [])
self.assertEqual(data['alert']['attributes'], {'ip': '127.0.0.1'})
self.assertEqual(data['alert']['origin'], 'PID1')
self.assertEqual(data['alert']['type'], 'ALARM')
self.assertEqual(data['alert']['duplicateCount'], 0)
self.assertEqual(data['alert']['repeat'], False)
self.assertEqual(data['alert']['previousSeverity'], 'High')
self.assertEqual(data['alert']['trendIndication'], 'moreSevere')
# Operator Ack (again), Unack (B) -> Ack (C)
data = {
'action': 'ack',
'text': 'operator ack'
}
response = self.client.put('/alert/' + alert_id + '/action',
data=json.dumps(data), content_type='application/json')
self.assertEqual(response.status_code, 200)
response = self.client.get('/alert/' + alert_id)
self.assertEqual(response.status_code, 200)
data = json.loads(response.data.decode('utf-8'))
self.assertEqual(data['alert']['severity'], 'Critical')
self.assertEqual(data['alert']['status'], 'ACKED')
# Process RTN Alarm Clears, Ack (C) -> Normal (A)
response = self.client.post('/alert', data=json.dumps(self.ok_alarm), content_type='application/json')
self.assertEqual(response.status_code, 201)
data = json.loads(response.data.decode('utf-8'))
self.assertIn(alert_id, data['alert']['id'])
self.assertEqual(data['alert']['resource'], 'LIC_101')
self.assertEqual(data['alert']['event'], 'RST_ALM')
self.assertEqual(data['alert']['environment'], 'Production')
self.assertEqual(data['alert']['severity'], 'OK')
self.assertEqual(data['alert']['status'], 'NORM')
self.assertEqual(data['alert']['service'], ['REACTORS'])
self.assertEqual(data['alert']['group'], 'PROCESS')
self.assertEqual(data['alert']['value'], '0')
self.assertEqual(data['alert']['text'], 'OK Alarm Limit 0')
self.assertEqual(data['alert']['tags'], [])
self.assertEqual(data['alert']['attributes'], {'ip': '127.0.0.1'})
self.assertEqual(data['alert']['origin'], 'PID1')
self.assertEqual(data['alert']['type'], 'ALARM')
self.assertEqual(data['alert']['duplicateCount'], 0)
self.assertEqual(data['alert']['repeat'], False)
self.assertEqual(data['alert']['previousSeverity'], 'Critical')
self.assertEqual(data['alert']['trendIndication'], 'lessSevere')
def test_rtn_before_ack(self):
# Create OK alarm
response = self.client.post('/alert', data=json.dumps(self.ok_alarm), content_type='application/json')
self.assertEqual(response.status_code, 201)
data = json.loads(response.data.decode('utf-8'))
alert_id = data['id']
# Alarm Occurs, Normal (A) -> Unack (B)
response = self.client.post('/alert', data=json.dumps(self.high_alarm), content_type='application/json')
self.assertEqual(response.status_code, 201)
data = json.loads(response.data.decode('utf-8'))
self.assertIn(alert_id, data['alert']['id'])
self.assertEqual(data['alert']['resource'], 'LIC_101')
self.assertEqual(data['alert']['event'], 'HI_ALM')
self.assertEqual(data['alert']['environment'], 'Production')
self.assertEqual(data['alert']['severity'], 'High')
self.assertEqual(data['alert']['status'], 'UNACK')
self.assertEqual(data['alert']['service'], ['REACTORS'])
self.assertEqual(data['alert']['group'], 'PROCESS')
self.assertEqual(data['alert']['value'], '13')
self.assertEqual(data['alert']['text'], 'High Alarm Limit 10')
self.assertEqual(data['alert']['tags'], [])
self.assertEqual(data['alert']['attributes'], {'ip': '127.0.0.1'})
self.assertEqual(data['alert']['origin'], 'PID1')
self.assertEqual(data['alert']['type'], 'ALARM')
self.assertEqual(data['alert']['duplicateCount'], 0)
self.assertEqual(data['alert']['repeat'], False)
self.assertEqual(data['alert']['previousSeverity'], alarm_model.DEFAULT_PREVIOUS_SEVERITY)
self.assertEqual(data['alert']['trendIndication'], 'moreSevere')
# Process RTN and Alarm Clears, Unack (B) -> RTN Unack (D)
response = self.client.post('/alert', data=json.dumps(self.ok_alarm), content_type='application/json')
self.assertEqual(response.status_code, 201)
data = json.loads(response.data.decode('utf-8'))
self.assertIn(alert_id, data['alert']['id'])
self.assertEqual(data['alert']['resource'], 'LIC_101')
self.assertEqual(data['alert']['event'], 'RST_ALM')
self.assertEqual(data['alert']['environment'], 'Production')
self.assertEqual(data['alert']['severity'], 'OK')
self.assertEqual(data['alert']['status'], 'RTNUN')
self.assertEqual(data['alert']['service'], ['REACTORS'])
self.assertEqual(data['alert']['group'], 'PROCESS')
self.assertEqual(data['alert']['value'], '0')
self.assertEqual(data['alert']['text'], 'OK Alarm Limit 0')
self.assertEqual(data['alert']['tags'], [])
self.assertEqual(data['alert']['attributes'], {'ip': '127.0.0.1'})
self.assertEqual(data['alert']['origin'], 'PID1')
self.assertEqual(data['alert']['type'], 'ALARM')
self.assertEqual(data['alert']['duplicateCount'], 0)
self.assertEqual(data['alert']['repeat'], False)
self.assertEqual(data['alert']['previousSeverity'], 'High')
self.assertEqual(data['alert']['trendIndication'], 'lessSevere')
# Resend, Duplicate=1
response = self.client.post('/alert', data=json.dumps(self.medium_alarm), content_type='application/json')
self.assertEqual(response.status_code, 201)
data = json.loads(response.data.decode('utf-8'))
self.assertIn(alert_id, data['alert']['id'])
self.assertEqual(data['alert']['resource'], 'LIC_101')
self.assertEqual(data['alert']['event'], 'LO_ALM')
self.assertEqual(data['alert']['environment'], 'Production')
self.assertEqual(data['alert']['severity'], 'Medium')
self.assertEqual(data['alert']['status'], 'UNACK')
self.assertEqual(data['alert']['service'], ['REACTORS'])
self.assertEqual(data['alert']['group'], 'PROCESS')
self.assertEqual(data['alert']['value'], '6')
self.assertEqual(data['alert']['text'], 'Low Alarm Limit 5')
self.assertEqual(data['alert']['tags'], [])
self.assertEqual(data['alert']['attributes'], {'ip': '127.0.0.1'})
self.assertEqual(data['alert']['origin'], 'PID1')
self.assertEqual(data['alert']['type'], 'ALARM')
self.assertEqual(data['alert']['duplicateCount'], 0)
self.assertEqual(data['alert']['repeat'], False)
self.assertEqual(data['alert']['previousSeverity'], 'OK')
self.assertEqual(data['alert']['trendIndication'], 'moreSevere')
# Resend, Duplicate=1
response = self.client.post('/alert', data=json.dumps(self.medium_alarm), content_type='application/json')
self.assertEqual(response.status_code, 201)
data = json.loads(response.data.decode('utf-8'))
self.assertIn(alert_id, data['alert']['id'])
self.assertEqual(data['alert']['resource'], 'LIC_101')
self.assertEqual(data['alert']['event'], 'LO_ALM')
self.assertEqual(data['alert']['environment'], 'Production')
self.assertEqual(data['alert']['severity'], 'Medium')
self.assertEqual(data['alert']['status'], 'UNACK')
self.assertEqual(data['alert']['service'], ['REACTORS'])
self.assertEqual(data['alert']['group'], 'PROCESS')
self.assertEqual(data['alert']['value'], '6')
self.assertEqual(data['alert']['text'], 'Low Alarm Limit 5')
self.assertEqual(data['alert']['tags'], [])
self.assertEqual(data['alert']['attributes'], {'ip': '127.0.0.1'})
self.assertEqual(data['alert']['origin'], 'PID1')
self.assertEqual(data['alert']['type'], 'ALARM')
self.assertEqual(data['alert']['duplicateCount'], 1)
self.assertEqual(data['alert']['repeat'], True)
self.assertEqual(data['alert']['previousSeverity'], 'OK')
self.assertEqual(data['alert']['trendIndication'], 'moreSevere')
# Process RTN and Alarm Clears, Unack (B) -> RTN Unack (D)
response = self.client.post('/alert', data=json.dumps(self.ok_alarm), content_type='application/json')
self.assertEqual(response.status_code, 201)
data = json.loads(response.data.decode('utf-8'))
self.assertIn(alert_id, data['alert']['id'])
self.assertEqual(data['alert']['resource'], 'LIC_101')
self.assertEqual(data['alert']['event'], 'RST_ALM')
self.assertEqual(data['alert']['environment'], 'Production')
self.assertEqual(data['alert']['severity'], 'OK')
self.assertEqual(data['alert']['status'], 'RTNUN')
self.assertEqual(data['alert']['service'], ['REACTORS'])
self.assertEqual(data['alert']['group'], 'PROCESS')
self.assertEqual(data['alert']['value'], '0')
self.assertEqual(data['alert']['text'], 'OK Alarm Limit 0')
self.assertEqual(data['alert']['tags'], [])
self.assertEqual(data['alert']['attributes'], {'ip': '127.0.0.1'})
self.assertEqual(data['alert']['origin'], 'PID1')
self.assertEqual(data['alert']['type'], 'ALARM')
self.assertEqual(data['alert']['duplicateCount'], 0)
self.assertEqual(data['alert']['repeat'], False)
self.assertEqual(data['alert']['previousSeverity'], 'Medium')
self.assertEqual(data['alert']['trendIndication'], 'lessSevere')
# Operator Ack, RTN Unack (D) -> Normal (A)
data = {
'action': 'ack',
'text': 'operator ack'
}
response = self.client.put('/alert/' + alert_id + '/action',
data=json.dumps(data), content_type='application/json')
self.assertEqual(response.status_code, 200)
response = self.client.get('/alert/' + alert_id)
self.assertEqual(response.status_code, 200)
data = json.loads(response.data.decode('utf-8'))
self.assertEqual(data['alert']['severity'], 'OK')
self.assertEqual(data['alert']['status'], 'NORM')
def test_operator_shelve(self):
# Create OK alarm
response = self.client.post('/alert', data=json.dumps(self.ok_alarm), content_type='application/json')
self.assertEqual(response.status_code, 201)
data = json.loads(response.data.decode('utf-8'))
alert_id = data['id']
# Alarm Occurs, Normal (A) -> Unack (B)
response = self.client.post('/alert', data=json.dumps(self.high_alarm), content_type='application/json')
self.assertEqual(response.status_code, 201)
data = json.loads(response.data.decode('utf-8'))
self.assertIn(alert_id, data['alert']['id'])
self.assertEqual(data['alert']['resource'], 'LIC_101')
self.assertEqual(data['alert']['event'], 'HI_ALM')
self.assertEqual(data['alert']['environment'], 'Production')
self.assertEqual(data['alert']['severity'], 'High')
self.assertEqual(data['alert']['status'], 'UNACK')
self.assertEqual(data['alert']['service'], ['REACTORS'])
self.assertEqual(data['alert']['group'], 'PROCESS')
self.assertEqual(data['alert']['value'], '13')
self.assertEqual(data['alert']['text'], 'High Alarm Limit 10')
self.assertEqual(data['alert']['tags'], [])
self.assertEqual(data['alert']['attributes'], {'ip': '127.0.0.1'})
self.assertEqual(data['alert']['origin'], 'PID1')
self.assertEqual(data['alert']['type'], 'ALARM')
self.assertEqual(data['alert']['duplicateCount'], 0)
self.assertEqual(data['alert']['repeat'], False)
self.assertEqual(data['alert']['previousSeverity'], alarm_model.DEFAULT_PREVIOUS_SEVERITY)
self.assertEqual(data['alert']['trendIndication'], 'moreSevere')
# Operator Ack (again), Unack (B) -> Ack (C)
data = {
'action': 'ack',
'text': 'operator ack'
}
response = self.client.put('/alert/' + alert_id + '/action',
data=json.dumps(data), content_type='application/json')
self.assertEqual(response.status_code, 200)
response = self.client.get('/alert/' + alert_id)
self.assertEqual(response.status_code, 200)
data = json.loads(response.data.decode('utf-8'))
self.assertEqual(data['alert']['severity'], 'High')
self.assertEqual(data['alert']['status'], 'ACKED')
# Operator Shelve, Any (*) -> Shelve (E)
data = {
'action': 'shelve',
'text': 'operator shelved'
}
response = self.client.put('/alert/' + alert_id + '/action',
data=json.dumps(data), content_type='application/json')
self.assertEqual(response.status_code, 200)
response = self.client.get('/alert/' + alert_id)
self.assertEqual(response.status_code, 200)
data = json.loads(response.data.decode('utf-8'))
self.assertEqual(data['alert']['severity'], 'High')
self.assertEqual(data['alert']['status'], 'SHLVD')
# Operator Unshelve, Shelve (E) -> Unack (B)
data = {
'action': 'unshelve',
'text': 'operator unshelved'
}
response = self.client.put('/alert/' + alert_id + '/action',
data=json.dumps(data), content_type='application/json')
self.assertEqual(response.status_code, 200)
response = self.client.get('/alert/' + alert_id)
self.assertEqual(response.status_code, 200)
data = json.loads(response.data.decode('utf-8'))
self.assertEqual(data['alert']['severity'], 'High')
self.assertEqual(data['alert']['status'], 'UNACK')
# Operator Shelve, Any (*) -> Shelve (E)
data = {
'action': 'shelve',
'text': 'operator shelved'
}
response = self.client.put('/alert/' + alert_id + '/action',
data=json.dumps(data), content_type='application/json')
self.assertEqual(response.status_code, 200)
response = self.client.get('/alert/' + alert_id)
self.assertEqual(response.status_code, 200)
data = json.loads(response.data.decode('utf-8'))
self.assertEqual(data['alert']['severity'], 'High')
self.assertEqual(data['alert']['status'], 'SHLVD')
# Process RTN and Alarm Clears, Unack (B) -> RTN Unack (D)
response = self.client.post('/alert', data=json.dumps(self.ok_alarm), content_type='application/json')
self.assertEqual(response.status_code, 201)
data = json.loads(response.data.decode('utf-8'))
self.assertIn(alert_id, data['alert']['id'])
self.assertEqual(data['alert']['resource'], 'LIC_101')
self.assertEqual(data['alert']['event'], 'RST_ALM')
self.assertEqual(data['alert']['environment'], 'Production')
self.assertEqual(data['alert']['severity'], 'OK')
self.assertEqual(data['alert']['status'], 'SHLVD')
self.assertEqual(data['alert']['service'], ['REACTORS'])
self.assertEqual(data['alert']['group'], 'PROCESS')
self.assertEqual(data['alert']['value'], '0')
self.assertEqual(data['alert']['text'], 'OK Alarm Limit 0')
self.assertEqual(data['alert']['tags'], [])
self.assertEqual(data['alert']['attributes'], {'ip': '127.0.0.1'})
self.assertEqual(data['alert']['origin'], 'PID1')
self.assertEqual(data['alert']['type'], 'ALARM')
self.assertEqual(data['alert']['duplicateCount'], 0)
self.assertEqual(data['alert']['repeat'], False)
self.assertEqual(data['alert']['previousSeverity'], 'High')
self.assertEqual(data['alert']['trendIndication'], 'lessSevere')
# Operator Unshelve, Shelve (E) -> Normal (A)
data = {
'action': 'unshelve',
'text': 'operator unshelved'
}
response = self.client.put('/alert/' + alert_id + '/action',
data=json.dumps(data), content_type='application/json')
self.assertEqual(response.status_code, 200)
response = self.client.get('/alert/' + alert_id)
self.assertEqual(response.status_code, 200)
data = json.loads(response.data.decode('utf-8'))
self.assertEqual(data['alert']['severity'], 'OK')
self.assertEqual(data['alert']['status'], 'NORM')
def test_out_of_service(self):
plugins.plugins['blackout'] = NotificationBlackout()
# Create OK alarm
response = self.client.post('/alert', data=json.dumps(self.ok_alarm), content_type='application/json')
self.assertEqual(response.status_code, 201)
data = json.loads(response.data.decode('utf-8'))
alert_id = data['id']
# Alarm Occurs, Normal (A) -> Unack (B)
response = self.client.post('/alert', data=json.dumps(self.high_alarm), content_type='application/json')
self.assertEqual(response.status_code, 201)
data = json.loads(response.data.decode('utf-8'))
self.assertIn(alert_id, data['alert']['id'])
self.assertEqual(data['alert']['resource'], 'LIC_101')
self.assertEqual(data['alert']['event'], 'HI_ALM')
self.assertEqual(data['alert']['environment'], 'Production')
self.assertEqual(data['alert']['severity'], 'High')
self.assertEqual(data['alert']['status'], 'UNACK')
self.assertEqual(data['alert']['service'], ['REACTORS'])
self.assertEqual(data['alert']['group'], 'PROCESS')
self.assertEqual(data['alert']['value'], '13')
self.assertEqual(data['alert']['text'], 'High Alarm Limit 10')
self.assertEqual(data['alert']['tags'], [])
self.assertEqual(data['alert']['attributes'], {'ip': '127.0.0.1'})
self.assertEqual(data['alert']['origin'], 'PID1')
self.assertEqual(data['alert']['type'], 'ALARM')
self.assertEqual(data['alert']['duplicateCount'], 0)
self.assertEqual(data['alert']['repeat'], False)
self.assertEqual(data['alert']['previousSeverity'], alarm_model.DEFAULT_PREVIOUS_SEVERITY)
self.assertEqual(data['alert']['trendIndication'], 'moreSevere')
# Operator Ack (again), Unack (B) -> Ack (C)
data = {
'action': 'ack',
'text': 'operator ack'
}
response = self.client.put('/alert/' + alert_id + '/action',
data=json.dumps(data), content_type='application/json')
self.assertEqual(response.status_code, 200)
response = self.client.get('/alert/' + alert_id)
self.assertEqual(response.status_code, 200)
data = json.loads(response.data.decode('utf-8'))
self.assertEqual(data['alert']['severity'], 'High')
self.assertEqual(data['alert']['status'], 'ACKED')
# Create a blackout
plugins.plugins['blackout'] = NotificationBlackout()
blackout = {
'environment': 'Production',
'service': ['REACTORS']
}
response = self.client.post('/blackout', data=json.dumps(blackout), content_type='application/json')
self.assertEqual(response.status_code, 201)
data = json.loads(response.data.decode('utf-8'))
blackout_id = data['id']
# Blackout, Ack (C) -> Out-of-service (G)
response = self.client.post('/alert', data=json.dumps(self.high_alarm), content_type='application/json')
self.assertEqual(response.status_code, 201)
data = json.loads(response.data.decode('utf-8'))
self.assertIn(alert_id, data['alert']['id'])
self.assertEqual(data['alert']['resource'], 'LIC_101')
self.assertEqual(data['alert']['event'], 'HI_ALM')
self.assertEqual(data['alert']['environment'], 'Production')
self.assertEqual(data['alert']['severity'], 'High')
self.assertEqual(data['alert']['status'], 'OOSRV')
self.assertEqual(data['alert']['service'], ['REACTORS'])
self.assertEqual(data['alert']['group'], 'PROCESS')
self.assertEqual(data['alert']['value'], '13')
self.assertEqual(data['alert']['text'], 'High Alarm Limit 10')
self.assertEqual(data['alert']['tags'], [])
self.assertEqual(data['alert']['attributes'], {'ip': '127.0.0.1'})
self.assertEqual(data['alert']['origin'], 'PID1')
self.assertEqual(data['alert']['type'], 'ALARM')
self.assertEqual(data['alert']['duplicateCount'], 1)
self.assertEqual(data['alert']['repeat'], True)
self.assertEqual(data['alert']['previousSeverity'], alarm_model.DEFAULT_PREVIOUS_SEVERITY)
self.assertEqual(data['alert']['trendIndication'], 'moreSevere')
# Resend Alarm
response = self.client.post('/alert', data=json.dumps(self.low_alarm), content_type='application/json')
self.assertEqual(response.status_code, 201)
data = json.loads(response.data.decode('utf-8'))
self.assertIn(alert_id, data['alert']['id'])
self.assertEqual(data['alert']['resource'], 'LIC_101')
self.assertEqual(data['alert']['event'], 'LO_LO_ALM')
self.assertEqual(data['alert']['environment'], 'Production')
self.assertEqual(data['alert']['severity'], 'Low')
self.assertEqual(data['alert']['status'], 'OOSRV')
self.assertEqual(data['alert']['service'], ['REACTORS'])
self.assertEqual(data['alert']['group'], 'PROCESS')
self.assertEqual(data['alert']['value'], '1')
self.assertEqual(data['alert']['text'], 'Low Low Alarm Limit 0')
self.assertEqual(data['alert']['tags'], [])
self.assertEqual(data['alert']['attributes'], {'ip': '127.0.0.1'})
self.assertEqual(data['alert']['origin'], 'PID1')
self.assertEqual(data['alert']['type'], 'ALARM')
self.assertEqual(data['alert']['duplicateCount'], 0)
self.assertEqual(data['alert']['repeat'], False)
self.assertEqual(data['alert']['previousSeverity'], 'High')
self.assertEqual(data['alert']['trendIndication'], 'lessSevere')
# Remove blackout
response = self.client.delete('/blackout/' + blackout_id, content_type='application/json')
self.assertEqual(response.status_code, 200)
response = self.client.get('/alert/' + alert_id)
self.assertEqual(response.status_code, 200)
data = json.loads(response.data.decode('utf-8'))
self.assertEqual(data['alert']['severity'], 'Low')
self.assertEqual(data['alert']['status'], 'OOSRV')
# Resend Alarm, Out-of-service (E) -> Unack (B)
response = self.client.post('/alert', data=json.dumps(self.low_alarm), content_type='application/json')
self.assertEqual(response.status_code, 201)
data = json.loads(response.data.decode('utf-8'))
self.assertIn(alert_id, data['alert']['id'])
self.assertEqual(data['alert']['resource'], 'LIC_101')
self.assertEqual(data['alert']['event'], 'LO_LO_ALM')
self.assertEqual(data['alert']['environment'], 'Production')
self.assertEqual(data['alert']['severity'], 'Low')
self.assertEqual(data['alert']['status'], 'UNACK')
self.assertEqual(data['alert']['service'], ['REACTORS'])
self.assertEqual(data['alert']['group'], 'PROCESS')
self.assertEqual(data['alert']['value'], '1')
self.assertEqual(data['alert']['text'], 'Low Low Alarm Limit 0')
self.assertEqual(data['alert']['tags'], [])
self.assertEqual(data['alert']['attributes'], {'ip': '127.0.0.1'})
self.assertEqual(data['alert']['origin'], 'PID1')
self.assertEqual(data['alert']['type'], 'ALARM')
self.assertEqual(data['alert']['duplicateCount'], 1)
self.assertEqual(data['alert']['repeat'], True)
self.assertEqual(data['alert']['previousSeverity'], 'High')
self.assertEqual(data['alert']['trendIndication'], 'lessSevere')
# Create a blackout
plugins.plugins['blackout'] = NotificationBlackout()
blackout = {
'environment': 'Production',
'service': ['REACTORS'],
'resource': 'LIC_101'
}
response = self.client.post('/blackout', data=json.dumps(blackout), content_type='application/json')
self.assertEqual(response.status_code, 201)
data = json.loads(response.data.decode('utf-8'))
blackout_id = data['id']
# Blackout, Ack (C) -> Out-of-service (G)
response = self.client.post('/alert', data=json.dumps(self.high_alarm), content_type='application/json')
self.assertEqual(response.status_code, 201)
data = json.loads(response.data.decode('utf-8'))
self.assertIn(alert_id, data['alert']['id'])
self.assertEqual(data['alert']['resource'], 'LIC_101')
self.assertEqual(data['alert']['event'], 'HI_ALM')
self.assertEqual(data['alert']['environment'], 'Production')
self.assertEqual(data['alert']['severity'], 'High')
self.assertEqual(data['alert']['status'], 'OOSRV')
self.assertEqual(data['alert']['service'], ['REACTORS'])
self.assertEqual(data['alert']['group'], 'PROCESS')
self.assertEqual(data['alert']['value'], '13')
self.assertEqual(data['alert']['text'], 'High Alarm Limit 10')
self.assertEqual(data['alert']['tags'], [])
self.assertEqual(data['alert']['attributes'], {'ip': '127.0.0.1'})
self.assertEqual(data['alert']['origin'], 'PID1')
self.assertEqual(data['alert']['type'], 'ALARM')
self.assertEqual(data['alert']['duplicateCount'], 0)
self.assertEqual(data['alert']['repeat'], False)
self.assertEqual(data['alert']['previousSeverity'], 'Low')
self.assertEqual(data['alert']['trendIndication'], 'moreSevere')
# Resend Alarm
response = self.client.post('/alert', data=json.dumps(self.ok_alarm), content_type='application/json')
self.assertEqual(response.status_code, 201)
data = json.loads(response.data.decode('utf-8'))
self.assertIn(alert_id, data['alert']['id'])
self.assertEqual(data['alert']['resource'], 'LIC_101')
self.assertEqual(data['alert']['event'], 'RST_ALM')
self.assertEqual(data['alert']['environment'], 'Production')
self.assertEqual(data['alert']['severity'], 'OK')
self.assertEqual(data['alert']['status'], 'OOSRV')
self.assertEqual(data['alert']['service'], ['REACTORS'])
self.assertEqual(data['alert']['group'], 'PROCESS')
self.assertEqual(data['alert']['value'], '0')
self.assertEqual(data['alert']['text'], 'OK Alarm Limit 0')
self.assertEqual(data['alert']['tags'], [])
self.assertEqual(data['alert']['attributes'], {'ip': '127.0.0.1'})
self.assertEqual(data['alert']['origin'], 'PID1')
self.assertEqual(data['alert']['type'], 'ALARM')
self.assertEqual(data['alert']['duplicateCount'], 0)
self.assertEqual(data['alert']['repeat'], False)
self.assertEqual(data['alert']['previousSeverity'], 'High')
self.assertEqual(data['alert']['trendIndication'], 'lessSevere')
# Remove blackout
response = self.client.delete('/blackout/' + blackout_id, content_type='application/json')
self.assertEqual(response.status_code, 200)
response = self.client.get('/alert/' + alert_id)
self.assertEqual(response.status_code, 200)
data = json.loads(response.data.decode('utf-8'))
self.assertEqual(data['alert']['severity'], 'OK')
self.assertEqual(data['alert']['status'], 'OOSRV')
# Resend Alarm, Out-of-service (E) -> Unack (B)
response = self.client.post('/alert', data=json.dumps(self.ok_alarm), content_type='application/json')
self.assertEqual(response.status_code, 201)
data = json.loads(response.data.decode('utf-8'))
self.assertIn(alert_id, data['alert']['id'])
self.assertEqual(data['alert']['resource'], 'LIC_101')
self.assertEqual(data['alert']['event'], 'RST_ALM')
self.assertEqual(data['alert']['environment'], 'Production')
self.assertEqual(data['alert']['severity'], 'OK')
self.assertEqual(data['alert']['status'], 'NORM')
self.assertEqual(data['alert']['service'], ['REACTORS'])
self.assertEqual(data['alert']['group'], 'PROCESS')
self.assertEqual(data['alert']['value'], '0')
self.assertEqual(data['alert']['text'], 'OK Alarm Limit 0')
self.assertEqual(data['alert']['tags'], [])
self.assertEqual(data['alert']['attributes'], {'ip': '127.0.0.1'})
self.assertEqual(data['alert']['origin'], 'PID1')
self.assertEqual(data['alert']['type'], 'ALARM')
self.assertEqual(data['alert']['duplicateCount'], 1)
self.assertEqual(data['alert']['repeat'], True)
self.assertEqual(data['alert']['previousSeverity'], 'High')
self.assertEqual(data['alert']['trendIndication'], 'lessSevere')
class NotificationBlackout(PluginBase):
    """Test plugin: mark alerts received during a blackout as out of service
    instead of suppressing them entirely."""

    def pre_receive(self, alert):
        # Before storage: if the alert falls inside an active blackout
        # period, flag it as out of service rather than dropping it.
        if alert.is_blackout():
            alert.status = 'OOSRV'
        return alert

    def post_receive(self, alert):
        # No post-storage processing needed for this test plugin.
        return alert

    def status_change(self, alert, status, text):
        # Leave operator-driven status changes untouched.
        return
| apache-2.0 |
NelisVerhoef/scikit-learn | examples/ensemble/plot_voting_probas.py | 316 | 2824 | """
===========================================================
Plot class probabilities calculated by the VotingClassifier
===========================================================
Plot the class probabilities of the first sample in a toy dataset
predicted by three different classifiers and averaged by the
`VotingClassifier`.
First, three examplary classifiers are initialized (`LogisticRegression`,
`GaussianNB`, and `RandomForestClassifier`) and used to initialize a
soft-voting `VotingClassifier` with weights `[1, 1, 5]`, which means that
the predicted probabilities of the `RandomForestClassifier` count 5 times
as much as the weights of the other classifiers when the averaged probability
is calculated.
To visualize the probability weighting, we fit each classifier on the training
set and plot the predicted class probabilities for the first sample in this
example dataset.
"""
print(__doc__)

import numpy as np
import matplotlib.pyplot as plt

from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import VotingClassifier

# Three base classifiers, seeded for reproducibility.
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
# Tiny toy dataset: two 2-D samples per class.
X = np.array([[-1.0, -1.0], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]])
y = np.array([1, 1, 2, 2])

# Soft-voting ensemble: the random forest's probabilities get 5x the weight
# of the other two classifiers when averaging.
eclf = VotingClassifier(estimators=[('lr', clf1), ('rf', clf2), ('gnb', clf3)],
                        voting='soft',
                        weights=[1, 1, 5])

# predict class probabilities for all classifiers
probas = [c.fit(X, y).predict_proba(X) for c in (clf1, clf2, clf3, eclf)]

# get class probabilities for the first sample in the dataset
class1_1 = [pr[0, 0] for pr in probas]
class2_1 = [pr[0, 1] for pr in probas]


# plotting

N = 4  # number of groups (3 base classifiers + the voting ensemble)
ind = np.arange(N)  # group positions
width = 0.35  # bar width

fig, ax = plt.subplots()

# bars for classifier 1-3 (last group left empty, filled below)
p1 = ax.bar(ind, np.hstack(([class1_1[:-1], [0]])), width, color='green')
p2 = ax.bar(ind + width, np.hstack(([class2_1[:-1], [0]])), width, color='lightgreen')

# bars for VotingClassifier (only the last group is non-zero)
p3 = ax.bar(ind, [0, 0, 0, class1_1[-1]], width, color='blue')
p4 = ax.bar(ind + width, [0, 0, 0, class2_1[-1]], width, color='steelblue')

# plot annotations: dashed line separates base classifiers from the ensemble
plt.axvline(2.8, color='k', linestyle='dashed')
ax.set_xticks(ind + width)
ax.set_xticklabels(['LogisticRegression\nweight 1',
                    'GaussianNB\nweight 1',
                    'RandomForestClassifier\nweight 5',
                    'VotingClassifier\n(average probabilities)'],
                   rotation=40,
                   ha='right')
plt.ylim([0, 1])
plt.title('Class probabilities for sample 1 by different classifiers')
plt.legend([p1[0], p2[0]], ['class 1', 'class 2'], loc='upper left')
plt.show()
| bsd-3-clause |
frostasm/retext | setup.py | 1 | 3114 | #!/usr/bin/env python3
VERSION = '5.1.0'
long_description = '''\
ReText is simple text editor that supports Markdown and reStructuredText
markup languages. It is written in Python using PyQt libraries.'''
requires = ['docutils', 'Markdown', 'Markups', 'pyenchant', 'Pygments']
import re
import sys
from os.path import join
from distutils import log
from distutils.core import setup, Command
from distutils.command.build import build
from distutils.command.sdist import sdist
from distutils.command.install_scripts import install_scripts
from distutils.command.upload import upload
from subprocess import check_call
from glob import glob
from warnings import filterwarnings
if sys.version_info[0] < 3:
sys.exit('Error: Python 3.x is required.')
def build_translations():
    """Compile every Qt .ts translation source in locale/ to a binary .qm.

    Best-effort: all files are attempted even if some fail; only the last
    failure (if any) is reported at the end.
    """
    print('running build_translations')
    last_error = None
    for source in glob(join('locale', '*.ts')):
        try:
            check_call(('lrelease', source))
        except Exception as exc:
            last_error = exc
    if last_error:
        print('Failed to build translations:', last_error)
class retext_build(build):
    """'build' command that also compiles translations when none are present."""

    def run(self):
        build.run(self)
        # Only compile .qm files if a previous build has not produced any.
        compiled = glob(join('locale', '*.qm'))
        if not compiled:
            build_translations()
class retext_sdist(sdist):
    """'sdist' command that compiles translations before packaging, so the
    source tarball ships ready-made .qm files."""

    def run(self):
        build_translations()
        sdist.run(self)
class retext_install_scripts(install_scripts):
    """'install_scripts' command that strips the '.py' suffix, so
    'retext.py' is installed as a plain 'retext' command."""

    def run(self):
        import shutil
        install_scripts.run(self)
        for file in self.get_outputs():
            # Only rename files that actually end in '.py'; blindly chopping
            # three characters would corrupt any other installed name.
            if file.endswith('.py'):
                log.info('renaming %s to %s', file, file[:-3])
                shutil.move(file, file[:-3])
class retext_test(Command):
    """Custom 'test' command: run the project test suite and exit non-zero
    if any test fails, so CI builds break on failure."""

    user_options = []

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        # Imported lazily so 'setup.py' works without the tests package.
        from tests import main
        program = main(module=None, argv=sys.argv[:1], verbosity=2, exit=False)
        if not program.result.wasSuccessful():
            sys.exit(1)
class retext_upload(upload):
    """'upload' command that GPG-signs the distribution, uploads it to PyPI,
    then copies the tarball and its signature to SourceForge via scp."""

    def run(self):
        self.sign = True
        self.identity = '0x2f1c8ae0'  # maintainer's GPG key id
        upload.run(self)
        for command, pyversion, filename in self.distribution.dist_files:
            # Derive the minor-release directory (e.g. '5.1') by stripping the
            # last two characters ('.0') from the full version in the tarball
            # name — assumes a single-digit patch component; TODO confirm.
            full_version = re.search(r'ReText-([\d\.]+)\.tar\.gz', filename).group(1)
            new_path = ('mandriver@frs.sourceforge.net:/home/frs/project/r/re/retext/ReText-%s/' %
                        full_version[:-2])
            args = ['scp', filename, filename + '.asc', new_path]
            print('calling process', args)
            check_call(args)
# '--no-rename' opts out of the .py-stripping install step (useful on
# platforms where scripts must keep their extension).
if '--no-rename' in sys.argv:
    retext_install_scripts = install_scripts
    sys.argv.remove('--no-rename')

# distutils does not know setuptools' 'install_requires'; silence the warning.
filterwarnings('ignore', "Unknown distribution option: 'install_requires'")

setup(name='ReText',
      version=VERSION,
      description='Simple editor for Markdown and reStructuredText',
      long_description=long_description,
      author='Dmitry Shachnev',
      author_email='mitya57@gmail.com',
      url='http://retext.sourceforge.net/',
      packages=['ReText'],
      scripts=['retext.py'],
      data_files=[
          ('share/retext/locale', glob('locale/*.qm'))
      ],
      requires=requires,
      install_requires=requires,
      cmdclass={
          'build': retext_build,
          'sdist': retext_sdist,
          'install_scripts': retext_install_scripts,
          'test': retext_test,
          'upload': retext_upload
      },
      license='GPL 2+'
      )
| gpl-3.0 |
CharlesShang/FastMaskRCNN | libs/datasets/pycocotools/mask.py | 2 | 4605 | __author__ = 'tsungyi'
import libs.datasets.pycocotools._mask as _mask
# Interface for manipulating masks stored in RLE format.
#
# RLE is a simple yet efficient format for storing binary masks. RLE
# first divides a vector (or vectorized image) into a series of piecewise
# constant regions and then for each piece simply stores the length of
# that piece. For example, given M=[0 0 1 1 1 0 1] the RLE counts would
# be [2 3 1 1], or for M=[1 1 1 1 1 1 0] the counts would be [0 6 1]
# (note that the odd counts are always the numbers of zeros). Instead of
# storing the counts directly, additional compression is achieved with a
# variable bitrate representation based on a common scheme called LEB128.
#
# Compression is greatest given large piecewise constant regions.
# Specifically, the size of the RLE is proportional to the number of
# *boundaries* in M (or for an image the number of boundaries in the y
# direction). Assuming fairly simple shapes, the RLE representation is
# O(sqrt(n)) where n is number of pixels in the object. Hence space usage
# is substantially lower, especially for large simple objects (large n).
#
# Many common operations on masks can be computed directly using the RLE
# (without need for decoding). This includes computations such as area,
# union, intersection, etc. All of these operations are linear in the
# size of the RLE, in other words they are O(sqrt(n)) where n is the area
# of the object. Computing these operations on the original mask is O(n).
# Thus, using the RLE can result in substantial computational savings.
#
# The following API functions are defined:
# encode - Encode binary masks using RLE.
# decode - Decode binary masks encoded via RLE.
# merge - Compute union or intersection of encoded masks.
# iou - Compute intersection over union between masks.
# area - Compute area of encoded masks.
# toBbox - Get bounding boxes surrounding encoded masks.
# frPyObjects - Convert polygon, bbox, and uncompressed RLE to encoded RLE mask.
#
# Usage:
# Rs = encode( masks )
# masks = decode( Rs )
# R = merge( Rs, intersect=false )
# o = iou( dt, gt, iscrowd )
# a = area( Rs )
# bbs = toBbox( Rs )
# Rs = frPyObjects( [pyObjects], h, w )
#
# In the API the following formats are used:
# Rs - [dict] Run-length encoding of binary masks
# R - dict Run-length encoding of binary mask
# masks - [hxwxn] Binary mask(s) (must have type np.ndarray(dtype=uint8) in column-major order)
# iscrowd - [nx1] list of np.ndarray. 1 indicates corresponding gt image has crowd region to ignore
# bbs - [nx4] Bounding box(es) stored as [x y w h]
# poly - Polygon stored as [[x1 y1 x2 y2...],[x1 y1 ...],...] (2D list)
# dt,gt - May be either bounding boxes or encoded masks
# Both poly and bbs are 0-indexed (bbox=[0 0 1 1] encloses first pixel).
#
# Finally, a note about the intersection over union (iou) computation.
# The standard iou of a ground truth (gt) and detected (dt) object is
# iou(gt,dt) = area(intersect(gt,dt)) / area(union(gt,dt))
# For "crowd" regions, we use a modified criteria. If a gt object is
# marked as "iscrowd", we allow a dt to match any subregion of the gt.
# Choosing gt' in the crowd gt that best matches the dt can be done using
# gt'=intersect(dt,gt). Since by definition union(gt',dt)=dt, computing
# iou(gt,dt,iscrowd) = iou(gt',dt) = area(intersect(gt,dt)) / area(dt)
# For crowd gt regions we use this modified criteria above for the iou.
#
# To compile run "python setup.py build_ext --inplace"
# Please do not contact us for help with compiling.
#
# Microsoft COCO Toolbox. version 2.0
# Data, paper, and tutorials available at: http://mscoco.org/
# Code written by Piotr Dollar and Tsung-Yi Lin, 2015.
# Licensed under the Simplified BSD License [see coco/license.txt]
iou = _mask.iou
merge = _mask.merge
frPyObjects = _mask.frPyObjects
def encode(bimask):
    """RLE-encode binary mask(s).

    Accepts an hxwxn stack (returns a list of RLEs) or a single hxw mask
    (returns one RLE). Masks must be uint8 ndarrays in column-major order.
    """
    if len(bimask.shape) == 3:
        # A stack of masks: encode all of them at once.
        return _mask.encode(bimask)
    if len(bimask.shape) == 2:
        # A single mask: add a trailing axis (Fortran order) and unwrap.
        rows, cols = bimask.shape
        return _mask.encode(bimask.reshape((rows, cols, 1), order='F'))[0]
def decode(rleObjs):
    """Decode RLE(s) back to binary mask(s).

    A list of RLEs decodes to an hxwxn array; a single RLE dict decodes to
    a single hxw mask.
    """
    # isinstance (rather than an exact type comparison) also accepts
    # list subclasses.
    if isinstance(rleObjs, list):
        return _mask.decode(rleObjs)
    return _mask.decode([rleObjs])[:, :, 0]
def area(rleObjs):
    """Compute the pixel area of encoded mask(s).

    Returns a list of areas for a list of RLEs, or one area for a single RLE.
    """
    # isinstance (rather than an exact type comparison) also accepts
    # list subclasses.
    if isinstance(rleObjs, list):
        return _mask.area(rleObjs)
    return _mask.area([rleObjs])[0]
def toBbox(rleObjs):
    """Get bounding box(es) [x, y, w, h] surrounding encoded mask(s).

    Returns an nx4 array for a list of RLEs, or one box for a single RLE.
    """
    # isinstance (rather than an exact type comparison) also accepts
    # list subclasses.
    if isinstance(rleObjs, list):
        return _mask.toBbox(rleObjs)
    return _mask.toBbox([rleObjs])[0]
AThom0x7cc/SimpleMultiplayerGame | src/client/game.py | 1 | 3176 | import pygame
from . import Server
class Player:
    """Left-hand paddle controlled by the local user with the W/S keys."""

    def __init__(self, screen, width, height, username):
        self.screen = screen
        self.width = width
        self.height = height
        # Start near the left edge, vertically centred.
        self.x, self.y = 16, self.height / 2
        self.speed = 8  # pixels moved per frame while a key is held
        self.padWid, self.padHei = 8, 64
        self.points = 0
        self.username = username
        self.username_font = pygame.font.Font("client/imagine_font.ttf", 30)

    def movement(self):
        """Move the paddle for held keys, clamped to stay on screen."""
        keys = pygame.key.get_pressed()
        if keys[pygame.K_w]:
            self.y -= self.speed
        elif keys[pygame.K_s]:  # W takes precedence when both are held
            self.y += self.speed
        # Clamp using the paddle height (was a hard-coded 64 duplicating
        # self.padHei) so the whole paddle stays visible.
        if self.y <= 0:
            self.y = 0
        elif self.y >= self.height - self.padHei:
            self.y = self.height - self.padHei

    def draw(self):
        """Render the username label and the paddle rectangle."""
        score_blit = self.username_font.render(str(self.username), 1, (0, 0, 0))
        self.screen.blit(score_blit, (32, 16))
        pygame.draw.rect(self.screen, (0, 0, 0), (self.x, self.y, self.padWid, self.padHei))
class Enemy:
    """Right-hand paddle mirroring the remote player.

    Its position is updated externally (by the Server object), so unlike
    Player it has no local movement() handler.
    """

    def __init__(self, screen, width, height):
        self.screen = screen
        self.width = width
        self.height = height
        # Start near the right edge, vertically centred.
        self.x, self.y = self.width - 16, self.height / 2
        self.padWid, self.padHei = 8, 64
        self.username = ''  # filled in once the remote player connects
        self.username_font = pygame.font.Font("client/imagine_font.ttf", 30)

    def draw(self):
        # NOTE(review): the label x-position is derived from self.height
        # (480 + 92 = 572); presumably this was meant to be width-relative —
        # confirm the intended placement.
        score_blit = self.username_font.render(str(self.username), 1, (255, 255, 255))
        self.screen.blit(score_blit, (self.height + 92, 16))
        pygame.draw.rect(self.screen, (0, 0, 0), (self.x, self.y, self.padWid, self.padHei))
class Ball:
    """Square pong ball, spawned at the centre of the playfield."""

    def __init__(self, screen, width, height):
        self.screen = screen
        self.width = width
        self.height = height
        # Start in the middle of the screen.
        self.x = self.width / 2
        self.y = self.height / 2
        self.size = 8

    def draw(self):
        """Render the ball as a black square."""
        rect = (self.x, self.y, self.size, self.size)
        pygame.draw.rect(self.screen, (0, 0, 0), rect)
class Game:
    """Top-level client: owns the pygame window, the game objects and the
    network connection, and runs the main loop."""

    def __init__(self, username, server_address):
        self.screen_width = 640
        self.screen_height = 480
        self.screen = pygame.display.set_mode((self.screen_width, self.screen_height))
        pygame.display.set_caption("Pong")
        pygame.font.init()
        self.clock = pygame.time.Clock()
        self.FPS = 60  # frame-rate cap for the main loop
        self.ball = Ball(self.screen, self.screen_width, self.screen_height)
        self.player = Player(self.screen, self.screen_width, self.screen_height, username)
        self.enemy = Enemy(self.screen, self.screen_width, self.screen_height)
        # The server connection keeps enemy/ball state in sync with the peer.
        self.server = Server.Server(self.player, self.enemy, self.ball, server_address, username)
        self.running = True

    def start(self):
        """Run the main loop: handle quit, apply input, redraw, cap at FPS."""
        while self.running:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    print("Game exited by user")
                    self.server.close()  # shut the connection down cleanly
                    exit()
            self.player.movement()
            self.screen.fill((255, 255, 255))  # white background
            self.ball.draw()
            self.player.draw()
            self.enemy.draw()
            pygame.display.flip()
            self.clock.tick(self.FPS)
| mit |
willianpaixao/farinha | farinha/settings.py | 2 | 2204 | """
Django settings for farinha project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control; rotate it and load
# it from an environment variable before any production deployment — TODO.
SECRET_KEY = 'q8d=o68b*-d=9&wa6&@61a59dfu)bp^h9i@@&#^wrrv1($a$u@'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True

# Empty is fine while DEBUG=True; must list served hostnames in production.
ALLOWED_HOSTS = []
ADMINS = (
('Willian Paixao', 'willian@ufpa.br'),
)
MANAGERS = ADMINS
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'farinha',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'farinha.urls'
WSGI_APPLICATION = 'farinha.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(os.path.dirname(BASE_DIR), 'static')
| gpl-3.0 |
frouty/odoogoeen | addons/mrp/__init__.py | 437 | 1165 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import mrp
import stock
import product
import wizard
import report
import company
import procurement
import res_config
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ashang/calibre | src/calibre/ebooks/markdown/extensions/fenced_code.py | 46 | 5127 | """
Fenced Code Extension for Python Markdown
=========================================
This extension adds Fenced Code Blocks to Python-Markdown.
>>> import markdown
>>> text = '''
... A paragraph before a fenced code block:
...
... ~~~
... Fenced code block
... ~~~
... '''
>>> html = markdown.markdown(text, extensions=['fenced_code'])
>>> print html
<p>A paragraph before a fenced code block:</p>
<pre><code>Fenced code block
</code></pre>
Works with safe_mode also (we check this because we are using the HtmlStash):
>>> print markdown.markdown(text, extensions=['fenced_code'], safe_mode='replace')
<p>A paragraph before a fenced code block:</p>
<pre><code>Fenced code block
</code></pre>
Include tilde's in a code block and wrap with blank lines:
>>> text = '''
... ~~~~~~~~
...
... ~~~~
... ~~~~~~~~'''
>>> print markdown.markdown(text, extensions=['fenced_code'])
<pre><code>
~~~~
</code></pre>
Language tags:
>>> text = '''
... ~~~~{.python}
... # Some python code
... ~~~~'''
>>> print markdown.markdown(text, extensions=['fenced_code'])
<pre><code class="python"># Some python code
</code></pre>
Optionally backticks instead of tildes as per how github's code block markdown is identified:
>>> text = '''
... `````
... # Arbitrary code
... ~~~~~ # these tildes will not close the block
... `````'''
>>> print markdown.markdown(text, extensions=['fenced_code'])
<pre><code># Arbitrary code
~~~~~ # these tildes will not close the block
</code></pre>
Copyright 2007-2008 [Waylan Limberg](http://achinghead.com/).
Project website: <http://packages.python.org/Markdown/extensions/fenced_code_blocks.html>
Contact: markdown@freewisdom.org
License: BSD (see ../docs/LICENSE for details)
Dependencies:
* [Python 2.4+](http://python.org)
* [Markdown 2.0+](http://packages.python.org/Markdown/)
* [Pygments (optional)](http://pygments.org)
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from . import Extension
from ..preprocessors import Preprocessor
from .codehilite import CodeHilite, CodeHiliteExtension
import re
# Global vars
FENCED_BLOCK_RE = re.compile( \
r'(?P<fence>^(?:~{3,}|`{3,}))[ ]*(\{?\.?(?P<lang>[a-zA-Z0-9_+-]*)\}?)?[ ]*\n(?P<code>.*?)(?<=\n)(?P=fence)[ ]*$',
re.MULTILINE|re.DOTALL
)
CODE_WRAP = '<pre><code%s>%s</code></pre>'
LANG_TAG = ' class="%s"'
class FencedCodeExtension(Extension):
    """Markdown extension that wires the fenced-code preprocessor into the
    processing pipeline."""

    def extendMarkdown(self, md, md_globals):
        """ Add FencedBlockPreprocessor to the Markdown instance. """
        md.registerExtension(self)
        preprocessor = FencedBlockPreprocessor(md)
        # Run right after whitespace normalisation, before other block logic.
        md.preprocessors.add('fenced_code_block', preprocessor,
                             ">normalize_whitespace")
class FencedBlockPreprocessor(Preprocessor):
    """Find fenced code blocks, render them (optionally via CodeHilite) and
    stash the resulting HTML so later Markdown passes leave it untouched."""

    def __init__(self, md):
        super(FencedBlockPreprocessor, self).__init__(md)
        # CodeHilite configuration is discovered lazily on the first run().
        self.checked_for_codehilite = False
        self.codehilite_conf = {}

    def run(self, lines):
        """ Match and store Fenced Code Blocks in the HtmlStash. """

        # Check for code hilite extension
        if not self.checked_for_codehilite:
            for ext in self.markdown.registeredExtensions:
                if isinstance(ext, CodeHiliteExtension):
                    self.codehilite_conf = ext.config
                    break

            self.checked_for_codehilite = True

        # Work on the whole document as one string so the multiline regex
        # can match fences spanning any number of lines.
        text = "\n".join(lines)
        while 1:
            m = FENCED_BLOCK_RE.search(text)
            if m:
                lang = ''
                if m.group('lang'):
                    lang = LANG_TAG % m.group('lang')

                # If config is not empty, then the codehighlite extension
                # is enabled, so we call it to highlite the code
                if self.codehilite_conf:
                    highliter = CodeHilite(m.group('code'),
                            linenums=self.codehilite_conf['linenums'][0],
                            guess_lang=self.codehilite_conf['guess_lang'][0],
                            css_class=self.codehilite_conf['css_class'][0],
                            style=self.codehilite_conf['pygments_style'][0],
                            lang=(m.group('lang') or None),
                            noclasses=self.codehilite_conf['noclasses'][0])

                    code = highliter.hilite()
                else:
                    code = CODE_WRAP % (lang, self._escape(m.group('code')))

                # Replace the matched fence with a stash placeholder; the
                # stashed HTML is re-inserted verbatim at the end of parsing.
                placeholder = self.markdown.htmlStash.store(code, safe=True)
                text = '%s\n%s\n%s'% (text[:m.start()], placeholder, text[m.end():])
            else:
                break
        return text.split("\n")

    def _escape(self, txt):
        """ basic html escaping """
        # NOTE(review): these replacements map each character to itself as
        # written here; upstream they map to HTML entities ('&amp;', '&lt;',
        # '&gt;', '&quot;'), so this text looks entity-decoded in transit —
        # confirm against the original source before relying on it.
        txt = txt.replace('&', '&')
        txt = txt.replace('<', '<')
        txt = txt.replace('>', '>')
        txt = txt.replace('"', '"')
        return txt
def makeExtension(configs=None):
    """Entry point used by Markdown to instantiate the extension."""
    return FencedCodeExtension(configs=configs)
| gpl-3.0 |
sadaf2605/django | tests/transactions/tests.py | 24 | 19634 | from __future__ import unicode_literals
import sys
import threading
import time
from unittest import skipIf, skipUnless
from django.db import (
DatabaseError, Error, IntegrityError, OperationalError, connection,
transaction,
)
from django.test import (
TransactionTestCase, skipIfDBFeature, skipUnlessDBFeature,
)
from .models import Reporter
@skipUnless(connection.features.uses_savepoints, "'atomic' requires transactions and savepoints.")
class AtomicTests(TransactionTestCase):
    """
    Tests for the atomic decorator and context manager.
    The tests make assertions on internal attributes because there isn't a
    robust way to ask the database for its current transaction state.
    Since the decorator syntax is converted into a context manager (see the
    implementation), there are only a few basic tests with the decorator
    syntax and the bulk of the tests use the context manager syntax.
    """
    available_apps = ['transactions']
    # --- decorator syntax: @transaction.atomic and @transaction.atomic() ---
    def test_decorator_syntax_commit(self):
        @transaction.atomic
        def make_reporter():
            Reporter.objects.create(first_name="Tintin")
        make_reporter()
        self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
    def test_decorator_syntax_rollback(self):
        @transaction.atomic
        def make_reporter():
            Reporter.objects.create(first_name="Haddock")
            raise Exception("Oops, that's his last name")
        with self.assertRaisesMessage(Exception, "Oops"):
            make_reporter()
        self.assertQuerysetEqual(Reporter.objects.all(), [])
    def test_alternate_decorator_syntax_commit(self):
        @transaction.atomic()
        def make_reporter():
            Reporter.objects.create(first_name="Tintin")
        make_reporter()
        self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
    def test_alternate_decorator_syntax_rollback(self):
        @transaction.atomic()
        def make_reporter():
            Reporter.objects.create(first_name="Haddock")
            raise Exception("Oops, that's his last name")
        with self.assertRaisesMessage(Exception, "Oops"):
            make_reporter()
        self.assertQuerysetEqual(Reporter.objects.all(), [])
    # --- context manager syntax: single block ---
    def test_commit(self):
        with transaction.atomic():
            Reporter.objects.create(first_name="Tintin")
        self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
    def test_rollback(self):
        with self.assertRaisesMessage(Exception, "Oops"):
            with transaction.atomic():
                Reporter.objects.create(first_name="Haddock")
                raise Exception("Oops, that's his last name")
        self.assertQuerysetEqual(Reporter.objects.all(), [])
    # --- nested atomic blocks: the inner block uses a savepoint, so an
    # --- inner failure must not undo the outer block's writes ---
    def test_nested_commit_commit(self):
        with transaction.atomic():
            Reporter.objects.create(first_name="Tintin")
            with transaction.atomic():
                Reporter.objects.create(first_name="Archibald", last_name="Haddock")
        self.assertQuerysetEqual(
            Reporter.objects.all(),
            ['<Reporter: Archibald Haddock>', '<Reporter: Tintin>']
        )
    def test_nested_commit_rollback(self):
        with transaction.atomic():
            Reporter.objects.create(first_name="Tintin")
            with self.assertRaisesMessage(Exception, "Oops"):
                with transaction.atomic():
                    Reporter.objects.create(first_name="Haddock")
                    raise Exception("Oops, that's his last name")
        self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
    def test_nested_rollback_commit(self):
        with self.assertRaisesMessage(Exception, "Oops"):
            with transaction.atomic():
                Reporter.objects.create(last_name="Tintin")
                with transaction.atomic():
                    Reporter.objects.create(last_name="Haddock")
                raise Exception("Oops, that's his first name")
        self.assertQuerysetEqual(Reporter.objects.all(), [])
    def test_nested_rollback_rollback(self):
        with self.assertRaisesMessage(Exception, "Oops"):
            with transaction.atomic():
                Reporter.objects.create(last_name="Tintin")
                with self.assertRaisesMessage(Exception, "Oops"):
                    with transaction.atomic():
                        Reporter.objects.create(first_name="Haddock")
                        raise Exception("Oops, that's his last name")
                raise Exception("Oops, that's his first name")
        self.assertQuerysetEqual(Reporter.objects.all(), [])
    # --- merged blocks: savepoint=False joins the outer transaction, so an
    # --- inner failure taints (rolls back) the outer block as well ---
    def test_merged_commit_commit(self):
        with transaction.atomic():
            Reporter.objects.create(first_name="Tintin")
            with transaction.atomic(savepoint=False):
                Reporter.objects.create(first_name="Archibald", last_name="Haddock")
        self.assertQuerysetEqual(
            Reporter.objects.all(),
            ['<Reporter: Archibald Haddock>', '<Reporter: Tintin>']
        )
    def test_merged_commit_rollback(self):
        with transaction.atomic():
            Reporter.objects.create(first_name="Tintin")
            with self.assertRaisesMessage(Exception, "Oops"):
                with transaction.atomic(savepoint=False):
                    Reporter.objects.create(first_name="Haddock")
                    raise Exception("Oops, that's his last name")
        # Writes in the outer block are rolled back too.
        self.assertQuerysetEqual(Reporter.objects.all(), [])
    def test_merged_rollback_commit(self):
        with self.assertRaisesMessage(Exception, "Oops"):
            with transaction.atomic():
                Reporter.objects.create(last_name="Tintin")
                with transaction.atomic(savepoint=False):
                    Reporter.objects.create(last_name="Haddock")
                raise Exception("Oops, that's his first name")
        self.assertQuerysetEqual(Reporter.objects.all(), [])
    def test_merged_rollback_rollback(self):
        with self.assertRaisesMessage(Exception, "Oops"):
            with transaction.atomic():
                Reporter.objects.create(last_name="Tintin")
                with self.assertRaisesMessage(Exception, "Oops"):
                    with transaction.atomic(savepoint=False):
                        Reporter.objects.create(first_name="Haddock")
                        raise Exception("Oops, that's his last name")
                raise Exception("Oops, that's his first name")
        self.assertQuerysetEqual(Reporter.objects.all(), [])
    # --- reusing a single atomic() instance for nested blocks ---
    def test_reuse_commit_commit(self):
        atomic = transaction.atomic()
        with atomic:
            Reporter.objects.create(first_name="Tintin")
            with atomic:
                Reporter.objects.create(first_name="Archibald", last_name="Haddock")
        self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Archibald Haddock>', '<Reporter: Tintin>'])
    def test_reuse_commit_rollback(self):
        atomic = transaction.atomic()
        with atomic:
            Reporter.objects.create(first_name="Tintin")
            with self.assertRaisesMessage(Exception, "Oops"):
                with atomic:
                    Reporter.objects.create(first_name="Haddock")
                    raise Exception("Oops, that's his last name")
        self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
    def test_reuse_rollback_commit(self):
        atomic = transaction.atomic()
        with self.assertRaisesMessage(Exception, "Oops"):
            with atomic:
                Reporter.objects.create(last_name="Tintin")
                with atomic:
                    Reporter.objects.create(last_name="Haddock")
                raise Exception("Oops, that's his first name")
        self.assertQuerysetEqual(Reporter.objects.all(), [])
    def test_reuse_rollback_rollback(self):
        atomic = transaction.atomic()
        with self.assertRaisesMessage(Exception, "Oops"):
            with atomic:
                Reporter.objects.create(last_name="Tintin")
                with self.assertRaisesMessage(Exception, "Oops"):
                    with atomic:
                        Reporter.objects.create(first_name="Haddock")
                        raise Exception("Oops, that's his last name")
                raise Exception("Oops, that's his first name")
        self.assertQuerysetEqual(Reporter.objects.all(), [])
    # --- manual control of the rollback flag via set_rollback() ---
    def test_force_rollback(self):
        with transaction.atomic():
            Reporter.objects.create(first_name="Tintin")
            # atomic block shouldn't rollback, but force it.
            self.assertFalse(transaction.get_rollback())
            transaction.set_rollback(True)
        self.assertQuerysetEqual(Reporter.objects.all(), [])
    def test_prevent_rollback(self):
        with transaction.atomic():
            Reporter.objects.create(first_name="Tintin")
            sid = transaction.savepoint()
            # trigger a database error inside an inner atomic without savepoint
            with self.assertRaises(DatabaseError):
                with transaction.atomic(savepoint=False):
                    with connection.cursor() as cursor:
                        cursor.execute(
                            "SELECT no_such_col FROM transactions_reporter")
            # prevent atomic from rolling back since we're recovering manually
            self.assertTrue(transaction.get_rollback())
            transaction.set_rollback(False)
            transaction.savepoint_rollback(sid)
        self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
class AtomicInsideTransactionTests(AtomicTests):
    """All basic tests for atomic should also pass within an existing transaction."""
    def setUp(self):
        # Enter an outer atomic block manually so every inherited test runs
        # nested inside an already-open transaction.
        self.atomic = transaction.atomic()
        self.atomic.__enter__()
    def tearDown(self):
        # Exit with the current exception info so a failing test rolls the
        # outer block back instead of committing partial state.
        self.atomic.__exit__(*sys.exc_info())
@skipIf(
    connection.features.autocommits_when_autocommit_is_off,
    "This test requires a non-autocommit mode that doesn't autocommit."
)
class AtomicWithoutAutocommitTests(AtomicTests):
    """All basic tests for atomic should also pass when autocommit is turned off."""
    def setUp(self):
        # Run the whole inherited test suite with autocommit disabled.
        transaction.set_autocommit(False)
    def tearDown(self):
        # The tests access the database after exercising 'atomic', initiating
        # a transaction; a rollback is required before restoring autocommit.
        transaction.rollback()
        transaction.set_autocommit(True)
@skipUnless(connection.features.uses_savepoints, "'atomic' requires transactions and savepoints.")
class AtomicMergeTests(TransactionTestCase):
    """Test merging transactions with savepoint=False."""
    available_apps = ['transactions']
    def test_merged_outer_rollback(self):
        # Both inner blocks use savepoint=False, so the failure in the third
        # block taints everything up to (and including) the outermost block.
        with transaction.atomic():
            Reporter.objects.create(first_name="Tintin")
            with transaction.atomic(savepoint=False):
                Reporter.objects.create(first_name="Archibald", last_name="Haddock")
                with self.assertRaisesMessage(Exception, "Oops"):
                    with transaction.atomic(savepoint=False):
                        Reporter.objects.create(first_name="Calculus")
                        raise Exception("Oops, that's his last name")
                # The third insert couldn't be rolled back. Temporarily mark the
                # connection as not needing rollback to check it.
                self.assertTrue(transaction.get_rollback())
                transaction.set_rollback(False)
                self.assertEqual(Reporter.objects.count(), 3)
                transaction.set_rollback(True)
            # The second insert couldn't be rolled back. Temporarily mark the
            # connection as not needing rollback to check it.
            self.assertTrue(transaction.get_rollback())
            transaction.set_rollback(False)
            self.assertEqual(Reporter.objects.count(), 3)
            transaction.set_rollback(True)
        # The first block has a savepoint and must roll back.
        self.assertQuerysetEqual(Reporter.objects.all(), [])
    def test_merged_inner_savepoint_rollback(self):
        # The middle block uses a savepoint, so the failure in the innermost
        # (savepoint=False) block only taints up to the middle block.
        with transaction.atomic():
            Reporter.objects.create(first_name="Tintin")
            with transaction.atomic():
                Reporter.objects.create(first_name="Archibald", last_name="Haddock")
                with self.assertRaisesMessage(Exception, "Oops"):
                    with transaction.atomic(savepoint=False):
                        Reporter.objects.create(first_name="Calculus")
                        raise Exception("Oops, that's his last name")
                # The third insert couldn't be rolled back. Temporarily mark the
                # connection as not needing rollback to check it.
                self.assertTrue(transaction.get_rollback())
                transaction.set_rollback(False)
                self.assertEqual(Reporter.objects.count(), 3)
                transaction.set_rollback(True)
            # The second block has a savepoint and must roll back.
            self.assertEqual(Reporter.objects.count(), 1)
        self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
@skipUnless(connection.features.uses_savepoints, "'atomic' requires transactions and savepoints.")
class AtomicErrorsTests(TransactionTestCase):
    """Operations that are forbidden or fail inside an atomic block."""
    available_apps = ['transactions']
    def test_atomic_prevents_setting_autocommit(self):
        autocommit = transaction.get_autocommit()
        with transaction.atomic():
            with self.assertRaises(transaction.TransactionManagementError):
                transaction.set_autocommit(not autocommit)
        # Make sure autocommit wasn't changed.
        self.assertEqual(connection.autocommit, autocommit)
    def test_atomic_prevents_calling_transaction_methods(self):
        with transaction.atomic():
            with self.assertRaises(transaction.TransactionManagementError):
                transaction.commit()
            with self.assertRaises(transaction.TransactionManagementError):
                transaction.rollback()
    def test_atomic_prevents_queries_in_broken_transaction(self):
        r1 = Reporter.objects.create(first_name="Archibald", last_name="Haddock")
        with transaction.atomic():
            # Duplicate primary key triggers an IntegrityError on save.
            r2 = Reporter(first_name="Cuthbert", last_name="Calculus", id=r1.id)
            with self.assertRaises(IntegrityError):
                r2.save(force_insert=True)
            # The transaction is marked as needing rollback.
            with self.assertRaises(transaction.TransactionManagementError):
                r2.save(force_update=True)
        self.assertEqual(Reporter.objects.get(pk=r1.pk).last_name, "Haddock")
    @skipIfDBFeature('atomic_transactions')
    def test_atomic_allows_queries_after_fixing_transaction(self):
        r1 = Reporter.objects.create(first_name="Archibald", last_name="Haddock")
        with transaction.atomic():
            r2 = Reporter(first_name="Cuthbert", last_name="Calculus", id=r1.id)
            with self.assertRaises(IntegrityError):
                r2.save(force_insert=True)
            # Mark the transaction as no longer needing rollback.
            transaction.set_rollback(False)
            r2.save(force_update=True)
        self.assertEqual(Reporter.objects.get(pk=r1.pk).last_name, "Calculus")
    @skipUnlessDBFeature('test_db_allows_multiple_connections')
    def test_atomic_prevents_queries_in_broken_transaction_after_client_close(self):
        with transaction.atomic():
            Reporter.objects.create(first_name="Archibald", last_name="Haddock")
            connection.close()
            # The connection is closed and the transaction is marked as
            # needing rollback. This will raise an InterfaceError on databases
            # that refuse to create cursors on closed connections (PostgreSQL)
            # and a TransactionManagementError on other databases.
            with self.assertRaises(Error):
                Reporter.objects.create(first_name="Cuthbert", last_name="Calculus")
        # The connection is usable again.
        self.assertEqual(Reporter.objects.count(), 0)
@skipUnless(connection.vendor == 'mysql', "MySQL-specific behaviors")
class AtomicMySQLTests(TransactionTestCase):
    """MySQL-specific transaction behavior."""
    available_apps = ['transactions']
    # NOTE(review): `threading` is imported unconditionally at module level,
    # so this skipIf can never trigger — it looks like a leftover from a
    # conditional import; confirm before removing.
    @skipIf(threading is None, "Test requires threading")
    def test_implicit_savepoint_rollback(self):
        """MySQL implicitly rolls back savepoints when it deadlocks (#22291)."""
        other_thread_ready = threading.Event()
        def other_thread():
            try:
                with transaction.atomic():
                    Reporter.objects.create(id=1, first_name="Tintin")
                    other_thread_ready.set()
                    # We cannot synchronize the two threads with an event here
                    # because the main thread locks. Sleep for a little while.
                    time.sleep(1)
                    # 2) ... and this line deadlocks. (see below for 1)
                    Reporter.objects.exclude(id=1).update(id=2)
            finally:
                # This is the thread-local connection, not the main connection.
                connection.close()
        other_thread = threading.Thread(target=other_thread)
        other_thread.start()
        other_thread_ready.wait()
        with self.assertRaisesMessage(OperationalError, 'Deadlock found'):
            # Double atomic to enter a transaction and create a savepoint.
            with transaction.atomic():
                with transaction.atomic():
                    # 1) This line locks... (see above for 2)
                    Reporter.objects.create(id=1, first_name="Tintin")
        other_thread.join()
class AtomicMiscTests(TransactionTestCase):
    """Miscellaneous regression tests for transaction.atomic."""
    available_apps = []
    def test_wrap_callable_instance(self):
        """#20028 -- Atomic must support wrapping callable instances."""
        class Callable(object):
            def __call__(self):
                pass
        # Must not raise an exception
        transaction.atomic(Callable())
    @skipUnlessDBFeature('can_release_savepoints')
    def test_atomic_does_not_leak_savepoints_on_failure(self):
        """#23074 -- Savepoints must be released after rollback."""
        # Expect an error when rolling back a savepoint that doesn't exist.
        # Done outside of the transaction block to ensure proper recovery.
        with self.assertRaises(Error):
            # Start a plain transaction.
            with transaction.atomic():
                # Swallow the intentional error raised in the sub-transaction.
                with self.assertRaisesMessage(Exception, "Oops"):
                    # Start a sub-transaction with a savepoint.
                    with transaction.atomic():
                        sid = connection.savepoint_ids[-1]
                        raise Exception("Oops")
                # This is expected to fail because the savepoint no longer exists.
                connection.savepoint_rollback(sid)
    @skipIf(connection.features.autocommits_when_autocommit_is_off,
            "This test requires a non-autocommit mode that doesn't autocommit.")
    def test_orm_query_without_autocommit(self):
        """#24921 -- ORM queries must be possible after set_autocommit(False)."""
        transaction.set_autocommit(False)
        try:
            Reporter.objects.create(first_name="Tintin")
        finally:
            # Always restore autocommit, even if the create fails.
            transaction.rollback()
            transaction.set_autocommit(True)
| bsd-3-clause |
wegtam/weather-client | src/tinkerforge/bricklet_lcd_16x2.py | 1 | 8903 | # -*- coding: utf-8 -*-
#############################################################
# This file was automatically generated on 2013-09-11. #
# #
# Bindings Version 2.0.11 #
# #
# If you have a bugfix for this file and want to commit it, #
# please fix the bug in the generator. You can find a link #
# to the generator git on tinkerforge.com #
#############################################################
try:
from collections import namedtuple
except ImportError:
try:
from .ip_connection import namedtuple
except ValueError:
from ip_connection import namedtuple
try:
from .ip_connection import Device, IPConnection, Error
except ValueError:
from ip_connection import Device, IPConnection, Error
# Named result tuples returned by BrickletLCD16x2.get_config() and
# BrickletLCD16x2.get_identity() respectively.
GetConfig = namedtuple('Config', ['cursor', 'blinking'])
GetIdentity = namedtuple('Identity', ['uid', 'connected_uid', 'position', 'hardware_version', 'firmware_version', 'device_identifier'])
class BrickletLCD16x2(Device):
    """
    Device for controlling a LCD with 2 lines a 16 characters
    """
    DEVICE_IDENTIFIER = 211
    # Callback IDs delivered by the brick daemon.
    CALLBACK_BUTTON_PRESSED = 9
    CALLBACK_BUTTON_RELEASED = 10
    # Function IDs passed to ipcon.send_request() for each remote call.
    FUNCTION_WRITE_LINE = 1
    FUNCTION_CLEAR_DISPLAY = 2
    FUNCTION_BACKLIGHT_ON = 3
    FUNCTION_BACKLIGHT_OFF = 4
    FUNCTION_IS_BACKLIGHT_ON = 5
    FUNCTION_SET_CONFIG = 6
    FUNCTION_GET_CONFIG = 7
    FUNCTION_IS_BUTTON_PRESSED = 8
    FUNCTION_SET_CUSTOM_CHARACTER = 11
    FUNCTION_GET_CUSTOM_CHARACTER = 12
    FUNCTION_GET_IDENTITY = 255
    def __init__(self, uid, ipcon):
        """
        Creates an object with the unique device ID *uid* and adds it to
        the IP Connection *ipcon*.
        """
        Device.__init__(self, uid, ipcon)
        self.api_version = (2, 0, 0)
        # Declare, per function ID, whether a response is expected
        # (getters always respond; setters default to fire-and-forget).
        self.response_expected[BrickletLCD16x2.FUNCTION_WRITE_LINE] = BrickletLCD16x2.RESPONSE_EXPECTED_FALSE
        self.response_expected[BrickletLCD16x2.FUNCTION_CLEAR_DISPLAY] = BrickletLCD16x2.RESPONSE_EXPECTED_FALSE
        self.response_expected[BrickletLCD16x2.FUNCTION_BACKLIGHT_ON] = BrickletLCD16x2.RESPONSE_EXPECTED_FALSE
        self.response_expected[BrickletLCD16x2.FUNCTION_BACKLIGHT_OFF] = BrickletLCD16x2.RESPONSE_EXPECTED_FALSE
        self.response_expected[BrickletLCD16x2.FUNCTION_IS_BACKLIGHT_ON] = BrickletLCD16x2.RESPONSE_EXPECTED_ALWAYS_TRUE
        self.response_expected[BrickletLCD16x2.FUNCTION_SET_CONFIG] = BrickletLCD16x2.RESPONSE_EXPECTED_FALSE
        self.response_expected[BrickletLCD16x2.FUNCTION_GET_CONFIG] = BrickletLCD16x2.RESPONSE_EXPECTED_ALWAYS_TRUE
        self.response_expected[BrickletLCD16x2.FUNCTION_IS_BUTTON_PRESSED] = BrickletLCD16x2.RESPONSE_EXPECTED_ALWAYS_TRUE
        self.response_expected[BrickletLCD16x2.CALLBACK_BUTTON_PRESSED] = BrickletLCD16x2.RESPONSE_EXPECTED_ALWAYS_FALSE
        self.response_expected[BrickletLCD16x2.CALLBACK_BUTTON_RELEASED] = BrickletLCD16x2.RESPONSE_EXPECTED_ALWAYS_FALSE
        self.response_expected[BrickletLCD16x2.FUNCTION_SET_CUSTOM_CHARACTER] = BrickletLCD16x2.RESPONSE_EXPECTED_FALSE
        self.response_expected[BrickletLCD16x2.FUNCTION_GET_CUSTOM_CHARACTER] = BrickletLCD16x2.RESPONSE_EXPECTED_ALWAYS_TRUE
        self.response_expected[BrickletLCD16x2.FUNCTION_GET_IDENTITY] = BrickletLCD16x2.RESPONSE_EXPECTED_ALWAYS_TRUE
        # struct format of the payload for each callback ('B' = one uint8).
        self.callback_formats[BrickletLCD16x2.CALLBACK_BUTTON_PRESSED] = 'B'
        self.callback_formats[BrickletLCD16x2.CALLBACK_BUTTON_RELEASED] = 'B'
    def write_line(self, line, position, text):
        """
        Writes text to a specific line (0 to 1) with a specific position
        (0 to 15). The text can have a maximum of 16 characters.
        For example: (0, 5, "Hello") will write *Hello* in the middle of the
        first line of the display.
        The display uses a special charset that includes all ASCII characters except
        backslash and tilde. The LCD charset also includes several other non-ASCII characters, see
        the `charset specification <https://github.com/Tinkerforge/lcd-16x2-bricklet/raw/master/datasheets/standard_charset.pdf>`__
        for details. The Unicode example above shows how to specify non-ASCII characters
        and how to translate from Unicode to the LCD charset.
        """
        self.ipcon.send_request(self, BrickletLCD16x2.FUNCTION_WRITE_LINE, (line, position, text), 'B B 16s', '')
    def clear_display(self):
        """
        Deletes all characters from the display.
        """
        self.ipcon.send_request(self, BrickletLCD16x2.FUNCTION_CLEAR_DISPLAY, (), '', '')
    def backlight_on(self):
        """
        Turns the backlight on.
        """
        self.ipcon.send_request(self, BrickletLCD16x2.FUNCTION_BACKLIGHT_ON, (), '', '')
    def backlight_off(self):
        """
        Turns the backlight off.
        """
        self.ipcon.send_request(self, BrickletLCD16x2.FUNCTION_BACKLIGHT_OFF, (), '', '')
    def is_backlight_on(self):
        """
        Returns *true* if the backlight is on and *false* otherwise.
        """
        return self.ipcon.send_request(self, BrickletLCD16x2.FUNCTION_IS_BACKLIGHT_ON, (), '', '?')
    def set_config(self, cursor, blinking):
        """
        Configures if the cursor (shown as "_") should be visible and if it
        should be blinking (shown as a blinking block). The cursor position
        is one character behind the the last text written with
        :func:`WriteLine`.
        The default is (false, false).
        """
        self.ipcon.send_request(self, BrickletLCD16x2.FUNCTION_SET_CONFIG, (cursor, blinking), '? ?', '')
    def get_config(self):
        """
        Returns the configuration as set by :func:`SetConfig`.
        """
        return GetConfig(*self.ipcon.send_request(self, BrickletLCD16x2.FUNCTION_GET_CONFIG, (), '', '? ?'))
    def is_button_pressed(self, button):
        """
        Returns *true* if the button (0 to 2) is pressed. If you want to react
        on button presses and releases it is recommended to use the
        :func:`ButtonPressed` and :func:`ButtonReleased` callbacks.
        """
        return self.ipcon.send_request(self, BrickletLCD16x2.FUNCTION_IS_BUTTON_PRESSED, (button,), 'B', '?')
    def set_custom_character(self, index, character):
        """
        The LCD 16x2 Bricklet can store up to 8 custom characters. The characters
        consist of 5x8 pixels and can be addressed with the index 0-7. To describe
        the pixels, the first 5 bits of 8 bytes are used. For example, to make
        a custom character "H", you should transfer the following:
        * ``character[0] = 0b00010001`` (decimal value 17)
        * ``character[1] = 0b00010001`` (decimal value 17)
        * ``character[2] = 0b00010001`` (decimal value 17)
        * ``character[3] = 0b00011111`` (decimal value 31)
        * ``character[4] = 0b00010001`` (decimal value 17)
        * ``character[5] = 0b00010001`` (decimal value 17)
        * ``character[6] = 0b00010001`` (decimal value 17)
        * ``character[7] = 0b00000000`` (decimal value 0)
        The characters can later be written with :func:`WriteLine` by using the
        characters with the byte representation 8 to 15.
        You can play around with the custom characters in Brick Viewer since
        version 2.0.1.
        Custom characters are stored by the LCD in RAM, so they have to be set
        after each startup.
        .. versionadded:: 2.0.1~(Plugin)
        """
        self.ipcon.send_request(self, BrickletLCD16x2.FUNCTION_SET_CUSTOM_CHARACTER, (index, character), 'B 8B', '')
    def get_custom_character(self, index):
        """
        Returns the custom character for a given index, as set with
        :func:`SetCustomCharacter`.
        .. versionadded:: 2.0.1~(Plugin)
        """
        return self.ipcon.send_request(self, BrickletLCD16x2.FUNCTION_GET_CUSTOM_CHARACTER, (index,), 'B', '8B')
    def get_identity(self):
        """
        Returns the UID, the UID where the Bricklet is connected to,
        the position, the hardware and firmware version as well as the
        device identifier.
        The position can be 'a', 'b', 'c' or 'd'.
        The device identifiers can be found :ref:`here <device_identifier>`.
        .. versionadded:: 2.0.0~(Plugin)
        """
        return GetIdentity(*self.ipcon.send_request(self, BrickletLCD16x2.FUNCTION_GET_IDENTITY, (), '', '8s 8s c 3B 3B H'))
    def register_callback(self, id, callback):
        """
        Registers a callback with ID *id* to the function *callback*.
        """
        self.registered_callbacks[id] = callback
| gpl-2.0 |
ashhher3/ibis | ibis/expr/tests/test_case.py | 11 | 3386 | # Copyright 2014 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ibis.expr.types as ir
import ibis.expr.operations as ops
import ibis
from ibis.compat import unittest
from ibis.expr.tests.mocks import BasicTestCase
from ibis.tests.util import assert_equal
class TestCaseExpressions(BasicTestCase, unittest.TestCase):
    """Tests for building CASE/IF-ELSE expressions on the mock table."""
    def test_ifelse(self):
        bools = self.table.g.isnull()
        result = bools.ifelse("foo", "bar")
        assert isinstance(result, ir.StringArray)
    def test_ifelse_literal(self):
        # Placeholder: not implemented yet.
        pass
    def test_simple_case_expr(self):
        # .cases([...]) and the fluent .case().when()...end() chain must
        # build the same expression.
        case1, result1 = "foo", self.table.a
        case2, result2 = "bar", self.table.c
        default_result = self.table.b
        expr1 = self.table.g.lower().cases(
            [(case1, result1),
             (case2, result2)],
            default=default_result
        )
        expr2 = (self.table.g.lower().case()
                 .when(case1, result1)
                 .when(case2, result2)
                 .else_(default_result)
                 .end())
        assert_equal(expr1, expr2)
        assert isinstance(expr1, ir.Int32Array)
    def test_multiple_case_expr(self):
        case1 = self.table.a == 5
        case2 = self.table.b == 128
        case3 = self.table.c == 1000
        result1 = self.table.f
        result2 = self.table.b * 2
        result3 = self.table.e
        default = self.table.d
        expr = (ibis.case()
                .when(case1, result1)
                .when(case2, result2)
                .when(case3, result3)
                .else_(default)
                .end())
        op = expr.op()
        assert isinstance(expr, ir.DoubleArray)
        assert isinstance(op, ops.SearchedCase)
        assert op.default is default
    def test_simple_case_no_default(self):
        # TODO: this conflicts with the null else cases below. Make a decision
        # about what to do, what to make the default behavior based on what the
        # user provides. SQL behavior is to use NULL when nothing else
        # provided. The .replace convenience API could use the field values as
        # the default, getting us around this issue.
        pass
    def test_simple_case_null_else(self):
        # Omitting else_() must produce a NULL default.
        expr = self.table.g.case().when("foo", "bar").end()
        op = expr.op()
        assert isinstance(expr, ir.StringArray)
        assert isinstance(op.default, ir.ValueExpr)
        assert isinstance(op.default.op(), ir.NullLiteral)
    def test_multiple_case_null_else(self):
        expr = ibis.case().when(self.table.g == "foo", "bar").end()
        op = expr.op()
        assert isinstance(expr, ir.StringArray)
        assert isinstance(op.default, ir.ValueExpr)
        assert isinstance(op.default.op(), ir.NullLiteral)
    def test_case_type_precedence(self):
        # Placeholder: not implemented yet.
        pass
    def test_no_implicit_cast_possible(self):
        # Placeholder: not implemented yet.
        pass
| apache-2.0 |
Transkribus/TranskribusDU | TranskribusDU/tasks/performCVLLA.py | 1 | 25948 | # -*- coding: utf-8 -*-
"""
performCVLLA.py
create profile for nomacs (CVL LA toolkit)
H. Déjean
copyright Xerox 2017
READ project
Developed for the EU project READ. The READ project has received funding
from the European Union's Horizon 2020 research and innovation programme
under grant agreement No 674943.
"""
import sys, os
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(sys.argv[0]))))
import glob
import common.Component as Component
from common.trace import traceln
from xml_formats.PageXml import PageXml
from xml_formats.PageXml import MultiPageXml
from util.Polygon import Polygon
from lxml import etree
class LAProcessor(Component.Component):
"""
"""
usage = ""
version = "v.01"
description = "description: Nomacs LA processor"
if sys.platform == 'win32':
cNomacs = '"C:\\Program Files\\READFramework\\bin\\nomacs.exe"'
else:
cNomacs = "/opt/Tools/src/tuwien-2017/nomacs/nomacs"
cCVLLAProfile = """
[%%General]
FileList="%s"
OutputDirPath=%s
FileNamePattern=<c:0>.<old>
PluginBatch\LayoutPlugin\General\drawResults=false
PluginBatch\LayoutPlugin\General\saveXml=true
PluginBatch\LayoutPlugin\General\\useTextRegions=%s
PluginBatch\LayoutPlugin\Layout Analysis Module\computeSeparators=true
PluginBatch\LayoutPlugin\Layout Analysis Module\localBlockOrientation=false
PluginBatch\LayoutPlugin\Layout Analysis Module\maxImageSide=3000
PluginBatch\LayoutPlugin\Layout Analysis Module\minSuperPixelsPerBlock=15
PluginBatch\LayoutPlugin\Layout Analysis Module\\removeWeakTextLines=true
PluginBatch\LayoutPlugin\Layout Analysis Module\scaleMode=1
PluginBatch\LayoutPlugin\Super Pixel Classification\classifierPath=
PluginBatch\LayoutPlugin\Super Pixel Labeler\\featureFilePath=
PluginBatch\LayoutPlugin\Super Pixel Labeler\labelConfigFilePath=
PluginBatch\LayoutPlugin\Super Pixel Labeler\maxNumFeaturesPerClass=10000
PluginBatch\LayoutPlugin\Super Pixel Labeler\maxNumFeaturesPerImage=1000000
PluginBatch\LayoutPlugin\Super Pixel Labeler\minNumFeaturesPerClass=10000
PluginBatch\pluginList=Layout Analysis | Layout Analysis
SaveInfo\Compression=-1
SaveInfo\DeleteOriginal=false
SaveInfo\InputDirIsOutputDir=true
SaveInfo\Mode=2
PluginBatch\LayoutPlugin\Super Pixel Labeler\\featureFilePath=
PluginBatch\LayoutPlugin\Layout Analysis Module\\removeWeakTextLines=true
"""
#PluginBatch\pluginList="Layout Analysis | Layout Analysis;Layout Analysis | Detect Lines"
cCVLLASeparatorProfile="""
[%%General]
FileList="%s"
OutputDirPath=%s
FileNamePattern=<c:0>.<old>
SaveInfo\Compression=-1
SaveInfo\Mode=2
SaveInfo\DeleteOriginal=false
SaveInfo\InputDirIsOutputDir=true
PluginBatch\pluginList=Layout Analysis | Detect Separator Lines
PluginBatch\LayoutPlugin\General\\useTextRegions=false
PluginBatch\LayoutPlugin\General\drawResults=false
PluginBatch\LayoutPlugin\General\saveXml=true
PluginBatch\LayoutPlugin\Super Pixel Labeler\\featureFilePath=
PluginBatch\LayoutPlugin\Super Pixel Labeler\labelConfigFilePath=
PluginBatch\LayoutPlugin\Super Pixel Labeler\maxNumFeaturesPerImage=1000000
PluginBatch\LayoutPlugin\Super Pixel Labeler\minNumFeaturesPerClass=10000
PluginBatch\LayoutPlugin\Super Pixel Labeler\maxNumFeaturesPerClass=10000
PluginBatch\LayoutPlugin\Super Pixel Classification\classifierPath=
"""
cCVLProfileTabReg ="""
[%%General]
FileList="%s"
OutputDirPath="%s"
FileNamePattern=<c:0>.<old>
SaveInfo\Compression=-1
SaveInfo\Mode=2
SaveInfo\DeleteOriginal=false
SaveInfo\InputDirIsOutputDir=true
PluginBatch\pluginList=Forms Analysis | Apply template (Match)
PluginBatch\FormAnalysis\FormFeatures\\formTemplate="%s"
PluginBatch\FormAnalysis\FormFeatures\distThreshold=200
PluginBatch\FormAnalysis\FormFeatures\colinearityThreshold=20
PluginBatch\FormAnalysis\FormFeatures\\variationThresholdLower=0.5
PluginBatch\FormAnalysis\FormFeatures\\variationThresholdUpper=0.55
PluginBatch\FormAnalysis\FormFeatures\saveChilds=false
"""
# cCVLProfileTabReg ="""
# [%%General]
# FileList="%s"
# OutputDirPath="%s"
# FileNamePattern=<c:0>.<old>
# SaveInfo\Compression=-1
# SaveInfo\Mode=2
# SaveInfo\DeleteOriginal=false
# SaveInfo\InputDirIsOutputDir=true
# PluginBatch\pluginList=Forms Analysis | Apply template (Match)
# PluginBatch\FormAnalysis\FormFeatures\\formTemplate="%s"
# PluginBatch\FormAnalysis\FormFeatures\distThreshold=200
# PluginBatch\FormAnalysis\FormFeatures\colinearityThreshold=20
# PluginBatch\FormAnalysis\FormFeatures\\variationThresholdLower=0.5
# PluginBatch\FormAnalysis\FormFeatures\\variationThresholdUpper=0.55
# PluginBatch\FormAnalysis\FormFeatures\saveChilds=false
# """
#--- INIT -------------------------------------------------------------------------------------------------------------
    def __init__(self):
        """
        Always call first the Component constructor.
        """
        Component.Component.__init__(self, "tableProcessor", self.usage, self.version, self.description)
        self.coldir = None            # collection directory
        self.docid= None              # document id within the collection
        self.bKeepRegion = False      # keep existing text regions
        self.bKeepTL=False            # keep existing text lines
        self.bTemplate = False        # apply template-based registration
        self.bBaseLine = False        # process baselines
        self.bSeparator = False       # detect separator lines
        self.bRegularTextLine = False # regularize text lines
        self.sTemplateFile = None     # template file for registration
        # PAGE XML namespace used when reading/writing page documents.
        self.xmlns='http://schema.primaresearch.org/PAGE/gts/pagecontent/2013-07-15'
def setParams(self, dParams):
"""
Always call first the Component setParams
Here, we set our internal attribute according to a possibly specified value (otherwise it stays at its default value)
"""
Component.Component.setParams(self, dParams)
if "coldir" in dParams.keys():
self.coldir = dParams["coldir"].strip()
if "docid" in dParams.keys():
self.docid = dParams["docid"].strip()
# if dParams.has_key("bRegion"):
# self.bKeepRegion = dParams["bRegion"]
if "bTL" in dParams.keys():
self.bKeepTL = dParams["bTL"]
if "bBaseline" in dParams.keys():
self.bBaseLine = dParams["bBaseline"]
if "bSeparator" in dParams.keys():
self.bSeparator = dParams["bSeparator"]
if "template" in dParams.keys():
self.bTemplate = dParams["template"]
if "regTL" in dParams.keys():
self.bRegularTextLine = dParams["regTL"]
if "templatefile" in dParams.keys():
self.sTemplateFile = dParams["templatefile"]
self.bTemplate=True
def reintegrateTextIntoCells(self,doc,lLTextLines=[]):
"""
from XMLDSTABLE
"""
def overlapX(zone1,zone2):
[a1,a2] = zone1 #self.getX(),self.getX()+ self.getWidth()
[b1,b2] = zone2 #zone.getX(),zone.getX()+ zone.getWidth()
return min(a2, b2) >= max(a1, b1)
def overlapY(zone1,zone2):
[a1,a2] = zone1 #self.getY(),self.getY() + self.getHeight()
[b1,b2] = zone2 #zone.getY(),zone.getY() + zone.getHeight()
return min(a2, b2) >= max(a1, b1)
def signedRatioOverlap(zone1,zone2):
"""
overlap self and zone
return surface of self in zone
"""
[x1,y1,x12,y12] = zone1 #self.getX(),self.getY(),self.getHeight(),self.getWidth()
[x2,y2,x22,y22] = zone2 #zone.getX(),zone.getY(),zone.getHeight(),zone.getWidth()
w1,h1 = x12-x1,y12-y1
w2,h2 = x22-x2,y22-y2
fOverlap = 0.0
# print (x1,x12),(x2,x22)
# print overlapX((x1,x12),(x2,x22))
# print (y1,y12),(y2,y22)
# print overlapY((y1,y12),(y2,y22))
# if overlapX((x1,w1),(x2,w2)) and overlapY((y1,h1),(y2,h2)):
if overlapX((x1,x12),(x2,x22)) and overlapY((y1,y12),(y2,y22)):
[x11,y11,x12,y12] = [x1,y1,x1+w1,y1+h1]
[x21,y21,x22,y22] = [x2,y2,x2+w2,y2+h2]
s1 = w1 * h1
# possible ?
if s1 == 0: s1 = 1.0
#intersection
nx1 = max(x11,x21)
nx2 = min(x12,x22)
ny1 = max(y11,y21)
ny2 = min(y12,y22)
h = abs(nx2 - nx1)
w = abs(ny2 - ny1)
inter = h * w
if inter > 0 :
fOverlap = inter/s1
else:
# if overX and Y this is not possible !
fOverlap = 0.0
return fOverlap
def bestRegionsAssignment(plgtl,lRegions):
"""
find the best (max overlap for self) region for self
"""
lOverlap=[]
for _,plg in lRegions:
lOverlap.append(signedRatioOverlap(plgtl.getBoundingBox(),plg.getBoundingBox()))
# print plgtl.getBoundingBox(), lOverlap
if max(lOverlap) == 0: return None
return lRegions[lOverlap.index(max(lOverlap))]
lPages = PageXml.getChildByName(doc.getroot(),'Page')
lRegionsToBeDeleted = []
for i, page in enumerate(lPages):
if lLTextLines == []:
lTextLines = PageXml.getChildByName(page,'TextLine')
else: lTextLines =lLTextLines[i]
lCells = PageXml.getChildByName(page,'TableCell')
# print len(lCells),len(lTextLines)
lOCells=[]
for cell in lCells:
#get Coords
xpath = "./a:%s" % ("Coords")
lCoords = cell.xpath(xpath,namespaces={"a": self.xmlns})
coord= lCoords[0]
sPoints=coord.get('points')
lsPair = sPoints.split(' ')
lXY = list()
for sPair in lsPair:
(sx,sy) = sPair.split(',')
lXY.append( (int(sx), int(sy)) )
plg = Polygon(lXY)
lOCells.append((cell,plg))
# find the best assignment of each text
for tl in lTextLines:
#get Coords
xpath = "./a:%s" % ("Coords")
lCoords = tl.xpath(xpath,namespaces={"a": self.xmlns})
coord= lCoords[0]
sPoints=coord.get('points')
lsPair = sPoints.split(' ')
lXY = list()
for sPair in lsPair:
(sx,sy) = sPair.split(',')
lXY.append( (int(sx), int(sy)) )
plg = Polygon(lXY)
cell = bestRegionsAssignment(plg,lOCells)
if cell:
c,_=cell
lRegionsToBeDeleted.append(c.parent)
## what about parent TextRegion delete at least TextRegion/TextEquiv
# tl.unlinkNode()
tlcp = tl.docCopyNode(c.doc,True)
# tlcp.unlinkNode()
c.append(tlcp)
# print c
for region in lRegionsToBeDeleted:
region.getParent().remove(region)
# region.unlinkNode()
# region.freeNode()
def reinitPage(self,doc):
"""
empty page
"""
lNodes = PageXml.getChildByName(doc.getroot(),'Page')
for node in lNodes:
node.unlinkNode()
    def findTemplate(self, doc):
        """
        find the page where the first TableRegion occurs and extract it

        :param doc: lxml PAGE XML document
        :return: a new single-page document holding a copy of that table,
            or None when the document has no TableRegion.
        NOTE: the page hosting the template is REMOVED from doc as a side effect.
        """
        from copy import deepcopy
        lT = PageXml.getChildByName(doc.getroot(), 'TableRegion')
        if lT == []:
            return None
        firstTable = lT[0]
        # lazy guy!
        newDoc, fakepage = PageXml.createPageXmlDocument('NLE', '', 0, 0)
        page = firstTable.getparent()
        # mirror the source page's metadata on the template page
        fakepage.set("imageFilename", page.get('imageFilename'))
        fakepage.set("imageWidth", page.get('imageWidth'))
        fakepage.set("imageHeight", page.get('imageHeight'))
        page.getparent().remove(page)
        # add table
        xx = deepcopy(firstTable)
        fakepage.append(xx)
        return newDoc
def createRegistrationProfile(self,sTemplatefile):
# get all images files
localpath = os.path.abspath("./%s/col/%s"%(self.coldir,self.docid))
l = glob.glob(os.path.join(localpath, "*.jpg"))
l.sort()
listfile = ";".join(l)
listfile = listfile.replace(os.sep,"/")
txt= LAProcessor.cCVLProfileTabReg % (listfile,localpath.replace(os.sep,"/"),os.path.abspath("%s"%(sTemplatefile)).replace(os.sep,"/"))
# wb mandatory for crlf in windows
prnfilename = "%s%s%s_reg.prn"%(self.coldir,os.sep,self.docid)
f=open(prnfilename,'w', encoding="utf-8")
f.write(txt)
return prnfilename
def createLinesProfile(self):
"""
OutputDirPath mandatory
"""
# get all images files
localpath = os.path.abspath("./%s/col/%s"%(self.coldir,self.docid))
l = glob.glob(os.path.join(localpath, "*.jpg"))
l.sort()
listfile = ";".join(l)
listfile = listfile.replace(os.sep,"/")
localpath = localpath.replace(os.sep,'/')
txt = LAProcessor.cCVLLASeparatorProfile % (listfile,localpath)
# wb mandatory for crlf in windows
prnfilename = "%s%s%s_gl.prn"%(self.coldir,os.sep,self.docid)
f=open(prnfilename,'wb')
f.write(txt)
return prnfilename
def createLAProfile(self):
"""
OutputDirPath mandatory
"""
# get all images files
localpath = os.path.abspath("./%s/col/%s"%(self.coldir,self.docid))
l = glob.glob(os.path.join(localpath, "*.jpg"))
l.sort()
listfile = ";".join(l)
listfile = listfile.replace(os.sep,"/")
localpath = localpath.replace(os.sep,'/')
txt = LAProcessor.cCVLLAProfile % (listfile,localpath,self.bKeepRegion)
# print txt
# wb mandatory for crlf in windows
prnfilename = "%s%s%s_la.prn"%(self.coldir,os.sep,self.docid)
f=open(prnfilename,'wb')
f.write(txt)
return prnfilename
    def storeMPXML(self, lFiles):
        """
        store files in lFiles as mpxml

        :param lFiles: ordered list of per-page XML file names
        :return: (doc, sMPXML) -- the multi-page document and its file name
        """
        docDir = os.path.join(self.coldir + os.sep + 'col', self.docid)
        doc = MultiPageXml.makeMultiPageXml(lFiles)
        sMPXML = docDir + ".mpxml"
        # print sMPXML
        doc.write(sMPXML, encoding="UTF-8", pretty_print=True, xml_declaration=True)
        # trace("\t\t- validating the MultiPageXml ...")
        # if not MultiPageXml.validate(doc):
        #     traceln(" *** WARNING: XML file is invalid against the schema: '%s'" % self.outputFileName)
        # traceln(" Ok!")
        return doc, sMPXML
def extractFileNamesFromMPXML(self,doc):
"""
to insure correct file order !
"""
xmlpath=os.path.abspath("%s%s%s%s%s" % (self.coldir,os.sep,'col',os.sep,self.docid))
lNd = PageXml.getChildByName(doc.getroot(), 'Page')
# for i in lNd:print i
return list(map(lambda x:"%s%s%s.xml"%(xmlpath,os.sep,x.get('imageFilename')[:-4]), lNd))
    def performLA(self, doc):
        """
        # for document doc
        ## find the page where the template is
        ## store it as template (check borders))
        ## generate profile for table registration
        ## (execution)
        ## create profile for lA
        ## (execution)

        :param doc: multi-page (MPXML) document
        :return: (doc, nbPages)
        NOTE(review): nbPages is only bound when one of
        bTemplate/bBaseLine/bSeparator is set; the final return would raise
        NameError otherwise. Same for prnregfilename when no table is found.
        Callers appear to guard these paths -- confirm before reuse.
        """
        # lNumPages = []
        if self.bTemplate or self.bBaseLine or self.bSeparator:
            # extract list of files sorted as in MPXML
            lFullPathXMLNames = self.extractFileNamesFromMPXML(doc)
            nbPages = len(lFullPathXMLNames)
            ## 1 generate xml files if only pxml are there
            xmlpath = os.path.abspath(os.path.join(self.coldir, 'col', self.docid))
            lXMLNames = ["%s%s%s" % (xmlpath, os.sep, name) for name in os.listdir(xmlpath) if os.path.basename(name)[-4:] == '.xml']
            isXml = [] != lXMLNames
            if isXml:
                # stale .xml files: remove them so they are regenerated from the .pxml
                [os.remove("%s%s%s" % (xmlpath, os.sep, name)) for name in os.listdir(xmlpath) if os.path.basename(name)[-4:] == '.xml']
                isXml = False
            isPXml = [] != [name for name in os.listdir(xmlpath) if os.path.basename(name)[-5:] == '.pxml']
            assert not isXml and isPXml
            # recreate doc? (mpxml)
            lPXMLNames = [name for name in os.listdir(xmlpath) if os.path.basename(name)[-5:] == '.pxml']
            if not isXml:
                # copy pxml in xml
                for name in lPXMLNames:
                    oldname = "%s%s%s" % (xmlpath, os.sep, name)
                    newname = "%s%s%s" % (xmlpath, os.sep, name)
                    newname = newname[:-5] + '.xml'
                    tmpdoc = etree.parse(oldname)
                    tmpdoc.write(newname, encoding="UTF-8", pretty_print=True, xml_declaration=True)
        if self.bKeepTL:
            # keep ltextLione: remember the TextLine nodes of each page so they
            # can be re-attached to the table cells after registration
            lTextLines = []
            lPages = PageXml.getChildByName(doc.getroot(), 'Page')
            for page in lPages:
                lTextLines.append(PageXml.getChildByName(page, 'TextLine'))
        ## Table registration
        if self.bTemplate:
            if self.sTemplateFile is None:
                # no template given: extract one from the document itself
                templatePage = self.findTemplate(doc)
                if templatePage is None:
                    traceln("No table found in this document: %s" % self.docid)
                else:
                    oldOut = self.outputFileName
                    self.outputFileName = "%s%s%s.templ.xml" % (self.coldir, os.sep, self.docid)
                    stemplatefile = "%s%s%s.templ.xml" % (self.coldir, os.sep, self.docid)
                    print(stemplatefile)
                    self.writeDom(templatePage, True)
                    self.outputFileName = oldOut
                    prnregfilename = self.createRegistrationProfile(stemplatefile)
            else:
                # raise Exception, 'file template stuff: to be done'
                prnregfilename = self.createRegistrationProfile(self.sTemplateFile)
            # NOTE(review): prnregfilename is undefined here when no table was found
            job = LAProcessor.cNomacs + " --batch %s" % (prnregfilename)
            os.system(job)
            traceln('table registration done: %s' % prnregfilename)
        ## separator detection
        if self.bSeparator:
            prnglfilename = self.createLinesProfile()
            job = LAProcessor.cNomacs + " --batch %s" % (prnglfilename)
            os.system(job)
            traceln('GL done: %s' % prnglfilename)
        ## baseline detection
        if self.bBaseLine:
            prnlafilename = self.createLAProfile()
            # job = LAProcessor.cNomacs+ " --batch %s"%(prnlafilename)
            job = LAProcessor.cNomacsold + " --batch %s" % (prnlafilename)
            os.system(job)
            traceln('LA done: %s' % prnlafilename)
        if self.bTemplate or self.bBaseLine or self.bSeparator:
            # collect the per-page results produced by Nomacs into one MPXML
            doc, sMPXML = self.storeMPXML(lFullPathXMLNames)
        # Does not work with URO LA!
            if self.bKeepTL:
                self.reintegrateTextIntoCells(doc, lTextLines)
            ## text rectangles as textline region
            if self.bRegularTextLine:
                self.regularTextLines(doc)
            doc.write(sMPXML, encoding="UTF-8", pretty_print=True, xml_declaration=True)
        return doc, nbPages
    def regularTextLinesold(self, doc):
        """
        from a baseline: create a regular TextLine:
        also: for slanted baseline:

        (legacy version; superseded by regularTextLines below)
        """
        from shapely.geometry import LineString
        from shapely.affinity import translate
        self.xmlns = 'http://schema.primaresearch.org/PAGE/gts/pagecontent/2013-07-15'
        lTextLines = PageXml.getChildByName(doc.getroot(), 'TextLine')
        for tl in lTextLines:
            # get Coords
            xpath = "./a:%s" % ("Coords")
            lCoords = tl.xpath(xpath, namespaces={"a": self.xmlns})
            coord = lCoords[0]
            xpath = "./a:%s" % ("Baseline")
            lBL = tl.xpath(xpath, namespaces={"a": self.xmlns})
            baseline = lBL[0]
            sPoints = baseline.get('points')
            lsPair = sPoints.split(' ')
            lXY = list()
            for sPair in lsPair:
                try:
                    (sx, sy) = sPair.split(',')
                    lXY.append((int(sx), int(sy)))
                except ValueError: print(tl)  # malformed pair: report and skip
            plg = Polygon(lXY)
            line = LineString(lXY)  # NOTE(review): unused in this old version
            # 50 seems to large: the manual GT is 30 ? not always!
            iHeight = 30  # in pixel
            x1, y1, x2, y2 = plg.getBoundingBox()
            if coord is not None:
                # rectangle from the baseline bounding box, raised by iHeight
                coord.set('points', "%d,%d %d,%d %d,%d %d,%d" % (x1, y1 - iHeight, x2, y1 - iHeight, x2, y2, x1, y2))
            else:
                print(tl)
    def regularTextLines(self, doc):
        """
        from a baseline: create a regular TextLine:

        The new Coords polygon is the baseline itself plus the baseline
        translated 20px upward, i.e. a constant-height band around the text.
        """
        from shapely.geometry import LineString
        from shapely.affinity import translate
        self.xmlns = 'http://schema.primaresearch.org/PAGE/gts/pagecontent/2013-07-15'
        lTextLines = PageXml.getChildByName(doc.getroot(), 'TextLine')
        for tl in lTextLines:
            # get Coords
            xpath = "./a:%s" % ("Coords")
            lCoords = tl.xpath(xpath, namespaces={"a": self.xmlns})
            coord = lCoords[0]
            xpath = "./a:%s" % ("Baseline")
            lBL = tl.xpath(xpath, namespaces={"a": self.xmlns})
            try: baseline = lBL[0]
            except IndexError: continue  # no baseline: leave this TextLine untouched
            sPoints = baseline.get('points')
            lsPair = sPoints.split(' ')
            lXY = list()
            for sPair in lsPair:
                try:
                    (sx, sy) = sPair.split(',')
                    lXY.append((int(sx), int(sy)))
                except ValueError: print(tl)  # malformed pair: report and skip
            # plg = Polygon(lXY)
            try: line = LineString(lXY)
            except ValueError: continue  # LineStrings must have at least 2 coordinate tuples
            topline = translate(line, yoff=-20)
            # iHeight = 20 # in pixel
            # x1,y1, x2,y2 = topline.getBoundingBox()
            if coord is not None:
                spoints = ' '.join("%s,%s" % (int(x[0]), int(x[1])) for x in line.coords)
                lp = list(topline.coords)
                # reversed so the two polylines form a closed polygon ring
                lp.reverse()
                spoints = spoints + ' ' + ' '.join("%s,%s" % (int(x[0]), int(x[1])) for x in lp)
                # spoints = ' '.join("%s,%s"%(x[0],x[1]) for x in pp.coords)
                # coord.set('points',"%d,%d %d,%d %d,%d %d,%d" % (x1,y1-iHeight,x2,y1-iHeight,x2,y2,x1,y2))
                coord.set('points', spoints)
            else:
                print(tl)
            # print tl
    def run(self, doc):
        """
        GT from TextRegion
        or GT from Table
        input mpxml (GT)
        delete TextLine

        :param doc: input (MPXML) document
        :return: the processed document
        """
        if not (self.bTemplate or self.bBaseLine or self.bSeparator) and self.bRegularTextLine:
            # nothing to register/detect: only rebuild regular TextLine polygons
            self.regularTextLines(doc)
            self.writeDom(doc, True)
        else:
            # full layout-analysis pipeline (registration / separators / baselines)
            doc, nbpages = self.performLA(doc)
        return doc
if __name__ == "__main__":
    # for each document
    ## find the page where the template is
    ## store it as template (check borders))
    ## generate profile for table registration
    ## (execution)
    ## create profile for lA
    ## (execution)
    tp = LAProcessor()
    # prepare for the parsing of the command line
    tp.createCommandLineParser()
    tp.add_option("--coldir", dest="coldir", action="store", type="string", help="collection folder")
    tp.add_option("--docid", dest="docid", action="store", type="string", help="document id")
    tp.add_option("--bl", dest="bBaseline", action="store_true", default=False, help="detect baselines")
    # tp.add_option("--region", dest="bRegion", action="store_true", default=False, help="keep Region")
    tp.add_option("--tl", dest="bTL", action="store_true", default=False, help="keep textlines")
    tp.add_option("--sep", dest="bSeparator", action="store_true", default=False, help="detect separator (graphical lines)")
    tp.add_option("--regTL", dest="regTL", action="store_true", default=False, help="generate regular TextLines")
    tp.add_option("--form", dest="template", action="store_true", default=False, help="perform template registration")
    tp.add_option("--formfile", dest="templatefile", action="store", type='string', default=None, help="use this template file (pagexml) for registration")
    # tp.add_option("--form", dest="template", action="store", type="string", help="perform template registration")
    # parse the command line
    dParams, args = tp.parseCommandLine()
    # Now we are back to the normal programmatic mode, we set the component parameters
    tp.setParams(dParams)
    doc = tp.loadDom()
    tp.run(doc)
| bsd-3-clause |
barbuza/django | django/utils/decorators.py | 126 | 6875 | "Functions that help with dynamically creating decorators for views."
try:
from contextlib import ContextDecorator
except ImportError:
ContextDecorator = None
from functools import WRAPPER_ASSIGNMENTS, update_wrapper, wraps
from django.utils import six
class classonlymethod(classmethod):
    """A ``classmethod`` variant that refuses access through an instance."""

    def __get__(self, instance, owner):
        if instance is None:
            # Class-level access: behave exactly like a regular classmethod.
            return super(classonlymethod, self).__get__(instance, owner)
        raise AttributeError("This method is available only on the class, not on instances.")
def method_decorator(decorator, name=''):
    """
    Converts a function decorator into a method decorator

    :param decorator: the plain-function decorator to adapt
    :param name: when decorating a class, the name of the method to wrap
        (required in that case; ignored when decorating a method directly)
    """
    # 'obj' can be a class or a function. If 'obj' is a function at the time it
    # is passed to _dec, it will eventually be a method of the class it is
    # defined on. If 'obj' is a class, the 'name' is required to be the name
    # of the method that will be decorated.
    def _dec(obj):
        is_class = isinstance(obj, type)
        if is_class:
            if name and hasattr(obj, name):
                func = getattr(obj, name)
                if not callable(func):
                    raise TypeError(
                        "Cannot decorate '{0}' as it isn't a callable "
                        "attribute of {1} ({2})".format(name, obj, func)
                    )
            else:
                raise ValueError(
                    "The keyword argument `name` must be the name of a method "
                    "of the decorated class: {0}. Got '{1}' instead".format(
                        obj, name,
                    )
                )
        else:
            func = obj

        def _wrapper(self, *args, **kwargs):
            @decorator
            def bound_func(*args2, **kwargs2):
                # Bind func to self lazily, at call time, via the descriptor
                # protocol, so the decorator sees a plain function.
                return func.__get__(self, type(self))(*args2, **kwargs2)
            # bound_func has the signature that 'decorator' expects i.e. no
            # 'self' argument, but it is a closure over self so it can call
            # 'func' correctly.
            return bound_func(*args, **kwargs)
        # In case 'decorator' adds attributes to the function it decorates, we
        # want to copy those. We don't have access to bound_func in this scope,
        # but we can cheat by using it on a dummy function.

        @decorator
        def dummy(*args, **kwargs):
            pass
        update_wrapper(_wrapper, dummy)
        # Need to preserve any existing attributes of 'func', including the name.
        update_wrapper(_wrapper, func)

        if is_class:
            setattr(obj, name, _wrapper)
            return obj
        return _wrapper
    update_wrapper(_dec, decorator, assigned=available_attrs(decorator))
    # Change the name to aid debugging.
    if hasattr(decorator, '__name__'):
        _dec.__name__ = 'method_decorator(%s)' % decorator.__name__
    else:
        # Callable instances (objects with __call__) have no __name__.
        _dec.__name__ = 'method_decorator(%s)' % decorator.__class__.__name__
    return _dec
def decorator_from_middleware_with_args(middleware_class):
    """
    Like decorator_from_middleware, but returns a function
    that accepts the arguments to be passed to the middleware_class.

    Use like::

         cache_page = decorator_from_middleware_with_args(CacheMiddleware)
         # ...

         @cache_page(3600)
         def my_view(request):
             # ...
    """
    return make_middleware_decorator(middleware_class)


def decorator_from_middleware(middleware_class):
    """
    Given a middleware class (not an instance), returns a view decorator. This
    lets you use middleware functionality on a per-view basis. The middleware
    is created with no params passed.
    """
    # Note the trailing call: the middleware is instantiated immediately,
    # with no constructor arguments.
    return make_middleware_decorator(middleware_class)()
def available_attrs(fn):
    """
    Return the list of functools-wrappable attributes on a callable.

    This is required as a workaround for http://bugs.python.org/issue3445
    under Python 2 (partial objects lack e.g. __name__/__module__).
    """
    if six.PY3:
        # Python 3 tolerates missing attributes in update_wrapper.
        return WRAPPER_ASSIGNMENTS
    return tuple(a for a in WRAPPER_ASSIGNMENTS if hasattr(fn, a))
def make_middleware_decorator(middleware_class):
    """Build a decorator factory that wires a middleware's hooks around a view.

    Hook order mirrors Django's request cycle: process_request ->
    process_view -> the view -> process_exception (on error) ->
    process_template_response / process_response.
    """
    def _make_decorator(*m_args, **m_kwargs):
        # One middleware instance is shared by every view this decorator wraps.
        middleware = middleware_class(*m_args, **m_kwargs)

        def _decorator(view_func):
            @wraps(view_func, assigned=available_attrs(view_func))
            def _wrapped_view(request, *args, **kwargs):
                if hasattr(middleware, 'process_request'):
                    result = middleware.process_request(request)
                    if result is not None:
                        # Middleware short-circuit: use its response directly.
                        return result
                if hasattr(middleware, 'process_view'):
                    result = middleware.process_view(request, view_func, args, kwargs)
                    if result is not None:
                        return result
                try:
                    response = view_func(request, *args, **kwargs)
                except Exception as e:
                    if hasattr(middleware, 'process_exception'):
                        result = middleware.process_exception(request, e)
                        if result is not None:
                            return result
                    # Not handled by the middleware: propagate to the caller.
                    raise
                if hasattr(response, 'render') and callable(response.render):
                    if hasattr(middleware, 'process_template_response'):
                        response = middleware.process_template_response(request, response)
                    # Defer running of process_response until after the template
                    # has been rendered:
                    if hasattr(middleware, 'process_response'):
                        callback = lambda response: middleware.process_response(request, response)
                        response.add_post_render_callback(callback)
                else:
                    if hasattr(middleware, 'process_response'):
                        return middleware.process_response(request, response)
                return response
            return _wrapped_view
        return _decorator
    return _make_decorator
if ContextDecorator is None:
    # ContextDecorator was introduced in Python 3.2
    # See https://docs.python.org/3/library/contextlib.html#contextlib.ContextDecorator
    class ContextDecorator(object):
        """
        A base class that enables a context manager to also be used as a decorator.
        """
        def __call__(self, func):
            @wraps(func, assigned=available_attrs(func))
            def inner(*args, **kwargs):
                # Re-enter the context manager around every call of func.
                with self:
                    return func(*args, **kwargs)
            return inner
class classproperty(object):
    """Read-only property whose getter receives the owner *class*.

    Unlike the builtin property, the value is also reachable on the class
    itself, not only on instances.
    """
    def __init__(self, method=None):
        # The getter; may also be set later via .getter().
        self.fget = method

    def __get__(self, instance, owner):
        # The instance (possibly None) is ignored: always pass the class.
        return self.fget(owner)

    def getter(self, method):
        """Register *method* as the getter and return self (decorator style)."""
        self.fget = method
        return self
| bsd-3-clause |
OneBitSoftware/jwtSample | src/Spa/env1/Lib/site-packages/pip/_vendor/html5lib/trie/datrie.py | 1301 | 1178 | from __future__ import absolute_import, division, unicode_literals
from datrie import Trie as DATrie
from pip._vendor.six import text_type
from ._base import Trie as ABCTrie
class Trie(ABCTrie):
    """Trie backed by the C ``datrie`` implementation."""

    def __init__(self, data):
        # datrie needs the full alphabet up front: gather every character
        # appearing in the keys, validating key types along the way.
        alphabet = set()
        for key in data.keys():
            if not isinstance(key, text_type):
                raise TypeError("All keys must be strings")
            alphabet.update(key)
        self._data = DATrie("".join(alphabet))
        for key, value in data.items():
            self._data[key] = value

    def __contains__(self, key):
        return key in self._data

    def __len__(self):
        return len(self._data)

    def __iter__(self):
        # Not supported by the underlying datrie wrapper.
        raise NotImplementedError()

    def __getitem__(self, key):
        return self._data[key]

    def keys(self, prefix=None):
        return self._data.keys(prefix)

    def has_keys_with_prefix(self, prefix):
        return self._data.has_keys_with_prefix(prefix)

    def longest_prefix(self, prefix):
        return self._data.longest_prefix(prefix)

    def longest_prefix_item(self, prefix):
        return self._data.longest_prefix_item(prefix)
| mit |
metaml/nupic | src/nupic/regions/ImageSensorFilters/GaussianBlur.py | 17 | 1737 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
## @file
"""
from PIL import ImageEnhance
from nupic.regions.ImageSensorFilters.BaseFilter import BaseFilter
class GaussianBlur(BaseFilter):
  """
  Apply a Gaussian blur to the image.
  """

  def __init__(self, level=1):
    """
    @param level -- Number of times to blur.
    """
    BaseFilter.__init__(self)
    self.level = level

  def process(self, image):
    """
    @param image -- The image to process.

    Returns a single image, or a list containing one or more images.
    """
    BaseFilter.process(self, image)
    # Keep the second band as an alpha mask and restore it after each pass.
    mask = image.split()[1]
    for _ in xrange(self.level):
      # Sharpness factor 0.0 fully de-sharpens (blurs) the first band.
      image = ImageEnhance.Sharpness(image.split()[0]).enhance(0.0)
      image.putalpha(mask)
    return image
| agpl-3.0 |
HybridF5/tempest | tempest/api/data_processing/base.py | 6 | 16230 | # Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from collections import OrderedDict
import copy
import six
from tempest import config
from tempest import exceptions
from tempest.lib import exceptions as lib_exc
import tempest.test
CONF = config.CONF
"""Default templates.
There should always be at least a master1 and a worker1 node
group template."""
BASE_VANILLA_DESC = {
'NODES': {
'master1': {
'count': 1,
'node_processes': ['namenode', 'resourcemanager',
'hiveserver']
},
'master2': {
'count': 1,
'node_processes': ['oozie', 'historyserver',
'secondarynamenode']
},
'worker1': {
'count': 1,
'node_processes': ['datanode', 'nodemanager'],
'node_configs': {
'MapReduce': {
'yarn.app.mapreduce.am.resource.mb': 256,
'yarn.app.mapreduce.am.command-opts': '-Xmx256m'
},
'YARN': {
'yarn.scheduler.minimum-allocation-mb': 256,
'yarn.scheduler.maximum-allocation-mb': 1024,
'yarn.nodemanager.vmem-check-enabled': False
}
}
}
},
'cluster_configs': {
'HDFS': {
'dfs.replication': 1
}
}
}
BASE_SPARK_DESC = {
'NODES': {
'master1': {
'count': 1,
'node_processes': ['namenode', 'master']
},
'worker1': {
'count': 1,
'node_processes': ['datanode', 'slave']
}
},
'cluster_configs': {
'HDFS': {
'dfs.replication': 1
}
}
}
BASE_CDH_DESC = {
'NODES': {
'master1': {
'count': 1,
'node_processes': ['CLOUDERA_MANAGER']
},
'master2': {
'count': 1,
'node_processes': ['HDFS_NAMENODE',
'YARN_RESOURCEMANAGER']
},
'master3': {
'count': 1,
'node_processes': ['OOZIE_SERVER', 'YARN_JOBHISTORY',
'HDFS_SECONDARYNAMENODE',
'HIVE_METASTORE', 'HIVE_SERVER2']
},
'worker1': {
'count': 1,
'node_processes': ['YARN_NODEMANAGER', 'HDFS_DATANODE']
}
},
'cluster_configs': {
'HDFS': {
'dfs_replication': 1
}
}
}
DEFAULT_TEMPLATES = {
'vanilla': OrderedDict([
('2.6.0', copy.deepcopy(BASE_VANILLA_DESC)),
('2.7.1', copy.deepcopy(BASE_VANILLA_DESC)),
('1.2.1', {
'NODES': {
'master1': {
'count': 1,
'node_processes': ['namenode', 'jobtracker']
},
'worker1': {
'count': 1,
'node_processes': ['datanode', 'tasktracker'],
'node_configs': {
'HDFS': {
'Data Node Heap Size': 1024
},
'MapReduce': {
'Task Tracker Heap Size': 1024
}
}
}
},
'cluster_configs': {
'HDFS': {
'dfs.replication': 1
},
'MapReduce': {
'mapred.map.tasks.speculative.execution': False,
'mapred.child.java.opts': '-Xmx500m'
},
'general': {
'Enable Swift': False
}
}
})
]),
'hdp': OrderedDict([
('2.0.6', {
'NODES': {
'master1': {
'count': 1,
'node_processes': ['NAMENODE', 'SECONDARY_NAMENODE',
'ZOOKEEPER_SERVER', 'AMBARI_SERVER',
'HISTORYSERVER', 'RESOURCEMANAGER',
'GANGLIA_SERVER', 'NAGIOS_SERVER',
'OOZIE_SERVER']
},
'worker1': {
'count': 1,
'node_processes': ['HDFS_CLIENT', 'DATANODE',
'YARN_CLIENT', 'ZOOKEEPER_CLIENT',
'MAPREDUCE2_CLIENT', 'NODEMANAGER',
'PIG', 'OOZIE_CLIENT']
}
},
'cluster_configs': {
'HDFS': {
'dfs.replication': 1
}
}
})
]),
'spark': OrderedDict([
('1.0.0', copy.deepcopy(BASE_SPARK_DESC)),
('1.3.1', copy.deepcopy(BASE_SPARK_DESC))
]),
'cdh': OrderedDict([
('5.4.0', copy.deepcopy(BASE_CDH_DESC)),
('5.3.0', copy.deepcopy(BASE_CDH_DESC)),
('5', copy.deepcopy(BASE_CDH_DESC))
]),
}
class BaseDataProcessingTest(tempest.test.BaseTestCase):
    """Base class for the Sahara (data processing) API tests."""

    credentials = ['primary']

    @classmethod
    def skip_checks(cls):
        """Skip the whole class when the Sahara service is not available."""
        super(BaseDataProcessingTest, cls).skip_checks()
        if not CONF.service_available.sahara:
            raise cls.skipException('Sahara support is required')
        cls.default_plugin = cls._get_default_plugin()

    @classmethod
    def setup_clients(cls):
        super(BaseDataProcessingTest, cls).setup_clients()
        cls.client = cls.os.data_processing_client

    @classmethod
    def resource_setup(cls):
        super(BaseDataProcessingTest, cls).resource_setup()

        cls.default_version = cls._get_default_version()
        if cls.default_plugin is not None and cls.default_version is None:
            raise exceptions.InvalidConfiguration(
                message="No known Sahara plugin version was found")
        cls.flavor_ref = CONF.compute.flavor_ref

        # add lists for watched resources (populated by the create_* helpers,
        # consumed by resource_cleanup)
        cls._node_group_templates = []
        cls._cluster_templates = []
        cls._data_sources = []
        cls._job_binary_internals = []
        cls._job_binaries = []
        cls._jobs = []
    @classmethod
    def resource_cleanup(cls):
        # cluster templates go first: they may reference node group templates
        # (via node_group_template_id, see get_cluster_template)
        cls.cleanup_resources(getattr(cls, '_cluster_templates', []),
                              cls.client.delete_cluster_template)
        cls.cleanup_resources(getattr(cls, '_node_group_templates', []),
                              cls.client.delete_node_group_template)
        cls.cleanup_resources(getattr(cls, '_jobs', []), cls.client.delete_job)
        cls.cleanup_resources(getattr(cls, '_job_binaries', []),
                              cls.client.delete_job_binary)
        cls.cleanup_resources(getattr(cls, '_job_binary_internals', []),
                              cls.client.delete_job_binary_internal)
        cls.cleanup_resources(getattr(cls, '_data_sources', []),
                              cls.client.delete_data_source)
        super(BaseDataProcessingTest, cls).resource_cleanup()

    @staticmethod
    def cleanup_resources(resource_id_list, method):
        """Call *method* on every id in *resource_id_list*."""
        for resource_id in resource_id_list:
            try:
                method(resource_id)
            except lib_exc.NotFound:
                # ignore errors while auto removing created resource
                pass
    @classmethod
    def create_node_group_template(cls, name, plugin_name, hadoop_version,
                                   node_processes, flavor_id,
                                   node_configs=None, **kwargs):
        """Creates watched node group template with specified params.

        It supports passing additional params using kwargs and returns created
        object. All resources created in this method will be automatically
        removed in tearDownClass method.

        :return: the created node group template (dict)
        """
        resp_body = cls.client.create_node_group_template(name, plugin_name,
                                                          hadoop_version,
                                                          node_processes,
                                                          flavor_id,
                                                          node_configs,
                                                          **kwargs)
        resp_body = resp_body['node_group_template']
        # store id of created node group template
        cls._node_group_templates.append(resp_body['id'])

        return resp_body

    @classmethod
    def create_cluster_template(cls, name, plugin_name, hadoop_version,
                                node_groups, cluster_configs=None, **kwargs):
        """Creates watched cluster template with specified params.

        It supports passing additional params using kwargs and returns created
        object. All resources created in this method will be automatically
        removed in tearDownClass method.

        :return: the created cluster template (dict)
        """
        resp_body = cls.client.create_cluster_template(name, plugin_name,
                                                       hadoop_version,
                                                       node_groups,
                                                       cluster_configs,
                                                       **kwargs)
        resp_body = resp_body['cluster_template']
        # store id of created cluster template
        cls._cluster_templates.append(resp_body['id'])

        return resp_body

    @classmethod
    def create_data_source(cls, name, type, url, **kwargs):
        """Creates watched data source with specified params.

        It supports passing additional params using kwargs and returns created
        object. All resources created in this method will be automatically
        removed in tearDownClass method.

        :return: the created data source (dict)
        """
        resp_body = cls.client.create_data_source(name, type, url, **kwargs)
        resp_body = resp_body['data_source']
        # store id of created data source
        cls._data_sources.append(resp_body['id'])

        return resp_body

    @classmethod
    def create_job_binary_internal(cls, name, data):
        """Creates watched job binary internal with specified params.

        It returns created object. All resources created in this method will
        be automatically removed in tearDownClass method.

        :return: the created job binary internal (dict)
        """
        resp_body = cls.client.create_job_binary_internal(name, data)
        resp_body = resp_body['job_binary_internal']
        # store id of created job binary internal
        cls._job_binary_internals.append(resp_body['id'])

        return resp_body

    @classmethod
    def create_job_binary(cls, name, url, extra=None, **kwargs):
        """Creates watched job binary with specified params.

        It supports passing additional params using kwargs and returns created
        object. All resources created in this method will be automatically
        removed in tearDownClass method.

        :return: the created job binary (dict)
        """
        resp_body = cls.client.create_job_binary(name, url, extra, **kwargs)
        resp_body = resp_body['job_binary']
        # store id of created job binary
        cls._job_binaries.append(resp_body['id'])

        return resp_body

    @classmethod
    def create_job(cls, name, job_type, mains, libs=None, **kwargs):
        """Creates watched job with specified params.

        It supports passing additional params using kwargs and returns created
        object. All resources created in this method will be automatically
        removed in tearDownClass method.

        :return: the created job (dict)
        """
        resp_body = cls.client.create_job(name,
                                          job_type, mains, libs, **kwargs)
        resp_body = resp_body['job']
        # store id of created job
        cls._jobs.append(resp_body['id'])

        return resp_body
@classmethod
def _get_default_plugin(cls):
"""Returns the default plugin used for testing."""
if len(CONF.data_processing_feature_enabled.plugins) == 0:
return None
for plugin in CONF.data_processing_feature_enabled.plugins:
if plugin in DEFAULT_TEMPLATES.keys():
break
else:
plugin = ''
return plugin
    @classmethod
    def _get_default_version(cls):
        """Returns the default plugin version used for testing.

        This is gathered separately from the plugin to allow
        the usage of plugin name in skip_checks. This method is
        rather invoked into resource_setup, which allows API calls
        and exceptions.
        """
        if not cls.default_plugin:
            return None
        plugin = cls.client.get_plugin(cls.default_plugin)['plugin']

        # first known template version that the deployed service also supports
        for version in DEFAULT_TEMPLATES[cls.default_plugin].keys():
            if version in plugin['versions']:
                break
        else:
            version = None

        return version
@classmethod
def get_node_group_template(cls, nodegroup='worker1'):
"""Returns a node group template for the default plugin."""
try:
plugin_data = (
DEFAULT_TEMPLATES[cls.default_plugin][cls.default_version]
)
nodegroup_data = plugin_data['NODES'][nodegroup]
node_group_template = {
'description': 'Test node group template',
'plugin_name': cls.default_plugin,
'hadoop_version': cls.default_version,
'node_processes': nodegroup_data['node_processes'],
'flavor_id': cls.flavor_ref,
'node_configs': nodegroup_data.get('node_configs', {}),
}
return node_group_template
except (IndexError, KeyError):
return None
    @classmethod
    def get_cluster_template(cls, node_group_template_ids=None):
        """Returns a cluster template for the default plugin.

        node_group_template_defined contains the type and ID of pre-defined
        node group templates that have to be used in the cluster template
        (instead of dynamically defining them with 'node_processes').
        Returns None when DEFAULT_TEMPLATES has no data for the default
        plugin/version.
        """
        if node_group_template_ids is None:
            node_group_template_ids = {}
        try:
            plugin_data = (
                DEFAULT_TEMPLATES[cls.default_plugin][cls.default_version]
            )
            all_node_groups = []
            for ng_name, ng_data in six.iteritems(plugin_data['NODES']):
                node_group = {
                    'name': '%s-node' % (ng_name),
                    'flavor_id': cls.flavor_ref,
                    'count': ng_data['count']
                }
                if ng_name in node_group_template_ids.keys():
                    # node group already defined, use it
                    node_group['node_group_template_id'] = (
                        node_group_template_ids[ng_name]
                    )
                else:
                    # node_processes list defined on-the-fly
                    node_group['node_processes'] = ng_data['node_processes']
                    if 'node_configs' in ng_data:
                        node_group['node_configs'] = ng_data['node_configs']
                all_node_groups.append(node_group)
            cluster_template = {
                'description': 'Test cluster template',
                'plugin_name': cls.default_plugin,
                'hadoop_version': cls.default_version,
                'cluster_configs': plugin_data.get('cluster_configs', {}),
                'node_groups': all_node_groups,
            }
            return cluster_template
        except (IndexError, KeyError):
            # missing template data for the default plugin/version
            return None
| apache-2.0 |
jzt5132/scikit-learn | benchmarks/bench_plot_svd.py | 325 | 2899 | """Benchmarks of Singular Value Decomposition (Exact and Approximate)
The data is mostly low rank but is a fat infinite tail.
"""
import gc
from time import time
import numpy as np
from collections import defaultdict
from scipy.linalg import svd
from sklearn.utils.extmath import randomized_svd
from sklearn.datasets.samples_generator import make_low_rank_matrix
def compute_bench(samples_range, features_range, n_iter=3, rank=50):
    """Benchmark exact (scipy) vs. randomized (scikit-learn) SVD.

    Times each solver on a synthetic low-rank matrix for every
    (n_samples, n_features) combination and returns a dict mapping a
    solver label to the list of elapsed times, in iteration order.
    """
    it = 0
    # defaultdict(list) is the idiomatic factory; a lambda adds nothing.
    results = defaultdict(list)
    max_it = len(samples_range) * len(features_range)
    for n_samples in samples_range:
        for n_features in features_range:
            it += 1
            print('====================')
            print('Iteration %03d of %03d' % (it, max_it))
            print('====================')
            X = make_low_rank_matrix(n_samples, n_features,
                                     effective_rank=rank,
                                     tail_strength=0.2)
            # collect garbage before each timed section so allocator noise
            # does not pollute the measurements
            gc.collect()
            print("benchmarking scipy svd: ")
            tstart = time()
            svd(X, full_matrices=False)
            results['scipy svd'].append(time() - tstart)
            gc.collect()
            print("benchmarking scikit-learn randomized_svd: n_iter=0")
            tstart = time()
            randomized_svd(X, rank, n_iter=0)
            results['scikit-learn randomized_svd (n_iter=0)'].append(
                time() - tstart)
            gc.collect()
            print("benchmarking scikit-learn randomized_svd: n_iter=%d "
                  % n_iter)
            tstart = time()
            randomized_svd(X, rank, n_iter=n_iter)
            results['scikit-learn randomized_svd (n_iter=%d)'
                    % n_iter].append(time() - tstart)
    return results
if __name__ == '__main__':
    from mpl_toolkits.mplot3d import axes3d  # register the 3d projection
    import matplotlib.pyplot as plt
    # a small 4x4 grid of problem sizes between 2 and 1000
    samples_range = np.linspace(2, 1000, 4).astype(np.int)
    features_range = np.linspace(2, 1000, 4).astype(np.int)
    results = compute_bench(samples_range, features_range)
    label = 'scikit-learn singular value decomposition benchmark results'
    fig = plt.figure(label)
    ax = fig.gca(projection='3d')
    # NOTE(review): .iteritems() is Python 2 only — this script predates py3
    for c, (label, timings) in zip('rbg', sorted(results.iteritems())):
        X, Y = np.meshgrid(samples_range, features_range)
        Z = np.asarray(timings).reshape(samples_range.shape[0],
                                        features_range.shape[0])
        # plot the actual surface
        ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3,
                        color=c)
        # dummy point plot to stick the legend to, since surface plots do
        # not support legends (yet?)
        ax.plot([1], [1], [1], color=c, label=label)
    ax.set_xlabel('n_samples')
    ax.set_ylabel('n_features')
    ax.set_zlabel('Time (s)')
    ax.legend()
    plt.show()
| bsd-3-clause |
bukepo/openthread | tools/harness-automation/cases/border_7_1_4.py | 9 | 1877 | #!/usr/bin/env python
#
# Copyright (c) 2019, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import unittest
from autothreadharness.harness_case import HarnessCase
class Border_7_1_4(HarnessCase):
    """Thread certification case 7.1.4 with the DUT acting as Border Router."""

    role = HarnessCase.ROLE_BORDER
    case = '7 1 4'  # harness test-case identifier
    golden_devices_required = 3  # number of golden reference devices needed

    def on_dialog(self, dialog, title):
        # No harness dialogs require special handling for this case.
        pass
# Allow running this case directly via the stdlib unittest CLI runner.
if __name__ == '__main__':
    unittest.main()
| bsd-3-clause |
tysonholub/twilio-python | tests/integration/verify/v2/service/test_messaging_configuration.py | 1 | 8820 | # coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from tests import IntegrationTestCase
from tests.holodeck import Request
from twilio.base.exceptions import TwilioException
from twilio.http.response import Response
class MessagingConfigurationTestCase(IntegrationTestCase):
    """Auto-generated integration tests for the Verify v2
    MessagingConfigurations REST resource.

    Each *_request test mocks a 500 response (forcing TwilioException) so
    that only the outgoing request shape is asserted; each *_response test
    mocks a canned payload and asserts the client parses it.
    """

    def test_create_request(self):
        # 500 forces the client to raise, but holodeck still records the call
        self.holodeck.mock(Response(500, ''))
        with self.assertRaises(TwilioException):
            self.client.verify.v2.services(sid="VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                 .messaging_configurations.create(country="country", messaging_service_sid="MGXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
        values = {'Country': "country", 'MessagingServiceSid': "MGXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX", }
        self.holodeck.assert_has_request(Request(
            'post',
            'https://verify.twilio.com/v2/Services/VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/MessagingConfigurations',
            data=values,
        ))

    def test_create_response(self):
        self.holodeck.mock(Response(
            201,
            '''
            {
                "account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "service_sid": "VAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "country": "CA",
                "messaging_service_sid": "MGaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "date_created": "2015-07-30T20:00:00Z",
                "date_updated": "2015-07-30T20:00:00Z",
                "url": "https://verify.twilio.com/v2/Services/VAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/MessagingConfigurations/CA"
            }
            '''
        ))
        actual = self.client.verify.v2.services(sid="VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                      .messaging_configurations.create(country="country", messaging_service_sid="MGXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
        self.assertIsNotNone(actual)

    def test_update_request(self):
        self.holodeck.mock(Response(500, ''))
        with self.assertRaises(TwilioException):
            self.client.verify.v2.services(sid="VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                 .messaging_configurations(country="country").update(messaging_service_sid="MGXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
        values = {'MessagingServiceSid': "MGXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX", }
        self.holodeck.assert_has_request(Request(
            'post',
            'https://verify.twilio.com/v2/Services/VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/MessagingConfigurations/country',
            data=values,
        ))

    def test_update_response(self):
        self.holodeck.mock(Response(
            200,
            '''
            {
                "account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "service_sid": "VAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "country": "CA",
                "messaging_service_sid": "MGaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "date_created": "2015-07-30T20:00:00Z",
                "date_updated": "2015-07-30T20:00:00Z",
                "url": "https://verify.twilio.com/v2/Services/VAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/MessagingConfigurations/CA"
            }
            '''
        ))
        actual = self.client.verify.v2.services(sid="VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                      .messaging_configurations(country="country").update(messaging_service_sid="MGXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
        self.assertIsNotNone(actual)

    def test_fetch_request(self):
        self.holodeck.mock(Response(500, ''))
        with self.assertRaises(TwilioException):
            self.client.verify.v2.services(sid="VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                 .messaging_configurations(country="country").fetch()
        self.holodeck.assert_has_request(Request(
            'get',
            'https://verify.twilio.com/v2/Services/VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/MessagingConfigurations/country',
        ))

    def test_fetch_response(self):
        self.holodeck.mock(Response(
            200,
            '''
            {
                "account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "service_sid": "VAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "country": "CA",
                "messaging_service_sid": "MGaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "date_created": "2015-07-30T20:00:00Z",
                "date_updated": "2015-07-30T20:00:00Z",
                "url": "https://verify.twilio.com/v2/Services/VAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/MessagingConfigurations/CA"
            }
            '''
        ))
        actual = self.client.verify.v2.services(sid="VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                      .messaging_configurations(country="country").fetch()
        self.assertIsNotNone(actual)

    def test_list_request(self):
        self.holodeck.mock(Response(500, ''))
        with self.assertRaises(TwilioException):
            self.client.verify.v2.services(sid="VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                 .messaging_configurations.list()
        self.holodeck.assert_has_request(Request(
            'get',
            'https://verify.twilio.com/v2/Services/VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/MessagingConfigurations',
        ))

    def test_read_empty_response(self):
        self.holodeck.mock(Response(
            200,
            '''
            {
                "messaging_configurations": [],
                "meta": {
                    "page": 0,
                    "page_size": 50,
                    "first_page_url": "https://verify.twilio.com/v2/Services/VAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/MessagingConfigurations?PageSize=50&Page=0",
                    "previous_page_url": null,
                    "url": "https://verify.twilio.com/v2/Services/VAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/MessagingConfigurations?PageSize=50&Page=0",
                    "next_page_url": null,
                    "key": "messaging_configurations"
                }
            }
            '''
        ))
        actual = self.client.verify.v2.services(sid="VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                      .messaging_configurations.list()
        self.assertIsNotNone(actual)

    def test_read_full_response(self):
        self.holodeck.mock(Response(
            200,
            '''
            {
                "messaging_configurations": [
                    {
                        "account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                        "service_sid": "VAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                        "country": "CA",
                        "messaging_service_sid": "MGaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                        "date_created": "2015-07-30T20:00:00Z",
                        "date_updated": "2015-07-30T20:00:00Z",
                        "url": "https://verify.twilio.com/v2/Services/VAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/MessagingConfigurations/CA"
                    }
                ],
                "meta": {
                    "page": 0,
                    "page_size": 50,
                    "first_page_url": "https://verify.twilio.com/v2/Services/VAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/MessagingConfigurations?PageSize=50&Page=0",
                    "previous_page_url": null,
                    "url": "https://verify.twilio.com/v2/Services/VAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/MessagingConfigurations?PageSize=50&Page=0",
                    "next_page_url": null,
                    "key": "messaging_configurations"
                }
            }
            '''
        ))
        actual = self.client.verify.v2.services(sid="VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                      .messaging_configurations.list()
        self.assertIsNotNone(actual)

    def test_delete_request(self):
        self.holodeck.mock(Response(500, ''))
        with self.assertRaises(TwilioException):
            self.client.verify.v2.services(sid="VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                 .messaging_configurations(country="country").delete()
        self.holodeck.assert_has_request(Request(
            'delete',
            'https://verify.twilio.com/v2/Services/VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/MessagingConfigurations/country',
        ))

    def test_delete_response(self):
        # 204 No Content: delete() returns True on success
        self.holodeck.mock(Response(
            204,
            None,
        ))
        actual = self.client.verify.v2.services(sid="VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                      .messaging_configurations(country="country").delete()
        self.assertTrue(actual)
| mit |
mbauskar/helpdesk-erpnext | erpnext/startup/report_data_map.py | 6 | 8686 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
# mappings for table dumps
# Each entry maps a DocType (optionally suffixed with "[Report Name]" to
# scope it to one report) to the query used when exporting its data:
#   columns     - SELECT expressions (may alias via "as")
#   conditions  - WHERE clauses, AND-joined
#   order_by    - ORDER BY clause
#   links       - column -> [target DocType, target column] foreign keys
#   from        - optional FROM clause override (used for joins)
#   force_index - optional MySQL index hint
# NOTE: remember to add database indexes for columns used in
# conditions/order_by!
data_map = {
	"Company": {
		"columns": ["name"],
		"conditions": ["docstatus < 2"]
	},
	"Fiscal Year": {
		"columns": ["name", "year_start_date", "year_end_date"],
		"conditions": ["docstatus < 2"],
	},

	# Accounts
	"Account": {
		"columns": ["name", "parent_account", "lft", "rgt", "report_type",
			"company", "is_group"],
		"conditions": ["docstatus < 2"],
		"order_by": "lft",
		"links": {
			"company": ["Company", "name"],
		}
	},
	"Cost Center": {
		"columns": ["name", "lft", "rgt"],
		"conditions": ["docstatus < 2"],
		"order_by": "lft"
	},
	"GL Entry": {
		"columns": ["name", "account", "posting_date", "cost_center", "debit", "credit",
			"is_opening", "company", "voucher_type", "voucher_no", "remarks"],
		"order_by": "posting_date, account",
		"links": {
			"account": ["Account", "name"],
			"company": ["Company", "name"],
			"cost_center": ["Cost Center", "name"]
		}
	},

	# Stock
	"Item": {
		"columns": ["name", "if(item_name=name, '', item_name) as item_name", "description",
			"item_group as parent_item_group", "stock_uom", "brand", "valuation_method",
			"re_order_level", "re_order_qty"],
		# "conditions": ["docstatus < 2"],
		"order_by": "name",
		"links": {
			"parent_item_group": ["Item Group", "name"],
			"brand": ["Brand", "name"]
		}
	},
	"Item Group": {
		"columns": ["name", "parent_item_group"],
		# "conditions": ["docstatus < 2"],
		"order_by": "lft"
	},
	"Brand": {
		"columns": ["name"],
		"conditions": ["docstatus < 2"],
		"order_by": "name"
	},
	"Project": {
		"columns": ["name"],
		"conditions": ["docstatus < 2"],
		"order_by": "name"
	},
	"Warehouse": {
		"columns": ["name"],
		"conditions": ["docstatus < 2"],
		"order_by": "name"
	},
	"Stock Ledger Entry": {
		"columns": ["name", "posting_date", "posting_time", "item_code", "warehouse",
			"actual_qty as qty", "voucher_type", "voucher_no", "project",
			"incoming_rate as incoming_rate", "stock_uom", "serial_no",
			"qty_after_transaction", "valuation_rate"],
		"order_by": "posting_date, posting_time, name",
		"links": {
			"item_code": ["Item", "name"],
			"warehouse": ["Warehouse", "name"],
			"project": ["Project", "name"]
		},
		"force_index": "posting_sort_index"
	},
	"Serial No": {
		"columns": ["name", "purchase_rate as incoming_rate"],
		"conditions": ["docstatus < 2"],
		"order_by": "name"
	},
	"Stock Entry": {
		"columns": ["name", "purpose"],
		"conditions": ["docstatus=1"],
		"order_by": "posting_date, posting_time, name",
	},
	"Production Order": {
		"columns": ["name", "production_item as item_code",
			"(qty - produced_qty) as qty",
			"fg_warehouse as warehouse"],
		"conditions": ["docstatus=1", "status != 'Stopped'", "ifnull(fg_warehouse, '')!=''",
			"qty > produced_qty"],
		"links": {
			"item_code": ["Item", "name"],
			"warehouse": ["Warehouse", "name"]
		},
	},
	"Material Request Item": {
		"columns": ["item.name as name", "item_code", "warehouse",
			"(qty - ordered_qty) as qty"],
		"from": "`tabMaterial Request Item` item, `tabMaterial Request` main",
		"conditions": ["item.parent = main.name", "main.docstatus=1", "main.status != 'Stopped'",
			"ifnull(warehouse, '')!=''", "qty > ordered_qty"],
		"links": {
			"item_code": ["Item", "name"],
			"warehouse": ["Warehouse", "name"]
		},
	},
	"Purchase Order Item": {
		"columns": ["item.name as name", "item_code", "warehouse",
			"(qty - received_qty) as qty"],
		"from": "`tabPurchase Order Item` item, `tabPurchase Order` main",
		"conditions": ["item.parent = main.name", "main.docstatus=1", "main.status != 'Stopped'",
			"ifnull(warehouse, '')!=''", "qty > received_qty"],
		"links": {
			"item_code": ["Item", "name"],
			"warehouse": ["Warehouse", "name"]
		},
	},
	"Sales Order Item": {
		"columns": ["item.name as name", "item_code", "(qty - delivered_qty) as qty", "warehouse"],
		"from": "`tabSales Order Item` item, `tabSales Order` main",
		"conditions": ["item.parent = main.name", "main.docstatus=1", "main.status != 'Stopped'",
			"ifnull(warehouse, '')!=''", "qty > delivered_qty"],
		"links": {
			"item_code": ["Item", "name"],
			"warehouse": ["Warehouse", "name"]
		},
	},

	# Sales
	"Customer": {
		"columns": ["name", "if(customer_name=name, '', customer_name) as customer_name",
			"customer_group as parent_customer_group", "territory as parent_territory"],
		"conditions": ["docstatus < 2"],
		"order_by": "name",
		"links": {
			"parent_customer_group": ["Customer Group", "name"],
			"parent_territory": ["Territory", "name"],
		}
	},
	"Customer Group": {
		"columns": ["name", "parent_customer_group"],
		"conditions": ["docstatus < 2"],
		"order_by": "lft"
	},
	"Territory": {
		"columns": ["name", "parent_territory"],
		"conditions": ["docstatus < 2"],
		"order_by": "lft"
	},
	"Sales Invoice": {
		"columns": ["name", "customer", "posting_date", "company"],
		"conditions": ["docstatus=1"],
		"order_by": "posting_date",
		"links": {
			"customer": ["Customer", "name"],
			"company":["Company", "name"]
		}
	},
	"Sales Invoice Item": {
		"columns": ["name", "parent", "item_code", "qty", "base_net_amount"],
		"conditions": ["docstatus=1", "ifnull(parent, '')!=''"],
		"order_by": "parent",
		"links": {
			"parent": ["Sales Invoice", "name"],
			"item_code": ["Item", "name"]
		}
	},
	"Sales Order": {
		"columns": ["name", "customer", "transaction_date as posting_date", "company"],
		"conditions": ["docstatus=1"],
		"order_by": "transaction_date",
		"links": {
			"customer": ["Customer", "name"],
			"company":["Company", "name"]
		}
	},
	"Sales Order Item[Sales Analytics]": {
		"columns": ["name", "parent", "item_code", "qty", "base_net_amount"],
		"conditions": ["docstatus=1", "ifnull(parent, '')!=''"],
		"order_by": "parent",
		"links": {
			"parent": ["Sales Order", "name"],
			"item_code": ["Item", "name"]
		}
	},
	"Delivery Note": {
		"columns": ["name", "customer", "posting_date", "company"],
		"conditions": ["docstatus=1"],
		"order_by": "posting_date",
		"links": {
			"customer": ["Customer", "name"],
			"company":["Company", "name"]
		}
	},
	"Delivery Note Item[Sales Analytics]": {
		"columns": ["name", "parent", "item_code", "qty", "base_net_amount"],
		"conditions": ["docstatus=1", "ifnull(parent, '')!=''"],
		"order_by": "parent",
		"links": {
			"parent": ["Delivery Note", "name"],
			"item_code": ["Item", "name"]
		}
	},
	"Supplier": {
		"columns": ["name", "if(supplier_name=name, '', supplier_name) as supplier_name",
			"supplier_type as parent_supplier_type"],
		"conditions": ["docstatus < 2"],
		"order_by": "name",
		"links": {
			"parent_supplier_type": ["Supplier Type", "name"],
		}
	},
	"Supplier Type": {
		"columns": ["name"],
		"conditions": ["docstatus < 2"],
		"order_by": "name"
	},
	"Purchase Invoice": {
		"columns": ["name", "supplier", "posting_date", "company"],
		"conditions": ["docstatus=1"],
		"order_by": "posting_date",
		"links": {
			"supplier": ["Supplier", "name"],
			"company":["Company", "name"]
		}
	},
	"Purchase Invoice Item": {
		"columns": ["name", "parent", "item_code", "qty", "base_net_amount"],
		"conditions": ["docstatus=1", "ifnull(parent, '')!=''"],
		"order_by": "parent",
		"links": {
			"parent": ["Purchase Invoice", "name"],
			"item_code": ["Item", "name"]
		}
	},
	"Purchase Order": {
		"columns": ["name", "supplier", "transaction_date as posting_date", "company"],
		"conditions": ["docstatus=1"],
		"order_by": "posting_date",
		"links": {
			"supplier": ["Supplier", "name"],
			"company":["Company", "name"]
		}
	},
	"Purchase Order Item[Purchase Analytics]": {
		"columns": ["name", "parent", "item_code", "qty", "base_net_amount"],
		"conditions": ["docstatus=1", "ifnull(parent, '')!=''"],
		"order_by": "parent",
		"links": {
			"parent": ["Purchase Order", "name"],
			"item_code": ["Item", "name"]
		}
	},
	"Purchase Receipt": {
		"columns": ["name", "supplier", "posting_date", "company"],
		"conditions": ["docstatus=1"],
		"order_by": "posting_date",
		"links": {
			"supplier": ["Supplier", "name"],
			"company":["Company", "name"]
		}
	},
	"Purchase Receipt Item[Purchase Analytics]": {
		"columns": ["name", "parent", "item_code", "qty", "base_net_amount"],
		"conditions": ["docstatus=1", "ifnull(parent, '')!=''"],
		"order_by": "parent",
		"links": {
			"parent": ["Purchase Receipt", "name"],
			"item_code": ["Item", "name"]
		}
	},

	# Support
	"Issue": {
		"columns": ["name","status","creation","resolution_date","first_responded_on"],
		"conditions": ["docstatus < 2"],
		"order_by": "creation"
	}
}
| agpl-3.0 |
KokareIITP/django | django/conf/locale/ru/formats.py | 1059 | 1267 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j E Y г.'
TIME_FORMAT = 'G:i'
DATETIME_FORMAT = 'j E Y г. G:i'
YEAR_MONTH_FORMAT = 'F Y г.'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'd.m.Y'
SHORT_DATETIME_FORMAT = 'd.m.Y H:i'
FIRST_DAY_OF_WEEK = 1  # Monday

# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# Order matters: formats are tried first to last when parsing input.
DATE_INPUT_FORMATS = [
    '%d.%m.%Y',  # '25.10.2006'
    '%d.%m.%y',  # '25.10.06'
]
DATETIME_INPUT_FORMATS = [
    '%d.%m.%Y %H:%M:%S',     # '25.10.2006 14:30:59'
    '%d.%m.%Y %H:%M:%S.%f',  # '25.10.2006 14:30:59.000200'
    '%d.%m.%Y %H:%M',        # '25.10.2006 14:30'
    '%d.%m.%Y',              # '25.10.2006'
    '%d.%m.%y %H:%M:%S',     # '25.10.06 14:30:59'
    '%d.%m.%y %H:%M:%S.%f',  # '25.10.06 14:30:59.000200'
    '%d.%m.%y %H:%M',        # '25.10.06 14:30'
    '%d.%m.%y',              # '25.10.06'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '\xa0'  # non-breaking space
NUMBER_GROUPING = 3
| bsd-3-clause |
vlajos/bitcoin | qa/rpc-tests/rawtransactions.py | 87 | 5949 | #!/usr/bin/env python2
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test re-org scenarios with a mempool that contains transactions
# that spend (directly or indirectly) coinbase transactions.
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from pprint import pprint
from time import sleep
# Create one-input, one-output, no-fee transaction:
# Test raw-transaction RPCs (createrawtransaction, signrawtransaction,
# sendrawtransaction) including multisig signing across nodes.
class RawTransactionsTest(BitcoinTestFramework):

    def setup_chain(self):
        # Start from an empty (clean) chain so balances are predictable.
        print("Initializing test directory "+self.options.tmpdir)
        initialize_chain_clean(self.options.tmpdir, 3)

    def setup_network(self, split=False):
        # Three fully-connected nodes; no network split in this test.
        self.nodes = start_nodes(3, self.options.tmpdir)

        #connect to a local machine for debugging
        #url = "http://bitcoinrpc:DP6DvqZtqXarpeNWyN3LZTFchCCyCUuHwNF7E8pX99x1@%s:%d" % ('127.0.0.1', 18332)
        #proxy = AuthServiceProxy(url)
        #proxy.url = url # store URL on proxy for info
        #self.nodes.append(proxy)

        connect_nodes_bi(self.nodes,0,1)
        connect_nodes_bi(self.nodes,1,2)
        connect_nodes_bi(self.nodes,0,2)

        self.is_network_split=False
        self.sync_all()

    def run_test(self):

        #prepare some coins for multiple *rawtransaction commands
        self.nodes[2].generate(1)
        self.sync_all()
        self.nodes[0].generate(101)
        self.sync_all()
        self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),1.5);
        self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),1.0);
        self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),5.0);
        self.sync_all()
        self.nodes[0].generate(5)
        self.sync_all()

        #########################################
        # sendrawtransaction with missing input #
        #########################################
        inputs  = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1}] #won't exists
        outputs = { self.nodes[0].getnewaddress() : 4.998 }
        rawtx   = self.nodes[2].createrawtransaction(inputs, outputs)
        rawtx   = self.nodes[2].signrawtransaction(rawtx)

        errorString = ""
        try:
            rawtx   = self.nodes[2].sendrawtransaction(rawtx['hex'])
        except JSONRPCException,e:
            errorString = e.error['message']

        # broadcasting a tx that spends a non-existent outpoint must fail
        assert_equal("Missing inputs" in errorString, True);

        #########################
        # RAW TX MULTISIG TESTS #
        #########################
        # 2of2 test
        addr1 = self.nodes[2].getnewaddress()
        addr2 = self.nodes[2].getnewaddress()

        addr1Obj = self.nodes[2].validateaddress(addr1)
        addr2Obj = self.nodes[2].validateaddress(addr2)

        mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])
        mSigObjValid = self.nodes[2].validateaddress(mSigObj)

        #use balance deltas instead of absolute values
        bal = self.nodes[2].getbalance()

        # send 1.2 BTC to msig adr
        txId = self.nodes[0].sendtoaddress(mSigObj, 1.2);
        self.sync_all()
        self.nodes[0].generate(1)
        self.sync_all()
        assert_equal(self.nodes[2].getbalance(), bal+Decimal('1.20000000')) #node2 has both keys of the 2of2 ms addr., tx should affect the balance

        # 2of3 test from different nodes
        bal = self.nodes[2].getbalance()
        addr1 = self.nodes[1].getnewaddress()
        addr2 = self.nodes[2].getnewaddress()
        addr3 = self.nodes[2].getnewaddress()

        addr1Obj = self.nodes[1].validateaddress(addr1)
        addr2Obj = self.nodes[2].validateaddress(addr2)
        addr3Obj = self.nodes[2].validateaddress(addr3)

        mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey']])
        mSigObjValid = self.nodes[2].validateaddress(mSigObj)

        txId = self.nodes[0].sendtoaddress(mSigObj, 2.2);
        decTx = self.nodes[0].gettransaction(txId)
        rawTx = self.nodes[0].decoderawtransaction(decTx['hex'])
        sPK = rawTx['vout'][0]['scriptPubKey']['hex']
        self.sync_all()
        self.nodes[0].generate(1)
        self.sync_all()

        #THIS IS A INCOMPLETE FEATURE
        #NODE2 HAS TWO OF THREE KEY AND THE FUNDS SHOULD BE SPENDABLE AND COUNT AT BALANCE CALCULATION
        assert_equal(self.nodes[2].getbalance(), bal) #for now, assume the funds of a 2of3 multisig tx are not marked as spendable

        txDetails = self.nodes[0].gettransaction(txId, True)
        rawTx = self.nodes[0].decoderawtransaction(txDetails['hex'])
        vout = False
        for outpoint in rawTx['vout']:
            if outpoint['value'] == Decimal('2.20000000'):
                vout = outpoint
                break;

        bal = self.nodes[0].getbalance()
        inputs = [{ "txid" : txId, "vout" : vout['n'], "scriptPubKey" : vout['scriptPubKey']['hex']}]
        outputs = { self.nodes[0].getnewaddress() : 2.19 }
        rawTx = self.nodes[2].createrawtransaction(inputs, outputs)
        rawTxPartialSigned = self.nodes[1].signrawtransaction(rawTx, inputs)
        assert_equal(rawTxPartialSigned['complete'], False) #node1 only has one key, can't comp. sign the tx

        rawTxSigned = self.nodes[2].signrawtransaction(rawTx, inputs)
        assert_equal(rawTxSigned['complete'], True) #node2 can sign the tx compl., own two of three keys
        self.nodes[2].sendrawtransaction(rawTxSigned['hex'])
        rawTx = self.nodes[0].decoderawtransaction(rawTxSigned['hex'])
        self.sync_all()
        self.nodes[0].generate(1)
        self.sync_all()
        assert_equal(self.nodes[0].getbalance(), bal+Decimal('50.00000000')+Decimal('2.19000000')) #block reward + tx
# Run the functional test when invoked directly.
if __name__ == '__main__':
    RawTransactionsTest().main()
| mit |
cloudify-cosmo/flask-securest | flask_securest/rest_security.py | 1 | 10608 | #########
# Copyright (c) 2015 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
import StringIO
from collections import OrderedDict
from functools import wraps
from flask import (current_app,
abort,
request,
g as flask_request_globals)
from flask_restful import Resource
from flask_securest import utils
from flask_securest.exceptions import FlaskSecuRESTException, \
AuthenticationException, AuthorizationException
from flask_securest.userstores.abstract_userstore import AbstractUserstore
from flask_securest.authentication_providers.abstract_authentication_provider \
import AbstractAuthenticationProvider
from flask_securest.authorization_providers.abstract_authorization_provider \
import AbstractAuthorizationProvider
# Flask app.config flag that turns request security on/off.
SECURED_MODE = 'app_secured'
# Keys of the per-request security context stored on flask.g.
SECURITY_CTX_HTTP_METHOD = 'http_method'
SECURITY_CTX_ENDPOINT = 'endpoint'
SECURITY_CTX_USERNAME = 'username'
SECURITY_CTX_PRINCIPALS = 'principals'
class SecuREST(object):
    """Security extension for a Flask app.

    Wires authentication/authorization/userstore providers onto the given
    app object and installs the request hooks that validate configuration
    and reset the per-request security context.
    """

    def __init__(self, app):
        self.app = app

        # enable secured mode and initialize all provider slots on the app
        self.app.config[SECURED_MODE] = True
        self.app.securest_logger = None
        self.app.securest_unauthorized_user_handler = None
        self.app.securest_authentication_providers = OrderedDict()
        self.app.securest_authorization_provider = None
        self.app.securest_userstore_driver = None
        self.app.skip_auth_hook = None

        self.app.before_first_request(_validate_configuration)
        self.app.before_request(_clean_security_context)

    @property
    def skip_auth_hook(self):
        # optional callable(request) -> bool; True skips auth for the request
        return self.app.skip_auth_hook

    @skip_auth_hook.setter
    def skip_auth_hook(self, value):
        self.app.skip_auth_hook = value

    @property
    def unauthorized_user_handler(self):
        # optional callable invoked instead of abort(401) on auth failure
        return self.app.securest_unauthorized_user_handler

    @unauthorized_user_handler.setter
    def unauthorized_user_handler(self, value):
        self.app.securest_unauthorized_user_handler = value

    @property
    def logger(self):
        return self.app.securest_logger

    @logger.setter
    def logger(self, logger):
        self.app.securest_logger = logger

    @property
    def userstore_driver(self):
        return self.app.securest_userstore_driver

    @userstore_driver.setter
    def userstore_driver(self, userstore):
        """
        Registers the given userstore driver.
        :param userstore: the userstore driver to be set
        :raises FlaskSecuRESTException: if the driver does not inherit
            AbstractUserstore
        """
        if not isinstance(userstore, AbstractUserstore):
            err_msg = 'failed to register userstore driver "{0}", Error: ' \
                      'driver does not inherit "{1}"'\
                .format(utils.get_instance_class_fqn(userstore),
                        utils.get_class_fqn(AbstractUserstore))
            _log(self.app.securest_logger, 'critical', err_msg)
            raise FlaskSecuRESTException(err_msg)

        self.app.securest_userstore_driver = userstore

    def register_authentication_provider(self, name, provider):
        """
        Registers the given authentication method.
        :param name: A unique name for the authentication provider, required
        for logging
        :param provider: appends the given authentication provider to the list
        of providers
        :raises FlaskSecuRESTException: if the provider does not inherit
            AbstractAuthenticationProvider
        NOTE: Pay attention to the order of the registered providers!
        authentication will be attempted on each of the registered providers,
        according to their registration order, until successful.
        """
        if not isinstance(provider, AbstractAuthenticationProvider):
            err_msg = 'failed to register authentication provider "{0}", ' \
                      'Error: provider does not inherit "{1}"'\
                .format(utils.get_instance_class_fqn(provider),
                        utils.get_class_fqn(AbstractAuthenticationProvider))
            _log(self.app.securest_logger, 'critical', err_msg)
            raise FlaskSecuRESTException(err_msg)

        self.app.securest_authentication_providers[name] = provider

    @property
    def authorization_provider(self):
        return self.app.securest_authorization_provider

    @authorization_provider.setter
    def authorization_provider(self, provider):
        """
        Registers the given authorization provider.
        :param provider: the authorization provider to be set
        :raises FlaskSecuRESTException: if the provider does not inherit
            AbstractAuthorizationProvider
        """
        if not isinstance(provider, AbstractAuthorizationProvider):
            err_msg = 'failed to register authorization provider "{0}", ' \
                      'Error: provider does not inherit "{1}"' \
                .format(utils.get_instance_class_fqn(provider),
                        utils.get_class_fqn(AbstractAuthorizationProvider))
            _log(self.app.securest_logger, 'critical', err_msg)
            raise FlaskSecuRESTException(err_msg)

        self.app.securest_authorization_provider = provider
def _validate_configuration():
    """Fail fast before the first request if no authentication provider
    was registered on the app."""
    providers = current_app.securest_authentication_providers
    if not providers:
        raise FlaskSecuRESTException('authentication providers not set')
def _clean_security_context():
    """Reset the per-request security context on flask.g to all-None."""
    flask_request_globals.security_context = dict.fromkeys(
        (SECURITY_CTX_HTTP_METHOD,
         SECURITY_CTX_ENDPOINT,
         SECURITY_CTX_USERNAME,
         SECURITY_CTX_PRINCIPALS))
def auth_required(func):
    """Decorator that authenticates (and, if a provider is registered,
    authorizes) the request before invoking the wrapped view."""
    @wraps(func)
    def wrapper(*args, **kwargs):
        if _is_secured_request_context():
            try:
                _set_security_context_value(SECURITY_CTX_ENDPOINT,
                                            request.path)
                _set_security_context_value(SECURITY_CTX_HTTP_METHOD,
                                            request.method)
                authenticate()
                if current_app.securest_authorization_provider:
                    authorize()
            except Exception as err:
                _log(current_app.securest_logger, 'error', err)
                # NOTE(review): if a custom unauthorized handler does not
                # raise/abort, the view is still invoked below — confirm
                # this fall-through is intended.
                handle_unauthorized_user()
        return func(*args, **kwargs)
    return wrapper
def _is_secured_request_context():
    """True when SECURED_MODE is enabled and no skip-auth hook exempts
    the current request."""
    secured = current_app.config.get(SECURED_MODE)
    skip_hook = current_app.skip_auth_hook
    return secured and not (skip_hook and skip_hook(request))
def handle_unauthorized_user():
    """Delegate to the configured unauthorized-user handler, or send 401."""
    handler = current_app.securest_unauthorized_user_handler
    if handler:
        handler()
    else:
        abort(401)
def get_request_origin():
    """Return the request's remote address formatted as ``[ip]``, or
    ``[unknown]`` when the address is unavailable."""
    origin_ip = request.remote_addr
    return '[{0}]'.format(origin_ip) if origin_ip else '[unknown]'
def authenticate():
    """Attempt authentication with each registered provider, in
    registration order, until one succeeds.

    On success, stores the username and the user's principals in the
    per-request security context. If every provider fails, raises
    AuthenticationException carrying one accumulated error line per
    provider.
    """
    username = None
    # Accumulates one failure line per provider for the final error message.
    error_msg = StringIO.StringIO()
    request_origin = get_request_origin()
    userstore_driver = current_app.securest_userstore_driver
    authentication_providers = current_app.securest_authentication_providers
    for auth_method, auth_provider in authentication_providers.iteritems():
        try:
            username = auth_provider.authenticate(userstore_driver)
            # An empty result is treated the same as a provider failure.
            if not username:
                raise AuthenticationException('return username is empty')
            # TODO the user obj might not have a 'username' field,
            # we should use smarter logging
            msg = 'user "{0}" authenticated successfully from host {1}, ' \
                  'authentication provider: {2}'\
                .format(username, request_origin, auth_method)
            _log(current_app.securest_logger, 'info', msg)
            break
        except Exception as e:
            # Write the header line lazily, only on the first failure.
            if not error_msg.getvalue():
                error_msg.write('User unauthorized; '
                                'user tried to login from host {0};'
                                '\nall authentication methods failed:'
                                .format(request_origin))
            error_msg.write('\n{0} authenticator: {1}'
                            .format(auth_method, e))
            continue  # try the next authentication method until successful
    if not username:
        raise AuthenticationException(error_msg.getvalue())
    _set_security_context_value(SECURITY_CTX_USERNAME, username)
    _set_security_context_value(SECURITY_CTX_PRINCIPALS,
                                _get_all_principals_for_current_user())
def authorize():
    """Run the registered authorization provider for the current request.

    Logs on success; raises AuthorizationException when the user is not
    allowed to call the current endpoint with the current HTTP method.
    """
    provider = current_app.securest_authorization_provider
    if not provider.authorize():
        raise AuthorizationException('User {0} is not authorized to call {1}'
                                     ' on {2}'.format(get_username(),
                                                      get_http_method(),
                                                      get_endpoint()))
    msg = 'user "{0}" is authorized to call {1} on {2}'.format(
        get_username(), get_http_method(), get_endpoint())
    _log(current_app.securest_logger, 'info', msg)
def _get_all_principals_for_current_user():
    """Return the principals granted to the authenticated user.

    NOTE(review): when no userstore driver is configured this returns the
    bare username (a string) rather than a list of principals; callers
    appear to treat the value as opaque — confirm before changing.
    """
    if current_app.securest_userstore_driver:
        principals_list = current_app.securest_userstore_driver.\
            get_all_principals_for_user(get_username())
    else:
        principals_list = get_username()
    return principals_list
def _get_security_context_value(key):
    """Read one value from the per-request security context."""
    return flask_request_globals.security_context.get(key)


def _set_security_context_value(key, value):
    """Write one value into the per-request security context."""
    flask_request_globals.security_context[key] = value
def get_username():
    """Username of the authenticated user for the current request."""
    return _get_security_context_value(SECURITY_CTX_USERNAME)


def get_endpoint():
    """Request path the security context was built for."""
    return _get_security_context_value(SECURITY_CTX_ENDPOINT)


def get_http_method():
    """HTTP method of the current request."""
    return _get_security_context_value(SECURITY_CTX_HTTP_METHOD)


def get_principals_list():
    """Principals granted to the authenticated user."""
    return _get_security_context_value(SECURITY_CTX_PRINCIPALS)
def _log(logger, method, message):
if logger:
logging_method = getattr(logger, method)
logging_method(message)
class SecuredResource(Resource):
    """A flask-restful Resource whose HTTP handlers all require auth.

    ``method_decorators`` wraps every handler method with
    ``auth_required``.
    """
    secured = True
    method_decorators = [auth_required]
| apache-2.0 |
tedder/ansible | lib/ansible/modules/cloud/vmware/vca_vapp.py | 55 | 11797 | #!/usr/bin/python
# Copyright: (c) 2015, Ansible, Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: vca_vapp
short_description: Manages vCloud Air vApp instances.
description:
  - This module will actively manage vCloud Air vApp instances. Instances
can be created and deleted as well as both deployed and undeployed.
version_added: "2.0"
author:
- Peter Sprygada (@privateip)
notes:
- VMware sold their vCloud Air service in Q2 2017.
- VMware made significant changes to the pyvcloud interface around this time. The C(vca_vapp) module relies on now deprecated code.
- Mileage with C(vca_vapp) may vary as vCloud Director APIs advance.
- A viable alternative maybe U(https://github.com/vmware/ansible-module-vcloud-director)
requirements:
- pyvcloud <= 18.2.2
options:
vapp_name:
description:
- The name of the vCloud Air vApp instance
required: yes
template_name:
description:
- The name of the vApp template to use to create the vApp instance. If
the I(state) is not `absent` then the I(template_name) value must be
provided. The I(template_name) must be previously uploaded to the
catalog specified by I(catalog_name)
network_name:
description:
- The name of the network that should be attached to the virtual machine
in the vApp. The virtual network specified must already be created in
the vCloud Air VDC. If the I(state) is not 'absent' then the
I(network_name) argument must be provided.
network_mode:
description:
- Configures the mode of the network connection.
default: pool
choices: ['pool', 'dhcp', 'static']
vm_name:
description:
- The name of the virtual machine instance in the vApp to manage.
vm_cpus:
description:
- The number of vCPUs to configure for the VM in the vApp. If the
I(vm_name) argument is provided, then this becomes a per VM setting
otherwise it is applied to all VMs in the vApp.
vm_memory:
description:
- The amount of memory in MB to allocate to VMs in the vApp. If the
I(vm_name) argument is provided, then this becomes a per VM setting
      otherwise it is applied to all VMs in the vApp.
operation:
description:
- Specifies an operation to be performed on the vApp.
default: noop
choices: ['noop', 'poweron', 'poweroff', 'suspend', 'shutdown', 'reboot', 'reset']
state:
description:
- Configures the state of the vApp.
default: present
choices: ['present', 'absent', 'deployed', 'undeployed']
username:
description:
- The vCloud Air username to use during authentication
password:
description:
- The vCloud Air password to use during authentication
org:
description:
- The org to login to for creating vapp, mostly set when the service_type is vdc.
instance_id:
description:
- The instance id in a vchs environment to be used for creating the vapp
host:
description:
- The authentication host to be used when service type is vcd.
api_version:
description:
- The api version to be used with the vca
default: "5.7"
service_type:
description:
- The type of service we are authenticating against
default: vca
choices: [ "vca", "vchs", "vcd" ]
vdc_name:
description:
- The name of the virtual data center (VDC) where the vm should be created or contains the vAPP.
extends_documentation_fragment: vca
'''
EXAMPLES = '''
- name: Creates a new vApp in a VCA instance
vca_vapp:
vapp_name: tower
state: present
template_name: 'Ubuntu Server 12.04 LTS (amd64 20150127)'
vdc_name: VDC1
instance_id: '<your instance id here>'
username: '<your username here>'
password: '<your password here>'
delegate_to: localhost
'''
from ansible.module_utils.vca import VcaAnsibleModule, VcaError
DEFAULT_VAPP_OPERATION = 'noop'
VAPP_STATUS = {
'Powered off': 'poweroff',
'Powered on': 'poweron',
'Suspended': 'suspend'
}
VAPP_STATES = ['present', 'absent', 'deployed', 'undeployed']
VAPP_OPERATIONS = ['poweron', 'poweroff', 'suspend', 'shutdown',
'reboot', 'reset', 'noop']
def get_instance(module):
    """Look up the current vApp and summarise it as a dict.

    Always contains ``vapp_name`` and ``state``; when the vApp exists the
    dict also carries its mapped ``status`` and a deployed/undeployed
    ``state``.
    """
    vapp_name = module.params['vapp_name']
    instance = dict(vapp_name=vapp_name, state='absent')
    try:
        vapp = module.get_vapp(vapp_name)
        if vapp:
            raw_status = module.vca.get_status(vapp.me.get_status())
            instance['status'] = VAPP_STATUS.get(raw_status, 'unknown')
            instance['state'] = ('deployed' if vapp.me.deployed
                                 else 'undeployed')
        return instance
    except VcaError:
        # Any lookup failure is reported as an absent vApp.
        return instance
def create(module):
    """Create the vApp from a catalog template and optionally attach it
    to a network.

    Returns the new VM's details (dict) when a network is attached,
    otherwise None. Fails the module if vApp creation does not start.
    """
    vdc_name = module.params['vdc_name']
    vapp_name = module.params['vapp_name']
    template_name = module.params['template_name']
    catalog_name = module.params['catalog_name']
    network_name = module.params['network_name']
    network_mode = module.params['network_mode']
    vm_name = module.params['vm_name']
    vm_cpus = module.params['vm_cpus']
    vm_memory = module.params['vm_memory']
    # BUG FIX: 'deploy' is not a valid value of the 'state' option
    # (choices are present/absent/deployed/undeployed), so the original
    # comparison could never be True. Compare against 'deployed'.
    deploy = module.params['state'] == 'deployed'
    poweron = module.params['operation'] == 'poweron'

    task = module.vca.create_vapp(vdc_name, vapp_name, template_name,
                                  catalog_name, network_name, 'bridged',
                                  vm_name, vm_cpus, vm_memory, deploy, poweron)
    if task is False:
        module.fail('Failed to create vapp: %s' % vapp_name)

    module.vca.block_until_completed(task)

    # Connect the network to the vApp/VM and return the assigned IP.
    if network_name is not None:
        return connect_to_network(module, vdc_name, vapp_name,
                                  network_name, network_mode)
    return None
def delete(module):
    """Remove the vApp named in the module parameters from its VDC."""
    params = module.params
    module.vca.delete_vapp(params['vdc_name'], params['vapp_name'])
def do_operation(module):
    """Execute a power operation against the vApp, or against a single VM
    when ``vm_name`` is given."""
    vapp_name = module.params['vapp_name']
    operation = module.params['operation']
    vm_name = module.params.get('vm_name')
    vm = module.get_vm(vapp_name, vm_name) if vm_name else None
    # Translate module option values to the vCloud API's camelCase names;
    # other operations (reboot, reset, ...) pass through unchanged.
    api_operation = {'poweron': 'powerOn',
                     'poweroff': 'powerOff'}.get(operation, operation)
    module.get_vapp(vapp_name).execute('power:%s' % api_operation, 'post',
                                       targetVM=vm)
def set_state(module):
    """Deploy or undeploy the vApp according to the 'state' option."""
    state = module.params['state']
    operation = module.params['operation']
    vapp = module.get_vapp(module.params['vapp_name'])
    if state == 'deployed':
        # Power the vApp on as part of deployment only when requested.
        power_on = operation == 'poweron'
        if not vapp.deploy(power_on):
            module.fail('unable to deploy vapp')
    elif state == 'undeployed':
        # Only powerOff/suspend are meaningful undeploy actions.
        if operation == 'poweroff':
            action = 'powerOff'
        elif operation == 'suspend':
            action = 'suspend'
        else:
            action = None
        if not vapp.undeploy(action):
            module.fail('unable to undeploy vapp')
def connect_to_network(module, vdc_name, vapp_name, network_name, network_mode):
    """Attach the named VDC network to the vApp and its VMs.

    Returns the VM details (including the assigned IP) on success;
    fails the module otherwise.
    """
    # list() so the length check also works on Python 3, where filter()
    # returns a lazy iterator.
    nets = list(filter(lambda n: n.name == network_name,
                       module.vca.get_networks(vdc_name)))
    if len(nets) != 1:
        # BUG FIX: fail_json requires the 'msg' keyword; the original
        # passed the message positionally.
        module.fail_json(msg="Unable to find network %s " % network_name)

    the_vdc = module.vca.get_vdc(vdc_name)
    the_vapp = module.vca.get_vapp(the_vdc, vapp_name)
    if the_vapp and the_vapp.name != vapp_name:
        module.fail_json(msg="Failed to find vapp named %s" % the_vapp.name)

    # Connect the vApp itself.
    task = the_vapp.connect_to_network(nets[0].name, nets[0].href)
    result = module.vca.block_until_completed(task)
    if result is None:
        module.fail_json(msg="Failed to complete task")

    # Connect the VMs.
    ip_allocation_mode = None
    if network_mode == 'pool':
        ip_allocation_mode = 'POOL'
    elif network_mode == 'dhcp':
        ip_allocation_mode = 'DHCP'
    task = the_vapp.connect_vms(nets[0].name, connection_index=0,
                                ip_allocation_mode=ip_allocation_mode)
    # BUG FIX: the original checked the *previous* task's result here,
    # before block_until_completed() had run for this task.
    result = module.vca.block_until_completed(task)
    if result is None:
        module.fail_json(msg="Failed to complete task")

    # Refresh vApp info and extract the VM IP.
    the_vapp = module.vca.get_vapp(the_vdc, vapp_name)
    if the_vapp is None:
        module.fail_json(msg="Failed to get vapp named %s" % vapp_name)
    return get_vm_details(module)
def get_vm_details(module):
    """Collect details (admin password, first IP) for the vApp's VMs.

    Returns a dict that may contain 'vm_admin_password' and 'vm_ip'.
    When the vApp holds several VMs, the values of the last one win
    (preserves the original behaviour).
    """
    vdc_name = module.params['vdc_name']
    vapp_name = module.params['vapp_name']
    the_vdc = module.vca.get_vdc(vdc_name)
    the_vapp = module.vca.get_vapp(the_vdc, vapp_name)
    if the_vapp and the_vapp.name != vapp_name:
        module.fail_json(msg="Failed to find vapp named %s" % the_vapp.name)

    the_vm_details = dict()
    for vm in the_vapp.me.Children.Vm:
        sections = vm.get_Section()
        # BUG FIX: the original used filter(...)[0], which fails on
        # Python 3 where filter() returns a non-subscriptable iterator.
        customization_section = next(
            s for s in sections
            if s.__class__.__name__ == "GuestCustomizationSectionType")
        if customization_section.get_AdminPasswordEnabled():
            the_vm_details["vm_admin_password"] = \
                customization_section.get_AdminPassword()

        virtual_hardware_section = next(
            s for s in sections
            if s.__class__.__name__ == "VirtualHardwareSection_Type")
        ips = []
        # Namespaced attribute key used by pyvcloud for VM IP addresses.
        _url = '{http://www.vmware.com/vcloud/v1.5}ipAddress'
        for item in virtual_hardware_section.get_Item():
            if not item.Connection:
                continue
            for connection in item.Connection:
                ip = connection.anyAttributes_.get(_url)
                if ip:
                    ips.append(ip)
        if ips:
            the_vm_details["vm_ip"] = ips[0]
    return the_vm_details
def main():
    """Module entry point: reconcile the vApp with the requested state
    and operation, reporting 'changed' and VM facts back to Ansible."""
    argument_spec = dict(
        vapp_name=dict(required=True),
        vdc_name=dict(required=True),
        template_name=dict(),
        catalog_name=dict(default='Public Catalog'),
        network_name=dict(),
        network_mode=dict(default='pool', choices=['dhcp', 'static', 'pool']),
        vm_name=dict(),
        vm_cpus=dict(),
        vm_memory=dict(),
        operation=dict(default=DEFAULT_VAPP_OPERATION, choices=VAPP_OPERATIONS),
        state=dict(default='present', choices=VAPP_STATES)
    )

    module = VcaAnsibleModule(argument_spec=argument_spec,
                              supports_check_mode=True)

    state = module.params['state']
    operation = module.params['operation']

    # Snapshot of the vApp as it currently exists (state may be 'absent').
    instance = get_instance(module)

    result = dict(changed=False)

    if instance and state == 'absent':
        if not module.check_mode:
            delete(module)
        result['changed'] = True

    elif state != 'absent':
        # Create the vApp if missing, otherwise converge deploy state.
        if instance['state'] == 'absent':
            if not module.check_mode:
                result['ansible_facts'] = create(module)
            result['changed'] = True

        elif instance['state'] != state and state != 'present':
            if not module.check_mode:
                set_state(module)
            result['changed'] = True

        # Apply the requested power operation when it differs from the
        # current status.
        if operation != instance.get('status') and operation != 'noop':
            if not module.check_mode:
                do_operation(module)
            result['changed'] = True

        result['ansible_facts'] = get_vm_details(module)

    return module.exit(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
mancoast/CPythonPyc_test | cpython/264_test_bufio.py | 58 | 2435 | import unittest
from test import test_support
# Simple test to ensure that optimizations in fileobject.c deliver
# the expected results. For best testing, run this under a debug-build
# Python too (to exercise asserts in the C code).
lengths = range(1, 257) + [512, 1000, 1024, 2048, 4096, 8192, 10000,
16384, 32768, 65536, 1000000]
class BufferSizeTest(unittest.TestCase):
    """Round-trips strings of many lengths through a file to exercise
    the readline buffering logic in fileobject.c."""

    def try_one(self, s):
        # Write s + "\n" + s to file, then open it and ensure that successive
        # .readline()s deliver what we wrote.

        # Ensure we can open TESTFN for writing.
        test_support.unlink(test_support.TESTFN)

        # Since C doesn't guarantee we can write/read arbitrary bytes in text
        # files, use binary mode.
        f = open(test_support.TESTFN, "wb")
        try:
            # write once with \n and once without
            f.write(s)
            f.write("\n")
            f.write(s)
            f.close()
            f = open(test_support.TESTFN, "rb")
            line = f.readline()
            self.assertEqual(line, s + "\n")
            line = f.readline()
            self.assertEqual(line, s)
            line = f.readline()
            self.assert_(not line) # Must be at EOF
            f.close()
        finally:
            # Always remove the scratch file, even on assertion failure.
            test_support.unlink(test_support.TESTFN)

    def drive_one(self, pattern):
        # Exercise try_one with strings of every interesting length built
        # by repeating 'pattern'.
        for length in lengths:
            # Repeat string 'pattern' as often as needed to reach total length
            # 'length'. Then call try_one with that string, a string one larger
            # than that, and a string one smaller than that. Try this with all
            # small sizes and various powers of 2, so we exercise all likely
            # stdio buffer sizes, and "off by one" errors on both sides.
            q, r = divmod(length, len(pattern))
            teststring = pattern * q + pattern[:r]
            self.assertEqual(len(teststring), length)
            self.try_one(teststring)
            self.try_one(teststring + "x")
            self.try_one(teststring[:-1])

    def test_primepat(self):
        # A pattern with prime length, to avoid simple relationships with
        # stdio buffer sizes.
        self.drive_one("1234567890\00\01\02\03\04\05\06")

    def test_nullpat(self):
        # All-NUL data must survive the round trip too.
        self.drive_one("\0" * 1000)
def test_main():
    """Entry point used by regrtest to run this module's tests."""
    test_support.run_unittest(BufferSizeTest)
if __name__ == "__main__":
test_main()
| gpl-3.0 |
pytroll/satpy | satpy/readers/eum_base.py | 1 | 3822 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2017-2018 Satpy developers
#
# This file is part of satpy.
#
# satpy is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# satpy is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# satpy. If not, see <http://www.gnu.org/licenses/>.
"""Utilities for EUMETSAT satellite data."""
from datetime import datetime, timedelta
import numpy as np
# 6 bytes, 8 bytes, 10 bytes
time_cds_short = [('Days', '>u2'), ('Milliseconds', '>u4')]
time_cds = time_cds_short + [('Microseconds', '>u2')]
time_cds_expanded = time_cds + [('Nanoseconds', '>u2')]
issue_revision = [('Issue', np.uint16), ('Revision', np.uint16)]
def timecds2datetime(tcds):
    """Convert time_cds-variables to datetime-object.

    Works both with a dictionary and a numpy record_array. 'Days' and
    'Milliseconds' are mandatory; 'Microseconds' and 'Nanoseconds' are
    added when present. Times are offsets from the 1958-01-01 epoch.
    """
    microseconds = 0
    try:
        microseconds = int(tcds['Microseconds'])
    except (KeyError, ValueError):
        pass
    try:
        # Fold nanoseconds into (fractional) microseconds.
        microseconds += int(tcds['Nanoseconds']) / 1000.
    except (KeyError, ValueError):
        pass

    epoch = datetime(1958, 1, 1)
    offset = timedelta(days=int(tcds['Days']),
                       milliseconds=int(tcds['Milliseconds']),
                       microseconds=microseconds)
    return epoch + offset
def recarray2dict(arr):
    """Convert numpy record array to a dictionary."""
    res = {}
    # Fields whose dtype layout matches one of these are CDS times and
    # are converted to datetime objects.
    tcds_types = [time_cds_short, time_cds, time_cds_expanded]

    for dtuple in arr.dtype.descr:
        key = dtuple[0]
        ntype = dtuple[1]
        data = arr[key]
        if ntype in tcds_types:
            if data.size > 1:
                # Convert element-wise, preserving the original shape.
                res[key] = np.array([timecds2datetime(item)
                                     for item in data.ravel()]).reshape(data.shape)
            else:
                res[key] = timecds2datetime(data)
        elif isinstance(ntype, list):
            # Nested structured dtype: recurse into the sub-record.
            res[key] = recarray2dict(data)
        else:
            if data.size == 1:
                data = data[0]
                if ntype[:2] == '|S':
                    # Python2 and Python3 handle strings differently
                    try:
                        data = data.decode()
                    except ValueError:
                        pass
                    data = data.split(':')[0].strip()
                res[key] = data
            else:
                res[key] = data.squeeze()

    return res
def get_service_mode(instrument_name, ssp_lon):
    """Get information about service mode for a given instrument and subsatellite longitude.

    Returns a dict with 'service_name' and 'service_desc'; both are
    'unknown' for unrecognised instruments or longitudes.
    """
    unknown = {'service_name': 'unknown', 'service_desc': 'unknown'}
    modes_by_instrument = {
        'seviri': {
            '0.0': {'service_name': 'fes',
                    'service_desc': 'Full Earth Scanning service'},
            '9.5': {'service_name': 'rss',
                    'service_desc': 'Rapid Scanning Service'},
            '41.5': {'service_name': 'iodc',
                     'service_desc': 'Indian Ocean Data Coverage service'},
        },
        'fci': {
            '0.0': {'service_name': 'fdss',
                    'service_desc': 'Full Disk Scanning Service'},
            '9.5': {'service_name': 'rss',
                    'service_desc': 'Rapid Scanning Service'},
        },
    }
    # Longitudes are matched after rounding to one decimal place.
    lon_key = '{:.1f}'.format(ssp_lon)
    return modes_by_instrument.get(instrument_name, unknown).get(lon_key,
                                                                 unknown)
| gpl-3.0 |
gminds/rapidnewsng | django/contrib/localflavor/us/models.py | 196 | 1261 | from django.utils.translation import ugettext_lazy as _
from django.db.models.fields import CharField
from django.contrib.localflavor.us.us_states import STATE_CHOICES
from django.contrib.localflavor.us.us_states import USPS_CHOICES
class USStateField(CharField):
    """Model field storing a two-letter U.S. state abbreviation."""

    description = _("U.S. state (two uppercase letters)")

    def __init__(self, *args, **kwargs):
        # Always enforce the canonical choices and two-character length.
        kwargs.update(choices=STATE_CHOICES, max_length=2)
        super(USStateField, self).__init__(*args, **kwargs)
class USPostalCodeField(CharField):
    """Model field storing a two-letter USPS postal abbreviation."""

    description = _("U.S. postal code (two uppercase letters)")

    def __init__(self, *args, **kwargs):
        # Always enforce the canonical choices and two-character length.
        kwargs.update(choices=USPS_CHOICES, max_length=2)
        super(USPostalCodeField, self).__init__(*args, **kwargs)
class PhoneNumberField(CharField):
    """Model field storing a phone number (up to 20 characters)."""

    description = _("Phone number")

    def __init__(self, *args, **kwargs):
        kwargs.update(max_length=20)
        super(PhoneNumberField, self).__init__(*args, **kwargs)

    def formfield(self, **kwargs):
        # Imported here to avoid a circular import at module load time.
        from django.contrib.localflavor.us.forms import USPhoneNumberField
        options = dict(kwargs)
        # Callers may still override the form class explicitly.
        options.setdefault('form_class', USPhoneNumberField)
        return super(PhoneNumberField, self).formfield(**options)
| bsd-3-clause |
annarev/tensorflow | tensorflow/compiler/mlir/tfr/examples/mnist/mnist_ops_test.py | 11 | 4189 | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tensorflow.compiler.mlir.tfr.examples.mnist.ops_defs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
from tensorflow.compiler.mlir.tfr.examples.mnist import gen_mnist_ops
from tensorflow.compiler.mlir.tfr.examples.mnist import ops_defs
from tensorflow.compiler.mlir.tfr.python import test_utils
from tensorflow.python.framework import load_library
from tensorflow.python.platform import test
_lib_dir = os.path.dirname(gen_mnist_ops.__file__)
_lib_name = os.path.basename(gen_mnist_ops.__file__)[4:].replace('.py', '.so')
load_library.load_op_library(os.path.join(_lib_dir, _lib_name))
class MnistOpsDefsTest(test_utils.OpsDefsTest):
  """Compares each generated MNIST op against its composite definition.

  Each test builds random inputs, runs the generated op and the Python
  composite with identical kwargs, and asserts via
  ``_assertOpAndComposite`` that results (and gradients) match.
  """

  def _check_new_conv2d(self, act):
    """Shared body for the new_conv2d activation variants.

    The three conv tests previously duplicated this whole setup and
    differed only in the 'act' kwarg; factoring it here keeps them
    consistent.
    """
    input_ = tf.random.uniform([1, 4, 4, 1])
    filter_ = tf.random.uniform([2, 2, 1, 8])
    bias = tf.zeros([8])
    kwargs = {
        'input_': input_,
        'filter_': filter_,
        'bias': bias,
        'stride_w': 2,
        'stride_h': 2,
        'dilation_w': 1,
        'dilation_h': 1,
        'padding': 'SAME',
        'act': act
    }

    self._assertOpAndComposite([input_, filter_, bias],
                               tf.function(gen_mnist_ops.new_conv2d),
                               ops_defs._composite_conv_add_relu, kwargs)

  def test_new_conv2d_relu(self):
    self._check_new_conv2d('RELU')

  def test_new_conv2d_relu6(self):
    self._check_new_conv2d('RELU6')

  def test_new_conv2d_tanh(self):
    self.skipTest('Fix tanh gradients')
    self._check_new_conv2d('TANH')

  def test_new_fully_connected(self):
    input_ = tf.random.uniform([2, 4])
    filter_ = tf.random.uniform([3, 4])
    bias = tf.zeros([3])
    kwargs = {'input_': input_, 'filter_': filter_, 'bias': bias, 'act': 'RELU'}

    self._assertOpAndComposite([input_, filter_, bias],
                               tf.function(gen_mnist_ops.new_fully_connected),
                               ops_defs._composite_fully_connected, kwargs)

  def test_new_max_pool(self):
    input_ = tf.random.uniform([8, 4, 4, 1])
    kwargs = {
        'input_': input_,
        'stride_w': 2,
        'stride_h': 2,
        'filter_width': 1,
        'filter_height': 1,
        'padding': 'SAME',
    }

    self._assertOpAndComposite([input_],
                               tf.function(gen_mnist_ops.new_max_pool),
                               ops_defs._composite_max_pool, kwargs)
if __name__ == '__main__':
os.environ[
'TF_MLIR_TFR_LIB_DIR'] = 'tensorflow/compiler/mlir/tfr/examples/mnist'
test.main()
| apache-2.0 |
Pluto-tv/chromium-crosswalk | tools/telemetry/third_party/gsutilz/third_party/boto/boto/ec2/volume.py | 155 | 10436 | # Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010, Eucalyptus Systems, Inc.
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Represents an EC2 Elastic Block Storage Volume
"""
from boto.resultset import ResultSet
from boto.ec2.tag import Tag
from boto.ec2.ec2object import TaggedEC2Object
class Volume(TaggedEC2Object):
    """
    Represents an EBS volume.

    :ivar id: The unique ID of the volume.
    :ivar create_time: The timestamp of when the volume was created.
    :ivar status: The status of the volume.
    :ivar size: The size (in GB) of the volume.
    :ivar snapshot_id: The ID of the snapshot this volume was created
        from, if applicable.
    :ivar attach_data: An AttachmentSet object.
    :ivar zone: The availability zone this volume is in.
    :ivar type: The type of volume (standard or consistent-iops)
    :ivar iops: If this volume is of type consistent-iops, this is
        the number of IOPS provisioned (10-300).
    :ivar encrypted: True if this volume is encrypted.
    """

    def __init__(self, connection=None):
        super(Volume, self).__init__(connection)
        self.id = None
        self.create_time = None
        self.status = None
        self.size = None
        self.snapshot_id = None
        self.attach_data = None
        self.zone = None
        self.type = None
        self.iops = None
        self.encrypted = None

    def __repr__(self):
        # e.g. 'Volume:vol-12345678'
        return 'Volume:%s' % self.id

    def startElement(self, name, attrs, connection):
        # Give the superclass (tag handling) first crack at the element.
        retval = super(Volume, self).startElement(name, attrs, connection)
        if retval is not None:
            return retval
        if name == 'attachmentSet':
            self.attach_data = AttachmentSet()
            return self.attach_data
        elif name == 'tagSet':
            self.tags = ResultSet([('item', Tag)])
            return self.tags
        else:
            return None

    def endElement(self, name, value, connection):
        # Convert numeric/boolean fields from the XML text; anything not
        # explicitly handled is stored verbatim as an attribute.
        if name == 'volumeId':
            self.id = value
        elif name == 'createTime':
            self.create_time = value
        elif name == 'status':
            if value != '':
                self.status = value
        elif name == 'size':
            self.size = int(value)
        elif name == 'snapshotId':
            self.snapshot_id = value
        elif name == 'availabilityZone':
            self.zone = value
        elif name == 'volumeType':
            self.type = value
        elif name == 'iops':
            self.iops = int(value)
        elif name == 'encrypted':
            self.encrypted = (value.lower() == 'true')
        else:
            setattr(self, name, value)

    def _update(self, updated):
        # Copy all attributes from a freshly-fetched Volume onto this one.
        self.__dict__.update(updated.__dict__)

    def update(self, validate=False, dry_run=False):
        """
        Update the data associated with this volume by querying EC2.

        :type validate: bool
        :param validate: By default, if EC2 returns no data about the
                         volume the update method returns quietly.  If
                         the validate param is True, however, it will
                         raise a ValueError exception if no data is
                         returned from EC2.
        """
        # Check the resultset since Eucalyptus ignores the volumeId param
        unfiltered_rs = self.connection.get_all_volumes(
            [self.id],
            dry_run=dry_run
        )
        rs = [x for x in unfiltered_rs if x.id == self.id]
        if len(rs) > 0:
            self._update(rs[0])
        elif validate:
            raise ValueError('%s is not a valid Volume ID' % self.id)
        return self.status

    def delete(self, dry_run=False):
        """
        Delete this EBS volume.

        :rtype: bool
        :return: True if successful
        """
        return self.connection.delete_volume(self.id, dry_run=dry_run)

    def attach(self, instance_id, device, dry_run=False):
        """
        Attach this EBS volume to an EC2 instance.

        :type instance_id: str
        :param instance_id: The ID of the EC2 instance to which it will
                            be attached.

        :type device: str
        :param device: The device on the instance through which the
                       volume will be exposed (e.g. /dev/sdh)

        :rtype: bool
        :return: True if successful
        """
        return self.connection.attach_volume(
            self.id,
            instance_id,
            device,
            dry_run=dry_run
        )

    def detach(self, force=False, dry_run=False):
        """
        Detach this EBS volume from an EC2 instance.

        :type force: bool
        :param force: Forces detachment if the previous detachment
            attempt did not occur cleanly.  This option can lead to
            data loss or a corrupted file system. Use this option only
            as a last resort to detach a volume from a failed
            instance. The instance will not have an opportunity to
            flush file system caches nor file system meta data. If you
            use this option, you must perform file system check and
            repair procedures.

        :rtype: bool
        :return: True if successful
        """
        # Pull the instance/device from the last-known attachment data,
        # when present.
        instance_id = None
        if self.attach_data:
            instance_id = self.attach_data.instance_id
        device = None
        if self.attach_data:
            device = self.attach_data.device
        return self.connection.detach_volume(
            self.id,
            instance_id,
            device,
            force,
            dry_run=dry_run
        )

    def create_snapshot(self, description=None, dry_run=False):
        """
        Create a snapshot of this EBS Volume.

        :type description: str
        :param description: A description of the snapshot.
            Limited to 256 characters.

        :rtype: :class:`boto.ec2.snapshot.Snapshot`
        :return: The created Snapshot object
        """
        return self.connection.create_snapshot(
            self.id,
            description,
            dry_run=dry_run
        )

    def volume_state(self):
        """
        Returns the state of the volume.  Same value as the status attribute.
        """
        return self.status

    def attachment_state(self):
        """
        Get the attachment state.
        """
        state = None
        if self.attach_data:
            state = self.attach_data.status
        return state

    def snapshots(self, owner=None, restorable_by=None, dry_run=False):
        """
        Get all snapshots related to this volume.  Note that this requires
        that all available snapshots for the account be retrieved from EC2
        first and then the list is filtered client-side to contain only
        those for this volume.

        :type owner: str
        :param owner: If present, only the snapshots owned by the
            specified user will be returned.  Valid values are:

            * self
            * amazon
            * AWS Account ID

        :type restorable_by: str
        :param restorable_by: If present, only the snapshots that
            are restorable by the specified account id will be returned.

        :rtype: list of L{boto.ec2.snapshot.Snapshot}
        :return: The requested Snapshot objects
        """
        rs = self.connection.get_all_snapshots(
            owner=owner,
            restorable_by=restorable_by,
            dry_run=dry_run
        )
        # Client-side filter: keep only snapshots taken from this volume.
        mine = []
        for snap in rs:
            if snap.volume_id == self.id:
                mine.append(snap)
        return mine
class AttachmentSet(object):
    """
    Represents an EBS attachmentset.

    :ivar id: The unique ID of the volume.
    :ivar instance_id: The unique ID of the attached instance
    :ivar status: The status of the attachment
    :ivar attach_time: Attached since
    :ivar device: The device the instance has mapped
    """

    # Maps SAX element names onto the attribute each one populates.
    _XML_FIELDS = {
        'volumeId': 'id',
        'instanceId': 'instance_id',
        'status': 'status',
        'attachTime': 'attach_time',
        'device': 'device',
    }

    def __init__(self):
        self.id = None
        self.instance_id = None
        self.status = None
        self.attach_time = None
        self.device = None

    def __repr__(self):
        return 'AttachmentSet:%s' % self.id

    def startElement(self, name, attrs, connection):
        # No nested elements to delegate to.
        pass

    def endElement(self, name, value, connection):
        # Known elements map to their canonical attribute; anything else
        # is stored verbatim under the element's own name.
        setattr(self, self._XML_FIELDS.get(name, name), value)
class VolumeAttribute(object):
    """Holds the result of a DescribeVolumeAttribute call.

    ``attrs`` maps attribute names (currently only 'autoEnableIO') to
    parsed boolean values; ``id`` is the volume the attribute belongs to.
    """

    def __init__(self, parent=None):
        self.id = None
        self._key_name = None
        self.attrs = {}

    def startElement(self, name, attrs, connection):
        # Remember which attribute the upcoming <value> element belongs to.
        if name == 'autoEnableIO':
            self._key_name = name
        return None

    def endElement(self, name, value, connection):
        if name == 'value':
            # The API sends 'true'/'false' strings; store real booleans.
            self.attrs[self._key_name] = (value.lower() == 'true')
        elif name == 'volumeId':
            self.id = value
        else:
            # Unrecognized elements are kept verbatim as attributes.
            setattr(self, name, value)
| bsd-3-clause |
DavidLP/home-assistant | homeassistant/components/blink/binary_sensor.py | 8 | 1522 | """Support for Blink system camera control."""
from homeassistant.components.binary_sensor import BinarySensorDevice
from homeassistant.const import CONF_MONITORED_CONDITIONS
from . import BINARY_SENSORS, BLINK_DATA
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the blink binary sensors."""
    # This platform is only ever configured through discovery.
    if discovery_info is None:
        return
    data = hass.data[BLINK_DATA]
    # One sensor entity per (camera, monitored condition) pair.
    sensors = [
        BlinkBinarySensor(data, camera, sensor_type)
        for camera in data.cameras
        for sensor_type in discovery_info[CONF_MONITORED_CONDITIONS]
    ]
    add_entities(sensors, True)
class BlinkBinarySensor(BinarySensorDevice):
    """A single Blink camera condition exposed as a binary sensor."""

    def __init__(self, data, camera, sensor_type):
        """Initialize the sensor."""
        # Look up the display name and icon configured for this
        # sensor type before storing anything.
        sensor_name, sensor_icon = BINARY_SENSORS[sensor_type]
        self.data = data
        self._camera = data.cameras[camera]
        self._type = sensor_type
        self._name = "{} {} {}".format(BLINK_DATA, camera, sensor_name)
        self._icon = sensor_icon
        self._state = None
        self._unique_id = "{}-{}".format(self._camera.serial, self._type)

    @property
    def name(self):
        """Return the name of the blink sensor."""
        return self._name

    @property
    def is_on(self):
        """Return the status of the sensor."""
        return self._state

    def update(self):
        """Refresh the shared Blink data and pull this sensor's value."""
        self.data.refresh()
        self._state = self._camera.attributes[self._type]
| apache-2.0 |
rvs/gpdb | gpMgmt/bin/gppylib/test/unit/test_unit_mirror_matching_check.py | 9 | 8866 | import os
from mock import *
from gp_unittest import *
from gpcheckcat_modules.mirror_matching_check import MirrorMatchingCheck
from gppylib.gparray import FAULT_STRATEGY_FILE_REPLICATION, FAULT_STRATEGY_NONE
class MirrorMatchingTestCase(GpTestCase):
    """Unit tests for MirrorMatchingCheck.

    Exercises every permutation of (configured mirror strategy, per-segment
    mirror_existence_state) and asserts that run_check() reports a match or
    a mismatch accordingly.  The assertions below are tightly coupled to the
    exact order and index of the logger's info()/error() calls.
    """

    def setUp(self):
        # Fresh subject plus mocked logger / db connection / gparray config
        # for every test.
        self.subject = MirrorMatchingCheck()
        self.logger = Mock(spec=['log', 'info', 'debug', 'error'])
        self.db_connection = Mock(spec=['close', 'query'])
        self.gpConfigMock = Mock()
        self.apply_patches([
            patch('gpcheckcat_modules.mirror_matching_check.get_gparray_from_config', return_value=self.gpConfigMock),
        ])

    def test_mirror_matching_permutations(self):
        """Run the check once per permutation and verify the logged verdict."""
        tests = self._get_list_of_tests_for_permutations_of_matching()
        for test in tests:
            segment_states = self._create_mirror_states(mirror_id_and_state_collection=test["segment_ids_and_states"])
            config_mirror_strategy = FAULT_STRATEGY_FILE_REPLICATION if test["config"] else FAULT_STRATEGY_NONE
            self.gpConfigMock.getFaultStrategy.return_value = config_mirror_strategy
            # Reset the mocks so call-index-based assertions start from zero
            # for each permutation.
            self.db_connection.reset_mock()
            self.logger.reset_mock()
            self._setup_mirroring_matching(
                database_mirror_states=segment_states
            )
            self.subject.run_check(self.db_connection, self.logger)
            self._assert_mirroring_common()
            if test["is_matched"]:
                info_messages = self.logger.info.call_args_list
                self.assertIn("[OK] mirroring_matching", info_messages[2][0])
            else:
                self._assert_mirroring_mismatch(segment_states, config_mirror_strategy)

    ####################### PRIVATE METHODS #######################

    def _setup_mirroring_matching(self, database_mirror_states=None):
        """Make the mocked db connection return the given (seg_id, state) rows."""
        self.db_connection.query.return_value.getresult.return_value = database_mirror_states

    def _assert_mirroring_mismatch(self, segment_states, config_mirror_strategy):
        """Assert the FAIL banner plus one error line per mismatched segment.

        NOTE(review): the expected message indices (info_messages[2]/[3],
        error_messages[0]/[2], mismatch rows starting at error_messages[3])
        mirror the exact logging order of MirrorMatchingCheck.run_check —
        confirm against that module before changing anything here.
        """
        info_messages = self.logger.info.call_args_list
        self.assertIn('[FAIL] Mirroring mismatch detected', info_messages[2][0])
        error_messages = self.logger.error.call_args_list
        config_enabled = config_mirror_strategy == FAULT_STRATEGY_FILE_REPLICATION
        self.assertIn("The GP configuration reports mirror enabling is: %s" % config_enabled, info_messages[3][0])
        self.assertIn("The following segments are mismatched in PT:", error_messages[0][0])
        self.assertIn("Segment ID:\tmirror_existence_state:", error_messages[2][0])
        info_index = 3
        for segment_state in segment_states:
            segment_id = segment_state[0]
            segment_mirroring_state = segment_state[1]
            # A mirror_existence_state > 1 means mirroring is enabled for
            # that segment.
            segment_enabled = segment_mirroring_state > 1
            # 0 is considered a match in either situation
            if segment_mirroring_state == 0:
                segment_enabled = config_enabled
            if segment_enabled != config_enabled:
                label = "Enabled" if segment_enabled else "Disabled"
                self.assertIn("%s\t\t%s (%s)" % (segment_id, segment_mirroring_state, label), error_messages[info_index][0])
                info_index += 1

    def _assert_mirroring_common(self):
        """Assert the banner/header lines and the PT query issued by the check."""
        expected_message1 = '-----------------------------------'
        expected_message2 = 'Checking mirroring_matching'
        info_messages = self.logger.info.call_args_list
        error_messages = self.logger.error.call_args_list
        self.assertIn(expected_message1, info_messages[0][0])
        self.assertIn(expected_message2, info_messages[1][0])
        self.db_connection.query.assert_called_once_with(
            "SELECT gp_segment_id, mirror_existence_state FROM gp_dist_random('gp_persistent_relation_node') GROUP BY 1,2")
        return error_messages, info_messages

    def _get_list_of_tests_for_permutations_of_matching(self):
        """Build the table of permutations driving the test above.

        Each entry: config (True = file replication configured), the list of
        (segment_id, mirror_existence_state) rows the db mock will return,
        and the expected verdict.
        """
        result = []
        result.append(self._create_test_permutation(config=True, segment_ids_and_states=[(1, 3)], is_matched=True))
        result.append(self._create_test_permutation(config=False, segment_ids_and_states=[(1, 3)], is_matched=False))
        result.append(self._create_test_permutation(config=True, segment_ids_and_states=[(1, 1)], is_matched=False))
        result.append(self._create_test_permutation(config=False, segment_ids_and_states=[(1, 1)], is_matched=True))
        result.append(self._create_test_permutation(config=True, segment_ids_and_states=[(1, 3), (2, 3)], is_matched=True))
        result.append(self._create_test_permutation(config=False, segment_ids_and_states=[(1, 3), (2, 3)], is_matched=False))
        result.append(self._create_test_permutation(config=True, segment_ids_and_states=[(1, 1), (2, 3)], is_matched=False))
        result.append(self._create_test_permutation(config=False, segment_ids_and_states=[(1, 1), (2, 4)], is_matched=False))
        result.append(self._create_test_permutation(config=True, segment_ids_and_states=[(1, 3), (2, 1)], is_matched=False))
        result.append(self._create_test_permutation(config=False, segment_ids_and_states=[(1, 3), (2, 1)], is_matched=False))
        result.append(self._create_test_permutation(config=True, segment_ids_and_states=[(1, 1), (2, 1)], is_matched=False))
        result.append(self._create_test_permutation(config=False, segment_ids_and_states=[(1, 1), (2, 1)], is_matched=True))
        result.append(self._create_test_permutation(config=True, segment_ids_and_states=[(1, 4), (2, 3), (3, 3)], is_matched=True))
        result.append(self._create_test_permutation(config=False, segment_ids_and_states=[(1, 3), (2, 3), (3, 5)], is_matched=False))
        result.append(self._create_test_permutation(config=True, segment_ids_and_states=[(1, 1), (2, 3), (3, 3)], is_matched=False))
        result.append(self._create_test_permutation(config=False, segment_ids_and_states=[(1, 1), (2, 4), (3, 3)], is_matched=False))
        result.append(self._create_test_permutation(config=True, segment_ids_and_states=[(1, 3), (2, 1), (3, 3)], is_matched=False))
        result.append(self._create_test_permutation(config=False, segment_ids_and_states=[(1, 3), (2, 1), (3, 7)], is_matched=False))
        result.append(self._create_test_permutation(config=True, segment_ids_and_states=[(1, 3), (2, 6), (3, 1)], is_matched=False))
        result.append(self._create_test_permutation(config=False, segment_ids_and_states=[(1, 3), (2, 4), (3, 1)], is_matched=False))
        result.append(self._create_test_permutation(config=True, segment_ids_and_states=[(1, 1), (2, 1), (3, 3)], is_matched=False))
        result.append(self._create_test_permutation(config=False, segment_ids_and_states=[(1, 1), (2, 1), (3, 3)], is_matched=False))
        result.append(self._create_test_permutation(config=True, segment_ids_and_states=[(1, 1), (2, 6), (3, 1)], is_matched=False))
        result.append(self._create_test_permutation(config=False, segment_ids_and_states=[(1, 1), (2, 3), (3, 1)], is_matched=False))
        result.append(self._create_test_permutation(config=True, segment_ids_and_states=[(1, 3), (2, 1), (3, 1)], is_matched=False))
        result.append(self._create_test_permutation(config=False, segment_ids_and_states=[(1, 5), (2, 1), (3, 1)], is_matched=False))
        result.append(self._create_test_permutation(config=True, segment_ids_and_states=[(1, 1), (2, 1), (3, 1)], is_matched=False))
        result.append(self._create_test_permutation(config=False, segment_ids_and_states=[(1, 1), (2, 1), (3, 1)], is_matched=True))
        # A case to test for when one segment returns multiple mirror states
        result.append(self._create_test_permutation(config=True, segment_ids_and_states=[(1, 1), (2, 1), (2, 3), (3, 3)], is_matched=False))
        # 0 matches enabled
        result.append(self._create_test_permutation(config=True, segment_ids_and_states=[(2, 0), (2, 3), (3, 3)], is_matched=True))
        result.append(self._create_test_permutation(config=True, segment_ids_and_states=[(1, 0), (1, 1), (2, 3), (2, 0)], is_matched=False))
        # 0 matches disabled
        result.append(self._create_test_permutation(config=False, segment_ids_and_states=[(1, 1), (1, 0), (2, 1)], is_matched=True))
        result.append(self._create_test_permutation(config=False, segment_ids_and_states=[(1, 1), (1, 0), (2, 0), (2, 3)], is_matched=False))
        return result

    # Convert the arguments to a dictionary
    def _create_test_permutation(self, **permutation_args):
        return permutation_args

    # Convert a list of tuples to a list of lists
    def _create_mirror_states(self, mirror_id_and_state_collection):
        result = []
        for (seg_id, mirror_state) in mirror_id_and_state_collection:
            result.append([seg_id, mirror_state])
        return result
# Allow the test module to be executed directly as a script.
if __name__ == '__main__':
    run_tests()
| apache-2.0 |
sometallgit/AutoUploader | Python27/Lib/encodings/iso2022_jp_ext.py | 816 | 1069 | #
# iso2022_jp_ext.py: Python Unicode Codec for ISO2022_JP_EXT
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
#
import _codecs_iso2022, codecs
import _multibytecodec as mbc
codec = _codecs_iso2022.getcodec('iso2022_jp_ext')
class Codec(codecs.Codec):
    # Stateless one-shot encode/decode, delegated directly to the
    # C-implemented iso2022_jp_ext codec object.
    encode = codec.encode
    decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
                         codecs.IncrementalEncoder):
    # The multibyte machinery only needs to know which codec to drive.
    codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
                         codecs.IncrementalDecoder):
    # The multibyte machinery only needs to know which codec to drive.
    codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
    # Stream-oriented reader built on the same underlying codec.
    codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
    # Stream-oriented writer built on the same underlying codec.
    codec = codec
def getregentry():
    """Return the CodecInfo entry used to register this codec."""
    # One Codec instance is enough: encode/decode are class attributes,
    # so both bound methods behave identically to separate instances.
    codec_object = Codec()
    return codecs.CodecInfo(
        name='iso2022_jp_ext',
        encode=codec_object.encode,
        decode=codec_object.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
| mit |
spladug/cabot | cabot/cabotapp/migrations/0009_auto__add_alertpluginuserdata__add_alertplugin__del_field_instance_tel.py | 13 | 17985 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration introducing the pluggable alert system.

    Adds the 'AlertPlugin' and 'AlertPluginUserData' tables and wires them
    to 'Instance' and 'Service' through new M2M 'alerts' join tables.
    NOTE: this file is auto-generated by South; the frozen ORM snapshot in
    ``models`` below must not be edited by hand.
    """

    def forwards(self, orm):
        """Apply the migration: create the new tables and constraints."""
        # Adding model 'AlertPlugin'
        db.create_table(u'cabotapp_alertplugin', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('polymorphic_ctype', self.gf('django.db.models.fields.related.ForeignKey')(related_name=u'polymorphic_cabotapp.alertplugin_set', null=True, to=orm['contenttypes.ContentType'])),
            ('title', self.gf('django.db.models.fields.CharField')(unique=True, max_length=30)),
            ('enabled', self.gf('django.db.models.fields.BooleanField')(default=True)),
        ))
        db.send_create_signal(u'cabotapp', ['AlertPlugin'])

        # Adding model 'AlertPluginUserData'
        db.create_table(u'cabotapp_alertpluginuserdata', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('polymorphic_ctype', self.gf('django.db.models.fields.related.ForeignKey')(related_name=u'polymorphic_cabotapp.alertpluginuserdata_set', null=True, to=orm['contenttypes.ContentType'])),
            ('title', self.gf('django.db.models.fields.CharField')(max_length=30)),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cabotapp.UserProfile'])),
        ))
        db.send_create_signal(u'cabotapp', ['AlertPluginUserData'])

        # Adding unique constraint on 'AlertPluginUserData', fields ['title', 'user']
        db.create_unique(u'cabotapp_alertpluginuserdata', ['title', 'user_id'])

        # Adding M2M table for field alerts on 'Instance'
        m2m_table_name = db.shorten_name(u'cabotapp_instance_alerts')
        db.create_table(m2m_table_name, (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('instance', models.ForeignKey(orm[u'cabotapp.instance'], null=False)),
            ('alertplugin', models.ForeignKey(orm[u'cabotapp.alertplugin'], null=False))
        ))
        db.create_unique(m2m_table_name, ['instance_id', 'alertplugin_id'])

        # Adding M2M table for field alerts on 'Service'
        m2m_table_name = db.shorten_name(u'cabotapp_service_alerts')
        db.create_table(m2m_table_name, (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('service', models.ForeignKey(orm[u'cabotapp.service'], null=False)),
            ('alertplugin', models.ForeignKey(orm[u'cabotapp.alertplugin'], null=False))
        ))
        db.create_unique(m2m_table_name, ['service_id', 'alertplugin_id'])

    def backwards(self, orm):
        """Reverse the migration (drop the constraint before the tables)."""
        # Removing unique constraint on 'AlertPluginUserData', fields ['title', 'user']
        db.delete_unique(u'cabotapp_alertpluginuserdata', ['title', 'user_id'])

        # Deleting model 'AlertPlugin'
        db.delete_table(u'cabotapp_alertplugin')

        # Deleting model 'AlertPluginUserData'
        db.delete_table(u'cabotapp_alertpluginuserdata')

        # Removing M2M table for field alerts on 'Instance'
        db.delete_table(db.shorten_name(u'cabotapp_instance_alerts'))

        # Removing M2M table for field alerts on 'Service'
        db.delete_table(db.shorten_name(u'cabotapp_service_alerts'))

    # South's frozen snapshot of the ORM at this point in history; it is
    # used to build the 'orm' argument passed to forwards()/backwards().
    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'cabotapp.alertplugin': {
            'Meta': {'object_name': 'AlertPlugin'},
            'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'polymorphic_cabotapp.alertplugin_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
            'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'cabotapp.alertpluginuserdata': {
            'Meta': {'unique_together': "(('title', 'user'),)", 'object_name': 'AlertPluginUserData'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'polymorphic_cabotapp.alertpluginuserdata_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['cabotapp.UserProfile']"})
        },
        u'cabotapp.instance': {
            'Meta': {'ordering': "['name']", 'object_name': 'Instance'},
            'address': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'alerts': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['cabotapp.AlertPlugin']", 'symmetrical': 'False', 'blank': 'True'}),
            'alerts_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'email_alert': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'hackpad_id': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'hipchat_alert': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_alert_sent': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.TextField', [], {}),
            'old_overall_status': ('django.db.models.fields.TextField', [], {'default': "'PASSING'"}),
            'overall_status': ('django.db.models.fields.TextField', [], {'default': "'PASSING'"}),
            'sms_alert': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'status_checks': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['cabotapp.StatusCheck']", 'symmetrical': 'False', 'blank': 'True'}),
            'telephone_alert': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'users_to_notify': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.User']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'cabotapp.instancestatussnapshot': {
            'Meta': {'object_name': 'InstanceStatusSnapshot'},
            'did_send_alert': ('django.db.models.fields.IntegerField', [], {'default': 'False'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'instance': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'snapshots'", 'to': u"orm['cabotapp.Instance']"}),
            'num_checks_active': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'num_checks_failing': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'num_checks_passing': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'overall_status': ('django.db.models.fields.TextField', [], {'default': "'PASSING'"}),
            'time': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'})
        },
        u'cabotapp.service': {
            'Meta': {'ordering': "['name']", 'object_name': 'Service'},
            'alerts': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['cabotapp.AlertPlugin']", 'symmetrical': 'False', 'blank': 'True'}),
            'alerts_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'email_alert': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'hackpad_id': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'hipchat_alert': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'instances': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['cabotapp.Instance']", 'symmetrical': 'False', 'blank': 'True'}),
            'last_alert_sent': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.TextField', [], {}),
            'old_overall_status': ('django.db.models.fields.TextField', [], {'default': "'PASSING'"}),
            'overall_status': ('django.db.models.fields.TextField', [], {'default': "'PASSING'"}),
            'sms_alert': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'status_checks': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['cabotapp.StatusCheck']", 'symmetrical': 'False', 'blank': 'True'}),
            'telephone_alert': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'url': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'users_to_notify': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.User']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'cabotapp.servicestatussnapshot': {
            'Meta': {'object_name': 'ServiceStatusSnapshot'},
            'did_send_alert': ('django.db.models.fields.IntegerField', [], {'default': 'False'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'num_checks_active': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'num_checks_failing': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'num_checks_passing': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'overall_status': ('django.db.models.fields.TextField', [], {'default': "'PASSING'"}),
            'service': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'snapshots'", 'to': u"orm['cabotapp.Service']"}),
            'time': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'})
        },
        u'cabotapp.shift': {
            'Meta': {'object_name': 'Shift'},
            'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'end': ('django.db.models.fields.DateTimeField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'start': ('django.db.models.fields.DateTimeField', [], {}),
            'uid': ('django.db.models.fields.TextField', [], {}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
        },
        u'cabotapp.statuscheck': {
            'Meta': {'ordering': "['name']", 'object_name': 'StatusCheck'},
            'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'cached_health': ('django.db.models.fields.TextField', [], {'null': 'True'}),
            'calculated_status': ('django.db.models.fields.CharField', [], {'default': "'passing'", 'max_length': '50', 'blank': 'True'}),
            'check_type': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
            'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'}),
            'debounce': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True'}),
            'endpoint': ('django.db.models.fields.TextField', [], {'null': 'True'}),
            'expected_num_hosts': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True'}),
            'frequency': ('django.db.models.fields.IntegerField', [], {'default': '5'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'importance': ('django.db.models.fields.CharField', [], {'default': "'ERROR'", 'max_length': '30'}),
            'last_run': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
            'max_queued_build_time': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'metric': ('django.db.models.fields.TextField', [], {'null': 'True'}),
            'name': ('django.db.models.fields.TextField', [], {}),
            'password': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'polymorphic_cabotapp.statuscheck_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
            'status_code': ('django.db.models.fields.TextField', [], {'default': '200', 'null': 'True'}),
            'text_match': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'timeout': ('django.db.models.fields.IntegerField', [], {'default': '30', 'null': 'True'}),
            'username': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'value': ('django.db.models.fields.TextField', [], {'null': 'True'}),
            'verify_ssl_certificate': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
        },
        u'cabotapp.statuscheckresult': {
            'Meta': {'object_name': 'StatusCheckResult'},
            'check': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['cabotapp.StatusCheck']"}),
            'error': ('django.db.models.fields.TextField', [], {'null': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'job_number': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
            'raw_data': ('django.db.models.fields.TextField', [], {'null': 'True'}),
            'succeeded': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'time': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
            'time_complete': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'})
        },
        u'cabotapp.userprofile': {
            'Meta': {'object_name': 'UserProfile'},
            'fallback_alert_user': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'hipchat_alias': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'mobile_number': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '20', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': u"orm['auth.User']"})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        }
    }

    complete_apps = ['cabotapp']
pigeonflight/strider-plone | docker/appengine/lib/httplib2/httplib2/iri2uri.py | 885 | 3850 | """
iri2uri
Converts an IRI to a URI.
"""
__author__ = "Joe Gregorio (joe@bitworking.org)"
__copyright__ = "Copyright 2006, Joe Gregorio"
__contributors__ = []
__version__ = "1.0.0"
__license__ = "MIT"
__history__ = """
"""
import urlparse
# Convert an IRI to a URI following the rules in RFC 3987
#
# The characters we need to enocde and escape are defined in the spec:
#
# iprivate = %xE000-F8FF / %xF0000-FFFFD / %x100000-10FFFD
# ucschar = %xA0-D7FF / %xF900-FDCF / %xFDF0-FFEF
# / %x10000-1FFFD / %x20000-2FFFD / %x30000-3FFFD
# / %x40000-4FFFD / %x50000-5FFFD / %x60000-6FFFD
# / %x70000-7FFFD / %x80000-8FFFD / %x90000-9FFFD
# / %xA0000-AFFFD / %xB0000-BFFFD / %xC0000-CFFFD
# / %xD0000-DFFFD / %xE1000-EFFFD
# Sorted, non-overlapping ranges of code points (the 'ucschar' and
# 'iprivate' productions from RFC 3987) that must be percent-encoded
# when an IRI is converted to a URI.  encode() relies on the ordering
# to stop scanning early.
escape_range = [
    (0xA0, 0xD7FF),
    (0xE000, 0xF8FF),
    (0xF900, 0xFDCF),
    (0xFDF0, 0xFFEF),
    (0x10000, 0x1FFFD),
    (0x20000, 0x2FFFD),
    (0x30000, 0x3FFFD),
    (0x40000, 0x4FFFD),
    (0x50000, 0x5FFFD),
    (0x60000, 0x6FFFD),
    (0x70000, 0x7FFFD),
    (0x80000, 0x8FFFD),
    (0x90000, 0x9FFFD),
    (0xA0000, 0xAFFFD),
    (0xB0000, 0xBFFFD),
    (0xC0000, 0xCFFFD),
    (0xD0000, 0xDFFFD),
    (0xE1000, 0xEFFFD),
    (0xF0000, 0xFFFFD),
    (0x100000, 0x10FFFD)
]


def encode(c):
    """Return the single character *c* percent-encoded (as UTF-8 octets)
    if it falls inside one of the RFC 3987 escape ranges, otherwise
    return it unchanged.

    Fix: the original iterated over ``c.encode('utf-8')`` and applied
    ``ord()`` to each element, which only works on Python 2 where bytes
    iteration yields one-character strings; on Python 3 it raises
    TypeError.  Iterating a ``bytearray`` yields ints on both versions,
    with byte-identical output on Python 2.
    """
    code_point = ord(c)
    for low, high in escape_range:
        if code_point < low:
            break  # ranges are sorted, so no later range can match
        if code_point <= high:
            # Every UTF-8 octet for a code point >= 0xA0 is >= 0x80, so
            # the width-2 '%2X' conversion never space-pads here.
            return "".join(["%%%2X" % octet
                            for octet in bytearray(c.encode('utf-8'))])
    return c
def iri2uri(uri):
    """Convert an IRI to a URI. Note that IRIs must be
    passed in a unicode strings. That is, do not utf-8 encode
    the IRI before passing it into the function."""
    if isinstance(uri, unicode):
        scheme, authority, path, query, fragment = urlparse.urlsplit(uri)
        # The host portion must be converted with IDNA, not
        # percent-encoding.
        authority = authority.encode('idna')
        # For each character in 'ucschar' or 'iprivate'
        # 1. encode as utf-8
        # 2. then %-encode each octet of that utf-8
        recombined = urlparse.urlunsplit(
            (scheme, authority, path, query, fragment))
        uri = "".join(encode(ch) for ch in recombined)
    return uri
# Self-test harness: run this module directly to execute the doctstring
# examples below as unit tests.
if __name__ == "__main__":
    import unittest

    class Test(unittest.TestCase):

        def test_uris(self):
            """Test that URIs are invariant under the transformation."""
            invariant = [
                u"ftp://ftp.is.co.za/rfc/rfc1808.txt",
                u"http://www.ietf.org/rfc/rfc2396.txt",
                u"ldap://[2001:db8::7]/c=GB?objectClass?one",
                u"mailto:John.Doe@example.com",
                u"news:comp.infosystems.www.servers.unix",
                u"tel:+1-816-555-1212",
                u"telnet://192.0.2.16:80/",
                u"urn:oasis:names:specification:docbook:dtd:xml:4.1.2" ]
            for uri in invariant:
                self.assertEqual(uri, iri2uri(uri))

        def test_iri(self):
            """ Test that the right type of escaping is done for each part of the URI."""
            self.assertEqual("http://xn--o3h.com/%E2%98%84", iri2uri(u"http://\N{COMET}.com/\N{COMET}"))
            self.assertEqual("http://bitworking.org/?fred=%E2%98%84", iri2uri(u"http://bitworking.org/?fred=\N{COMET}"))
            self.assertEqual("http://bitworking.org/#%E2%98%84", iri2uri(u"http://bitworking.org/#\N{COMET}"))
            self.assertEqual("#%E2%98%84", iri2uri(u"#\N{COMET}"))
            self.assertEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri(u"/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}"))
            # iri2uri must be idempotent: converting twice equals once.
            self.assertEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri(iri2uri(u"/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}")))
            # Already-utf-8-encoded input is passed through untouched.
            self.assertNotEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri(u"/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}".encode('utf-8')))

    unittest.main()
| mit |
richardcs/ansible | lib/ansible/modules/network/fortios/fortios_firewall_address.py | 10 | 15929 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2018 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# the lib use python logging can get it if the following is set in your
# Ansible config.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_firewall_address
short_description: Configure IPv4 addresses.
description:
- This module is able to configure a FortiGate or FortiOS by
allowing the user to configure firewall feature and address category.
      Examples include all options and need to be adjusted to datasources before usage.
Tested with FOS v6.0.2
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
            - FortiOS or FortiGate IP address.
required: true
username:
description:
- FortiOS or FortiGate username.
required: true
password:
description:
- FortiOS or FortiGate password.
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS
protocol
type: bool
default: false
firewall_address:
description:
- Configure IPv4 addresses.
default: null
suboptions:
state:
description:
- Indicates whether to create or remove the object
choices:
- present
- absent
allow-routing:
description:
- Enable/disable use of this address in the static route configuration.
choices:
- enable
- disable
associated-interface:
description:
- Network interface associated with address. Source system.interface.name system.zone.name.
cache-ttl:
description:
- Defines the minimal TTL of individual IP addresses in FQDN cache measured in seconds.
color:
description:
- Color of icon on the GUI.
comment:
description:
- Comment.
country:
description:
- IP addresses associated to a specific country.
end-ip:
description:
- Final IP address (inclusive) in the range for the address.
epg-name:
description:
- Endpoint group name.
filter:
description:
- Match criteria filter.
fqdn:
description:
- Fully Qualified Domain Name address.
list:
description:
- IP address list.
suboptions:
ip:
description:
- IP.
required: true
name:
description:
- Address name.
required: true
obj-id:
description:
- Object ID for NSX.
organization:
description:
- "Organization domain name (Syntax: organization/domain)."
policy-group:
description:
- Policy group name.
sdn:
description:
- SDN.
choices:
- aci
- aws
- azure
- gcp
- nsx
- nuage
- oci
sdn-tag:
description:
- SDN Tag.
start-ip:
description:
- First IP address (inclusive) in the range for the address.
subnet:
description:
- IP address and subnet mask of address.
subnet-name:
description:
- Subnet name.
tagging:
description:
- Config object tagging.
suboptions:
category:
description:
- Tag category. Source system.object-tagging.category.
name:
description:
- Tagging entry name.
required: true
tags:
description:
- Tags.
suboptions:
name:
description:
- Tag name. Source system.object-tagging.tags.name.
required: true
tenant:
description:
- Tenant.
type:
description:
- Type of address.
choices:
- ipmask
- iprange
- fqdn
- geography
- wildcard
- wildcard-fqdn
- dynamic
uuid:
description:
- Universally Unique Identifier (UUID; automatically assigned but can be manually reset).
visibility:
description:
- Enable/disable address visibility in the GUI.
choices:
- enable
- disable
wildcard:
description:
- IP address and wildcard netmask.
wildcard-fqdn:
description:
- Fully Qualified Domain Name with wildcard characters.
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
tasks:
- name: Configure IPv4 addresses.
fortios_firewall_address:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
firewall_address:
state: "present"
allow-routing: "enable"
associated-interface: "<your_own_value> (source system.interface.name system.zone.name)"
cache-ttl: "5"
color: "6"
comment: "Comment."
country: "<your_own_value>"
end-ip: "<your_own_value>"
epg-name: "<your_own_value>"
filter: "<your_own_value>"
fqdn: "<your_own_value>"
list:
-
ip: "<your_own_value>"
name: "default_name_15"
obj-id: "<your_own_value>"
organization: "<your_own_value>"
policy-group: "<your_own_value>"
sdn: "aci"
sdn-tag: "<your_own_value>"
start-ip: "<your_own_value>"
subnet: "<your_own_value>"
subnet-name: "<your_own_value>"
tagging:
-
category: "<your_own_value> (source system.object-tagging.category)"
name: "default_name_26"
tags:
-
name: "default_name_28 (source system.object-tagging.tags.name)"
tenant: "<your_own_value>"
type: "ipmask"
uuid: "<your_own_value>"
visibility: "enable"
wildcard: "<your_own_value>"
wildcard-fqdn: "<your_own_value>"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: string
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: string
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: string
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: string
sample: "key1"
name:
description: Name of the table used to fulfill the request
returned: always
type: string
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: string
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: string
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: string
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: string
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: string
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: string
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
fos = None
def login(data):
    """Open a session on the module-global ``fos`` client.

    Reads ``host``, ``username`` and ``password`` from *data*; HTTPS is used
    unless the caller explicitly set ``https`` to a false value.
    """
    host = data['host']
    username = data['username']
    password = data['password']

    fos.debug('on')
    # Fall back to plain HTTP only when HTTPS was explicitly disabled.
    plain_http = 'https' in data and not data['https']
    fos.https('off' if plain_http else 'on')

    fos.login(host, username, password)
def filter_firewall_address_data(json):
    """Project *json* onto the keys the FortiOS firewall.address endpoint
    accepts.

    Unknown keys (including the module-level ``state`` flag) are dropped
    silently; insertion order follows the option list below.
    """
    option_list = ['allow-routing', 'associated-interface', 'cache-ttl',
                   'color', 'comment', 'country',
                   'end-ip', 'epg-name', 'filter',
                   'fqdn', 'list', 'name',
                   'obj-id', 'organization', 'policy-group',
                   'sdn', 'sdn-tag', 'start-ip',
                   'subnet', 'subnet-name', 'tagging',
                   'tenant', 'type', 'uuid',
                   'visibility', 'wildcard', 'wildcard-fqdn']

    return {option: json[option] for option in option_list if option in json}
def firewall_address(data, fos):
    """Apply the desired ``firewall_address`` configuration to the device.

    ``state == "present"`` creates/updates the object; ``state == "absent"``
    deletes it by name. Returns the raw FortiOS API response dict.
    """
    vdom = data['vdom']
    address_config = data['firewall_address']
    payload = filter_firewall_address_data(address_config)

    state = address_config['state']
    if state == "present":
        return fos.set('firewall', 'address', data=payload, vdom=vdom)
    elif state == "absent":
        return fos.delete('firewall', 'address', mkey=payload['name'], vdom=vdom)
def fortios_firewall(data, fos):
    """Log in, run the requested firewall sub-configuration, and log out.

    Returns an ``(is_error, has_changed, result)`` tuple suitable for
    ``AnsibleModule.exit_json`` / ``fail_json``.
    """
    login(data)

    # Explicit dispatch table instead of eval() on a method-name string:
    # safer, and easy to extend with more firewall sub-resources.
    dispatch = {
        'firewall_address': firewall_address,
    }

    resp = None
    for method_name, handler in dispatch.items():
        if data.get(method_name):
            resp = handler(data, fos)
            break

    fos.logout()

    if resp is None:
        # Previously 'resp' was left unbound here, so the return statement
        # raised NameError when no sub-configuration was supplied. Fail
        # cleanly with a structured result instead.
        return True, False, {'status': 'error',
                             'reason': 'no firewall configuration provided'}

    success = resp['status'] == "success"
    return not success, success, resp
def main():
    """Module entry point: declare the argument spec, run the requested
    firewall configuration and report the result to Ansible."""
    # Argument spec mirrors the FortiOS firewall.address schema documented
    # in DOCUMENTATION above; keys are hyphenated to match the FortiOS API.
    fields = {
        "host": {"required": True, "type": "str"},
        "username": {"required": True, "type": "str"},
        "password": {"required": False, "type": "str", "no_log": True},
        "vdom": {"required": False, "type": "str", "default": "root"},
        "https": {"required": False, "type": "bool", "default": "False"},
        "firewall_address": {
            "required": False, "type": "dict",
            "options": {
                "state": {"required": True, "type": "str",
                          "choices": ["present", "absent"]},
                "allow-routing": {"required": False, "type": "str",
                                  "choices": ["enable", "disable"]},
                "associated-interface": {"required": False, "type": "str"},
                "cache-ttl": {"required": False, "type": "int"},
                "color": {"required": False, "type": "int"},
                "comment": {"required": False, "type": "str"},
                "country": {"required": False, "type": "str"},
                "end-ip": {"required": False, "type": "str"},
                "epg-name": {"required": False, "type": "str"},
                "filter": {"required": False, "type": "str"},
                "fqdn": {"required": False, "type": "str"},
                "list": {"required": False, "type": "list",
                         "options": {
                             "ip": {"required": True, "type": "str"}
                         }},
                "name": {"required": True, "type": "str"},
                "obj-id": {"required": False, "type": "str"},
                "organization": {"required": False, "type": "str"},
                "policy-group": {"required": False, "type": "str"},
                "sdn": {"required": False, "type": "str",
                        "choices": ["aci", "aws", "azure",
                                    "gcp", "nsx", "nuage",
                                    "oci"]},
                "sdn-tag": {"required": False, "type": "str"},
                "start-ip": {"required": False, "type": "str"},
                "subnet": {"required": False, "type": "str"},
                "subnet-name": {"required": False, "type": "str"},
                "tagging": {"required": False, "type": "list",
                            "options": {
                                "category": {"required": False, "type": "str"},
                                "name": {"required": True, "type": "str"},
                                "tags": {"required": False, "type": "list",
                                         "options": {
                                             "name": {"required": True, "type": "str"}
                                         }}
                            }},
                "tenant": {"required": False, "type": "str"},
                "type": {"required": False, "type": "str",
                         "choices": ["ipmask", "iprange", "fqdn",
                                     "geography", "wildcard", "wildcard-fqdn",
                                     "dynamic"]},
                "uuid": {"required": False, "type": "str"},
                "visibility": {"required": False, "type": "str",
                               "choices": ["enable", "disable"]},
                "wildcard": {"required": False, "type": "str"},
                "wildcard-fqdn": {"required": False, "type": "str"}
            }
        }
    }

    module = AnsibleModule(argument_spec=fields,
                           supports_check_mode=False)
    # fortiosapi is imported lazily so argument parsing (and a clean failure
    # message) still works on hosts without the library installed.
    try:
        from fortiosapi import FortiOSAPI
    except ImportError:
        module.fail_json(msg="fortiosapi module is required")

    # The module-level 'fos' global is consumed by login() above.
    global fos
    fos = FortiOSAPI()

    is_error, has_changed, result = fortios_firewall(module.params, fos)

    if not is_error:
        module.exit_json(changed=has_changed, meta=result)
    else:
        module.fail_json(msg="Error in repo", meta=result)


if __name__ == '__main__':
    main()
| gpl-3.0 |
kwierman/osf.io | tests/test_websitefiles.py | 16 | 23884 | # -*- coding: utf-8 -*-
import datetime
from nose.tools import * # noqa
import mock
from modularodm import Q
from website.files import utils
from website.files import models
from website.files import exceptions
from website.models import Guid
from tests.base import OsfTestCase
from tests.factories import AuthUserFactory, ProjectFactory
class TestFileNode(models.FileNode):
    # Concrete FileNode subclass registered under the fake 'test' provider so
    # the suite can exercise provider resolution without a real storage addon.
    provider = 'test'


class TestFile(models.File, TestFileNode):
    # File (is_file=True) flavour of the 'test' provider.
    pass


class TestFolder(models.Folder, TestFileNode):
    # Folder flavour of the 'test' provider.
    pass
class FilesTestCase(OsfTestCase):
    """Base case providing one user + project and wiping file collections
    between tests."""

    def setUp(self):
        super(FilesTestCase, self).setUp()
        self.user = AuthUserFactory()
        self.node = ProjectFactory(creator=self.user)

    def tearDown(self):
        # BUG FIX: this previously called super(...).setUp() during teardown,
        # re-running base-class setup instead of cleaning up.
        super(FilesTestCase, self).tearDown()
        models.StoredFileNode.remove()
        models.TrashedFileNode.remove()
class TestFileNodeMeta(FilesTestCase):

    def test_conflicting_providers(self):
        # Declaring a second FileNode subclass that claims the already-used
        # provider name 'test' must be rejected at class-creation time.
        with assert_raises(ValueError) as e:
            class Two(models.FileNode):
                is_file = True
                provider = 'test'  # conflicts with TestFileNode above
        assert_equal(e.exception.message, 'Conflicting providers')
class TestStoredFileNode(FilesTestCase):
    """Behaviour of the raw StoredFileNode record (URLs, wrapping, GUIDs)."""

    def setUp(self):
        super(TestStoredFileNode, self).setUp()
        # One saved file record under the fake 'test' provider for each test.
        self.sfn = models.StoredFileNode(
            path='anid',
            name='name',
            is_file=True,
            node=self.node,
            provider='test',
            materialized_path='/long/path/to/name',
        )
        self.sfn.save()

    def test_deep_url(self):
        # The deep URL embeds the owning node id, provider and path.
        url = self.sfn.deep_url
        assert_true(isinstance(url, basestring))
        assert_in(self.node._id, url)
        assert_in(self.sfn.path, url)
        assert_in(self.sfn.provider, url)

    def test_deep_url_unicode(self):
        # Non-ASCII paths must still produce a valid string URL.
        self.sfn.path = u'༼ つ ͠° ͟ ͟ʖ ͡° ༽つ'
        self.sfn.save()
        url = self.sfn.deep_url
        assert_true(isinstance(url, basestring))
        assert_in(self.node._id, url)
        # Path is URL-encoded in the deep URL, so a direct substring check
        # would fail:
        # assert_in(self.sfn.path, url)
        assert_in(self.sfn.provider, url)

    def test_wrapped(self):
        # wrapped() resolves the stored record to its provider-specific class.
        assert_true(isinstance(self.sfn.wrapped(), TestFile))

    def test_wrapped_invalid_provider(self):
        # An unregistered provider name cannot be wrapped.
        with assert_raises(exceptions.SubclassNotFound):
            self.sfn.provider = 'the ocean'
            self.sfn.wrapped()

    def test_get_guid_no_create(self):
        # Without create=True a missing GUID is not materialized.
        assert_is(self.sfn.get_guid(), None)

    def test_get_guid_create(self):
        # create=True mints a GUID pointing back at the record, and the GUID
        # is returned on subsequent lookups.
        guid = self.sfn.get_guid(create=True)
        assert_equal(guid.referent, self.sfn)
        assert_equal(self.sfn.get_guid(), guid)
class TestFileNodeObj(FilesTestCase):
def test_create(self):
with assert_raises(AssertionError):
TestFileNode.create()
with assert_raises(AssertionError):
models.File.create()
working = TestFile.create(name='myname')
assert_equals(working.is_file, True)
assert_equals(working.name, 'myname')
assert_equals(working.provider, 'test')
def test_get_or_create(self):
created = TestFile.get_or_create(self.node, 'Path')
created.name = 'kerp'
created.materialized_path = 'crazypath'
created.save()
found = TestFile.get_or_create(self.node, '/Path')
assert_equals(found.name, 'kerp')
assert_equals(found.materialized_path, 'crazypath')
def test_get_file_guids(self):
created = TestFile.get_or_create(self.node, 'Path')
created.name = 'kerp'
created.materialized_path = '/Path'
created.get_guid(create=True)
created.save()
file_guids = TestFile.get_file_guids(materialized_path=created.materialized_path,
provider=created.provider,
node=self.node)
assert_in(created.get_guid()._id, file_guids)
def test_get_file_guids_with_folder_path(self):
created = TestFile.get_or_create(self.node, 'folder/Path')
created.name = 'kerp'
created.materialized_path = '/folder/Path'
created.get_guid(create=True)
created.save()
file_guids = TestFile.get_file_guids(materialized_path='folder/',
provider=created.provider,
node=self.node)
assert_in(created.get_guid()._id, file_guids)
def test_get_file_guids_with_folder_path_does_not_include_deleted_files(self):
created = TestFile.get_or_create(self.node, 'folder/Path')
created.name = 'kerp'
created.materialized_path = '/folder/Path'
guid = created.get_guid(create=True)
created.save()
created.delete()
file_guids = TestFile.get_file_guids(materialized_path='folder/',
provider=created.provider,
node=self.node)
assert_not_in(guid._id, file_guids)
def test_kind(self):
assert_equals(TestFile().kind, 'file')
assert_equals(TestFolder().kind, 'folder')
def test_filter_build(self):
qs = TestFile._filter(Q('test', 'eq', 'test'))
_, is_file, provider = qs.nodes
assert_equal(is_file.__dict__, Q('is_file', 'eq', True).__dict__)
assert_equal(provider.__dict__, Q('provider', 'eq', 'test').__dict__)
def test_resolve_class(self):
assert_equal(
TestFile,
models.FileNode.resolve_class('test', models.FileNode.FILE)
)
assert_equal(
TestFolder,
models.FileNode.resolve_class('test', models.FileNode.FOLDER)
)
assert_equal(
TestFileNode,
models.FileNode.resolve_class('test', models.FileNode.ANY)
)
def test_find(self):
models.StoredFileNode.remove_one(self.node.get_addon('osfstorage').root_node)
models.StoredFileNode(
path='afile',
name='name',
is_file=True,
node=self.node,
provider='test',
materialized_path='/long/path/to/name',
).save()
models.StoredFileNode(
path='afolder',
name='name',
is_file=False,
node=self.node,
provider='test',
materialized_path='/long/path/to/name2/',
).save()
expected = ['afile', 'afolder']
select = lambda y: [x.path for x in y.find()]
assert_equal(expected, select(models.FileNode))
assert_equal(expected, select(TestFileNode))
assert_equal(['afile'], select(TestFile))
assert_equal(['afile'], select(models.File))
assert_equal(['afolder'], select(TestFolder))
assert_equal(['afolder'], select(models.Folder))
def test_find_one(self):
models.StoredFileNode(
path='afile',
name='name',
is_file=True,
node=self.node,
provider='test',
materialized_path='/long/path/to/name',
).save()
found = TestFile.find_one(Q('path', 'eq', 'afile'))
assert_true(isinstance(found, TestFile))
assert_equal(found.materialized_path, '/long/path/to/name')
def test_load(self):
item = models.StoredFileNode(
path='afolder',
name='name',
is_file=False,
node=self.node,
provider='test',
materialized_path='/long/path/to/name2/',
)
item.save()
assert_is(models.FileNode.load('notanid'), None)
assert_true(isinstance(TestFolder.load(item._id), TestFolder))
assert_true(isinstance(models.FileNode.load(item._id), TestFolder))
with assert_raises(AssertionError):
TestFile.load(item._id)
def test_parent(self):
parent = models.StoredFileNode(
path='afolder',
name='name',
is_file=False,
node=self.node,
provider='test',
materialized_path='/long/path/to/name2/',
).wrapped()
parent.save()
child = models.StoredFileNode(
path='afile',
name='name',
is_file=True,
node=self.node,
provider='test',
materialized_path='/long/path/to/name',
).wrapped()
child.save()
assert_is(child.parent, None)
assert_false(isinstance(parent, models.StoredFileNode))
child.parent = parent
assert_true(isinstance(child.parent, models.FileNode))
child.parent = parent.stored_object
assert_true(isinstance(child.parent, models.FileNode))
def test_save(self):
child = models.StoredFileNode(
path='afile',
name='name',
is_file=True,
node=self.node,
provider='test',
materialized_path='/long/path/to/name',
).wrapped()
assert_false(child._is_loaded)
assert_false(child.stored_object._is_loaded)
child.save()
assert_true(child._is_loaded)
assert_true(child.stored_object._is_loaded)
def test_delete(self):
models.StoredFileNode(
path='afile',
name='name',
is_file=True,
node=self.node,
provider='test',
materialized_path='/long/path/to/name',
).wrapped().delete()
trashed = models.TrashedFileNode.find_one()
assert_equal(trashed.path, 'afile')
assert_equal(trashed.node, self.node)
assert_equal(trashed.materialized_path, '/long/path/to/name')
def test_delete_with_guid(self):
fn = models.StoredFileNode(
path='afile',
name='name',
is_file=True,
node=self.node,
provider='test',
materialized_path='/long/path/to/name',
).wrapped()
guid = fn.get_guid(create=True)
fn.delete()
trashed = models.TrashedFileNode.find_one()
guid.reload()
assert_equal(guid.referent, trashed)
assert_equal(trashed.path, 'afile')
assert_equal(trashed.node, self.node)
assert_equal(trashed.materialized_path, '/long/path/to/name')
assert_less((trashed.deleted_on - datetime.datetime.utcnow()).total_seconds(), 5)
def test_delete_with_user(self):
fn = models.StoredFileNode(
path='afile',
name='name',
is_file=True,
node=self.node,
provider='test',
materialized_path='/long/path/to/name',
).wrapped()
fn.delete(user=self.user)
trashed = models.TrashedFileNode.find_one()
assert_equal(trashed.deleted_by, self.user)
assert_equal(models.StoredFileNode.load(fn._id), None)
def test_restore_file(self):
root = models.StoredFileNode(
path='root',
name='rootfolder',
is_file=False,
node=self.node,
provider='test',
materialized_path='/long/path/to',
).wrapped()
root.save()
fn = models.StoredFileNode(
parent=root._id,
path='afile',
name='name',
is_file=True,
node=self.node,
provider='test',
materialized_path='/long/path/to/name',
).wrapped()
guid = Guid.generate(fn)
before = fn.to_storage()
trashed = fn.delete(user=self.user)
restored = trashed.restore()
assert_equal(
restored.to_storage(),
before
)
assert_equal(models.TrashedFileNode.load(trashed._id), None)
# Guid is repointed
guid.reload()
assert_equal(guid.referent, restored)
def test_restore_folder(self):
root = models.StoredFileNode(
path='root',
name='rootfolder',
is_file=False,
node=self.node,
provider='test',
materialized_path='/long/path/to/',
).wrapped()
root.save()
fn = models.StoredFileNode(
parent=root._id,
path='afolder',
name='folder_name',
is_file=False,
node=self.node,
provider='test',
materialized_path='/long/path/to/folder_name',
).wrapped()
before = fn.to_storage()
trashed = fn.delete(user=self.user)
assert_equal(
trashed.restore().to_storage(),
before
)
assert_equal(models.TrashedFileNode.load(trashed._id), None)
def test_restore_folder_nested(self):
def build_tree(acc=None, parent=None, atleastone=False):
import random
acc = acc or []
if len(acc) > 50:
return acc
is_folder = atleastone
for i in range(random.randrange(3, 15)):
fn = models.StoredFileNode(
path='name{}'.format(i),
name='name{}'.format(i),
is_file=not is_folder,
node=self.node,
parent=parent._id,
provider='test',
materialized_path='{}/{}'.format(parent.materialized_path, 'name{}'.format(i)),
).wrapped()
fn.save()
random.randint(0, 5) == 1
if is_folder:
build_tree(acc, fn)
acc.append(fn)
is_folder = random.randint(0, 5) == 1
return acc
root = models.StoredFileNode(
path='root',
name='rootfolder',
is_file=False,
node=self.node,
provider='test',
materialized_path='/long/path/to/',
).wrapped()
root.save()
parent = models.StoredFileNode(
parent=root._id,
path='afolder',
name='folder_name',
is_file=False,
node=self.node,
provider='test',
materialized_path='/long/path/to/folder_name',
).wrapped()
parent.save()
branch = models.StoredFileNode(
path='afolder',
name='folder_name',
is_file=False,
node=self.node,
provider='test',
parent=parent._id,
materialized_path='/long/path/to/folder_name',
).wrapped()
branch.save()
round1 = build_tree(parent=branch, atleastone=True)
round2 = build_tree(parent=parent, atleastone=True)
stay_deleted = [branch.to_storage()] + [child.to_storage() for child in round1]
get_restored = [parent.to_storage()] + [child.to_storage() for child in round2]
branch.delete()
for data in stay_deleted:
assert_true(models.TrashedFileNode.load(data['_id']))
assert_is(models.StoredFileNode.load(data['_id']), None)
trashed = parent.delete()
for data in get_restored:
assert_true(models.TrashedFileNode.load(data['_id']))
assert_is(models.StoredFileNode.load(data['_id']), None)
trashed.restore()
for data in stay_deleted:
assert_true(models.TrashedFileNode.load(data['_id']))
assert_is(models.StoredFileNode.load(data['_id']), None)
for data in get_restored:
assert_is(models.TrashedFileNode.load(data['_id']), None)
assert_equals(models.StoredFileNode.load(data['_id']).to_storage(), data)
def test_metadata_url(self):
pass
def test_move_under(self):
pass
def test_copy_under(self):
pass
def test_attr_passthrough(self):
stored = models.StoredFileNode(
path='afile',
name='name',
is_file=True,
node=self.node,
provider='test',
materialized_path='/long/path/to/name',
)
stored.test = 'Foo'
stored.bar = ['wat']
wrapped = stored.wrapped()
wrapped.bar.append('wat')
assert_equal(stored.bar, wrapped.bar)
assert_equal(stored.test, wrapped.test)
def test_repr(self):
child = models.StoredFileNode(
path='afile',
name='name',
is_file=True,
node=self.node,
provider='test',
materialized_path='/long/path/to/name',
).wrapped()
assert_true(isinstance(child.__repr__(), basestring))
class TestFileObj(FilesTestCase):
def test_get_version(self):
v1 = models.FileVersion(identifier='1')
v2 = models.FileVersion(identifier='2')
v1.save()
v2.save()
file = models.StoredFileNode(
path='afile',
name='name',
is_file=True,
node=self.node,
provider='test',
materialized_path='/long/path/to/name',
).wrapped()
file.versions.extend([v1, v2])
assert_equals(file.get_version('1'), v1)
assert_equals(file.get_version('2', required=True), v2)
assert_is(file.get_version('3'), None)
with assert_raises(exceptions.VersionNotFoundError):
file.get_version('3', required=True)
def test_update_version_metadata(self):
v1 = models.FileVersion(identifier='1')
v1.save()
file = models.StoredFileNode(
path='afile',
name='name',
is_file=True,
node=self.node,
provider='test',
materialized_path='/long/path/to/name',
).wrapped()
file.versions.append(v1)
file.update_version_metadata(None, {'size': 1337})
with assert_raises(exceptions.VersionNotFoundError):
file.update_version_metadata('3', {})
assert_equal(v1.size, 1337)
@mock.patch('website.files.models.base.requests.get')
def test_touch(self, mock_requests):
file = models.StoredFileNode(
path='/afile',
name='name',
is_file=True,
node=self.node,
provider='test',
materialized_path='/long/path/to/name',
).wrapped()
mock_requests.return_value = mock.Mock(status_code=400)
assert_is(file.touch(None), None)
mock_response = mock.Mock(status_code=200)
mock_response.json.return_value = {
'data': {
'attributes': {
'name': 'fairly',
'modified': '2015',
'size': 0xDEADBEEF,
'materialized': 'ephemeral',
}
}
}
mock_requests.return_value = mock_response
v = file.touch(None)
assert_equals(v.size, 0xDEADBEEF)
assert_equals(len(file.versions), 0)
@mock.patch('website.files.models.base.requests.get')
def test_touch_caching(self, mock_requests):
file = models.StoredFileNode(
path='/afile',
name='name',
is_file=True,
node=self.node,
provider='test',
materialized_path='/long/path/to/name',
).wrapped()
mock_response = mock.Mock(status_code=200)
mock_response.json.return_value = {
'data': {
'attributes': {
'name': 'fairly',
'modified': '2015',
'size': 0xDEADBEEF,
'materialized': 'ephemeral',
}
}
}
mock_requests.return_value = mock_response
v = file.touch(None, revision='foo')
assert_equals(len(file.versions), 1)
assert_is(file.touch(None, revision='foo'), v)
@mock.patch('website.files.models.base.requests.get')
def test_touch_auth(self, mock_requests):
file = models.StoredFileNode(
path='/afile',
name='name',
is_file=True,
node=self.node,
provider='test',
materialized_path='/long/path/to/name',
).wrapped()
mock_response = mock.Mock(status_code=404)
mock_requests.return_value = mock_response
file.touch('Bearer bearer', revision='foo')
assert_equal(mock_requests.call_args[1]['headers'], {
'Authorization': 'Bearer bearer'
})
def test_download_url(self):
pass
def test_serialize(self):
pass
def test_get_download_count(self):
pass
class TestFolderObj(FilesTestCase):
    """Folder-specific behaviour: children listing, recursive delete, append."""

    def setUp(self):
        super(TestFolderObj, self).setUp()
        # A saved folder under the fake 'test' provider to parent children on.
        self.parent = models.StoredFileNode(
            path='aparent',
            name='parent',
            is_file=False,
            node=self.node,
            provider='test',
            materialized_path='/long/path/to/name',
        ).wrapped()
        self.parent.save()

    def test_children(self):
        # Each saved child with parent=self.parent shows up in .children.
        models.StoredFileNode(
            path='afile',
            name='child',
            is_file=True,
            node=self.node,
            parent=self.parent._id,
            provider='test',
            materialized_path='/long/path/to/name',
        ).save()
        assert_equal(len(list(self.parent.children)), 1)
        models.StoredFileNode(
            path='afile2',
            name='child2',
            is_file=True,
            node=self.node,
            parent=self.parent._id,
            provider='test',
            materialized_path='/long/path/to/name',
        ).save()
        assert_equal(len(list(self.parent.children)), 2)

    def test_delete(self):
        # Deleting a folder trashes its children too, and repoints both the
        # folder's and the child's GUIDs at the trashed records.
        child = models.StoredFileNode(
            path='afile',
            name='child',
            is_file=True,
            node=self.node,
            parent=self.parent._id,
            provider='test',
            materialized_path='/long/path/to/name',
        ).wrapped()
        child.save()
        guid = self.parent.get_guid(create=True)
        child_guid = child.get_guid(create=True)
        trashed_parent = self.parent.delete(user=self.user)
        guid.reload()
        child_guid.reload()
        assert_equal(
            trashed_parent,
            models.TrashedFileNode.load(child._id).parent
        )
        assert_equal(trashed_parent, guid.referent)
        assert_equal(child_guid.referent, models.TrashedFileNode.load(child._id))

    def test_append_file(self):
        # append_file adds exactly one child (the tuple-unpack asserts arity).
        self.parent.append_file('Name')
        (child, ) = list(self.parent.children)

    def test_append_folder(self):
        # TODO: not yet implemented.
        pass

    def test_find_child_by_name(self):
        # TODO: not yet implemented.
        pass
class TestUtils(FilesTestCase):
    """Behaviour of the GenWrapper helper returned by FileNode.find()."""

    def test_genwrapper_repr(self):
        # find() returns a GenWrapper whose repr embeds the wrapped query set.
        wrapped = models.FileNode.find()
        assert_true(isinstance(wrapped, utils.GenWrapper))
        assert_in(wrapped.mqs.__repr__(), wrapped.__repr__())

    def test_genwrapper_getattr(self):
        # Unknown attributes propagate as AttributeError with a clear message.
        with assert_raises(AttributeError) as e:
            models.FileNode.find().test
        assert_equal(e.exception.message, "'GenWrapper' object has no attribute 'test'")
class TestFileVersion(FilesTestCase):
    # Placeholder: no direct FileVersion tests yet (versions are exercised
    # indirectly via the get_version/touch tests above).
    pass
class TestSubclasses(FilesTestCase):
    """Provider-specific FileNode subclasses delegate to the base File API."""

    @mock.patch.object(models.File, 'touch')
    def test_s3file(self, mock_touch):
        # S3File must forward every touch() call signature unchanged to the
        # base File implementation (verified via the patched method).
        file = models.S3File.create(
            path='afile2',
            name='child2',
            is_file=True,
            node=self.node,
            provider='test',
            materialized_path='/long/path/to/name',
        )
        file.touch(None)
        file.touch('bar', version='foo')
        file.touch(None, version='zyzz', bar='baz')
        mock_touch.assert_has_calls([
            mock.call(None),
            mock.call('bar', version='foo'),
            mock.call(None, version='zyzz', bar='baz'),
        ])
| apache-2.0 |
solashirai/edx-platform | common/djangoapps/third_party_auth/tests/specs/test_testshib.py | 46 | 5259 | """
Third_party_auth integration tests using a mock version of the TestShib provider
"""
import unittest
import httpretty
from mock import patch
from third_party_auth.tasks import fetch_saml_metadata
from third_party_auth.tests import testutil
from .base import IntegrationTestMixin
TESTSHIB_ENTITY_ID = 'https://idp.testshib.org/idp/shibboleth'
TESTSHIB_METADATA_URL = 'https://mock.testshib.org/metadata/testshib-providers.xml'
TESTSHIB_SSO_URL = 'https://idp.testshib.org/idp/profile/SAML2/Redirect/SSO'
@unittest.skipUnless(testutil.AUTH_FEATURE_ENABLED, 'third_party_auth not enabled')
class TestShibIntegrationTest(IntegrationTestMixin, testutil.SAMLTestCase):
"""
TestShib provider Integration Test, to test SAML functionality
"""
PROVIDER_ID = "saml-testshib"
PROVIDER_NAME = "TestShib"
PROVIDER_BACKEND = "tpa-saml"
USER_EMAIL = "myself@testshib.org"
USER_NAME = "Me Myself And I"
USER_USERNAME = "myself"
def setUp(self):
super(TestShibIntegrationTest, self).setUp()
self.enable_saml(
private_key=self._get_private_key(),
public_key=self._get_public_key(),
entity_id="https://saml.example.none",
)
# Mock out HTTP requests that may be made to TestShib:
httpretty.enable()
def metadata_callback(_request, _uri, headers):
""" Return a cached copy of TestShib's metadata by reading it from disk """
return (200, headers, self.read_data_file('testshib_metadata.xml'))
httpretty.register_uri(httpretty.GET, TESTSHIB_METADATA_URL, content_type='text/xml', body=metadata_callback)
self.addCleanup(httpretty.disable)
self.addCleanup(httpretty.reset)
# Configure the SAML library to use the same request ID for every request.
# Doing this and freezing the time allows us to play back recorded request/response pairs
uid_patch = patch('onelogin.saml2.utils.OneLogin_Saml2_Utils.generate_unique_id', return_value='TESTID')
uid_patch.start()
self.addCleanup(uid_patch.stop)
self._freeze_time(timestamp=1434326820) # This is the time when the saved request/response was recorded.
def test_login_before_metadata_fetched(self):
self._configure_testshib_provider(fetch_metadata=False)
# The user goes to the login page, and sees a button to login with TestShib:
testshib_login_url = self._check_login_page()
# The user clicks on the TestShib button:
try_login_response = self.client.get(testshib_login_url)
# The user should be redirected to back to the login page:
self.assertEqual(try_login_response.status_code, 302)
self.assertEqual(try_login_response['Location'], self.url_prefix + self.login_page_url)
# When loading the login page, the user will see an error message:
response = self.client.get(self.login_page_url)
self.assertEqual(response.status_code, 200)
self.assertIn('Authentication with TestShib is currently unavailable.', response.content)
def test_login(self):
""" Configure TestShib before running the login test """
self._configure_testshib_provider()
super(TestShibIntegrationTest, self).test_login()
def test_register(self):
""" Configure TestShib before running the register test """
self._configure_testshib_provider()
super(TestShibIntegrationTest, self).test_register()
def _freeze_time(self, timestamp):
""" Mock the current time for SAML, so we can replay canned requests/responses """
now_patch = patch('onelogin.saml2.utils.OneLogin_Saml2_Utils.now', return_value=timestamp)
now_patch.start()
self.addCleanup(now_patch.stop)
def _configure_testshib_provider(self, **kwargs):
""" Enable and configure the TestShib SAML IdP as a third_party_auth provider """
fetch_metadata = kwargs.pop('fetch_metadata', True)
kwargs.setdefault('name', 'TestShib')
kwargs.setdefault('enabled', True)
kwargs.setdefault('idp_slug', 'testshib')
kwargs.setdefault('entity_id', TESTSHIB_ENTITY_ID)
kwargs.setdefault('metadata_source', TESTSHIB_METADATA_URL)
kwargs.setdefault('icon_class', 'fa-university')
kwargs.setdefault('attr_email', 'urn:oid:1.3.6.1.4.1.5923.1.1.1.6') # eduPersonPrincipalName
self.configure_saml_provider(**kwargs)
if fetch_metadata:
self.assertTrue(httpretty.is_enabled())
num_changed, num_failed, num_total = fetch_saml_metadata()
self.assertEqual(num_failed, 0)
self.assertEqual(num_changed, 1)
self.assertEqual(num_total, 1)
    def do_provider_login(self, provider_redirect_url):
        """ Mocked: the user logs in to TestShib and then gets redirected back """
        # The SAML provider (TestShib) will authenticate the user, then get the browser to POST a response:
        self.assertTrue(provider_redirect_url.startswith(TESTSHIB_SSO_URL))
        # Replay a canned SAMLResponse; it validates only because the request
        # ID and clock were frozen earlier (see _freeze_time).
        return self.client.post(
            self.complete_url,
            content_type='application/x-www-form-urlencoded',
            data=self.read_data_file('testshib_response.txt'),
        )
| agpl-3.0 |
vovojh/gem5 | tests/configs/memtest-filter.py | 23 | 3208 | # Copyright (c) 2006-2007 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Ron Dreslinski
import m5
from m5.objects import *
m5.util.addToPath('../configs/common')
from Caches import *
# MAX CORES IS 8 with the false sharing method
nb_cores = 8
cpus = [ MemTest() for i in xrange(nb_cores) ]

# system simulated: memory-bus crossbar with a snoop filter, backed by a
# simple memory controller.
system = System(cpu = cpus,
                physmem = SimpleMemory(),
                membus = SystemXBar(width=16, snoop_filter = SnoopFilter()))

# Dummy voltage domain for all our clock domains
system.voltage_domain = VoltageDomain()
system.clk_domain = SrcClockDomain(clock = '1GHz',
                                   voltage_domain = system.voltage_domain)

# Create a separate clock domain for components that should run at
# CPUs frequency
system.cpu_clk_domain = SrcClockDomain(clock = '2GHz',
                                       voltage_domain = system.voltage_domain)

# Shared L2 crossbar, also snoop-filtered, clocked at CPU speed.
system.toL2Bus = L2XBar(clk_domain = system.cpu_clk_domain,
                        snoop_filter = SnoopFilter())
system.l2c = L2Cache(clk_domain = system.cpu_clk_domain, size='64kB', assoc=8)
system.l2c.cpu_side = system.toL2Bus.master

# connect l2c to membus
system.l2c.mem_side = system.membus.slave

# add L1 caches: one private 32kB L1 per MemTest CPU, all feeding the L2 bus.
for cpu in cpus:
    # All cpus are associated with cpu_clk_domain
    cpu.clk_domain = system.cpu_clk_domain
    cpu.l1c = L1Cache(size = '32kB', assoc = 4)
    cpu.l1c.cpu_side = cpu.port
    cpu.l1c.mem_side = system.toL2Bus.slave

# Functional port used by the simulator itself for loading/inspection.
system.system_port = system.membus.slave

# connect memory to membus
system.physmem.port = system.membus.master

# -----------------------
# run simulation
# -----------------------
root = Root( full_system = False, system = system )
root.system.mem_mode = 'timing'
| bsd-3-clause |
apache/arrow | python/pyarrow/tests/test_feather.py | 4 | 22981 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import io
import os
import sys
import tempfile
import pytest
import hypothesis as h
import hypothesis.strategies as st
import numpy as np
import pyarrow as pa
import pyarrow.tests.strategies as past
from pyarrow.feather import (read_feather, write_feather, read_table,
FeatherDataset)
try:
from pandas.testing import assert_frame_equal
import pandas as pd
import pyarrow.pandas_compat
except ImportError:
pass
@pytest.fixture(scope='module')
def datadir(base_datadir):
    """Module-scoped fixture: path to the checked-in feather test data."""
    return base_datadir / 'feather'
def random_path(prefix='feather_'):
    """Return a unique scratch-file path under the system temp directory.

    Uses mkstemp() instead of the deprecated, race-prone mktemp().  The
    empty placeholder file mkstemp creates is harmless: tests simply
    overwrite it, and callers register the path in TEST_FILES so
    teardown_module() removes it either way.
    """
    fd, path = tempfile.mkstemp(prefix=prefix)
    os.close(fd)  # only the path is needed; don't leak the descriptor
    return path
@pytest.fixture(scope="module", params=[1, 2])
def version(request):
yield request.param
@pytest.fixture(scope="module", params=[None, "uncompressed", "lz4", "zstd"])
def compression(request):
yield request.param
TEST_FILES = None  # list of scratch paths to delete; initialized in setup_module()
def setup_module(module):
    """Reset the module-level registry of scratch files."""
    global TEST_FILES
    TEST_FILES = []
def teardown_module(module):
    """Best-effort removal of every scratch file registered during the tests."""
    for path in TEST_FILES:
        try:
            os.remove(path)
        except os.error:
            # The path may never have been written (or was already removed).
            pass
@pytest.mark.pandas
def test_file_not_exist():
    """Reading a nonexistent path raises ArrowIOError."""
    with pytest.raises(pa.ArrowIOError):
        read_feather('test_invalid_file')
def _check_pandas_roundtrip(df, expected=None, path=None,
                            columns=None, use_threads=False,
                            version=None, compression=None,
                            compression_level=None):
    """Write *df* to Feather and assert reading it back equals *expected*.

    When *expected* is None, *df* itself is the expected result.  A fresh
    scratch path is allocated when *path* is None; either way the path is
    registered in TEST_FILES so teardown_module() cleans it up.
    """
    target = random_path() if path is None else path
    TEST_FILES.append(target)
    write_feather(df, target, compression=compression,
                  compression_level=compression_level, version=version)
    if not os.path.exists(target):
        raise Exception('file not written')
    result = read_feather(target, columns, use_threads=use_threads)
    assert_frame_equal(result, df if expected is None else expected)
def _check_arrow_roundtrip(table, path=None, compression=None):
    """Write *table* to Feather and assert read_table() returns an equal table."""
    target = random_path() if path is None else path
    TEST_FILES.append(target)  # ensure teardown_module() removes the file
    write_feather(table, target, compression=compression)
    if not os.path.exists(target):
        raise Exception('file not written')
    assert read_table(target).equals(table)
def _assert_error_on_write(df, exc, path=None, version=2):
    """Assert that write_feather(df, ...) raises *exc* for the given version."""
    if path is None:
        path = random_path()
    TEST_FILES.append(path)
    # Context-manager form replaces the original inner-function + raises(f)
    # pattern; the asserted behavior is identical.
    with pytest.raises(exc):
        write_feather(df, path, version=version)
def test_dataset(version):
    """FeatherDataset reads multiple files back as one concatenated table."""
    num_values = (100, 100)
    num_files = 5
    paths = [random_path() for i in range(num_files)]
    data = {
        "col_" + str(i): np.random.randn(num_values[0])
        for i in range(num_values[1])
    }
    table = pa.table(data)

    TEST_FILES.extend(paths)
    for index, path in enumerate(paths):
        # Each file holds one contiguous (100 // 5)-row slice of the table.
        rows = (
            index * (num_values[0] // num_files),
            (index + 1) * (num_values[0] // num_files),
        )
        write_feather(table[rows[0]: rows[1]], path, version=version)

    data = FeatherDataset(paths).read_table()
    assert data.equals(table)
@pytest.mark.pandas
def test_float_no_nulls(version):
    """Round-trip float32/float64 columns with no nulls through pandas."""
    num_values = 100
    data = {
        dtype: np.random.randn(num_values).astype(dtype)
        for dtype in ('f4', 'f8')
    }
    _check_pandas_roundtrip(pd.DataFrame(data), version=version)
@pytest.mark.pandas
def test_read_table(version):
    """read_table() round-trips a 100x100 table, with and without memory mapping."""
    num_values = (100, 100)
    path = random_path()

    TEST_FILES.append(path)

    values = np.random.randint(0, 100, size=num_values)
    columns = ['col_' + str(i) for i in range(100)]
    table = pa.Table.from_arrays(values, columns)

    write_feather(table, path, version=version)

    result = read_table(path)
    assert result.equals(table)

    # Test without memory mapping
    result = read_table(path, memory_map=False)
    assert result.equals(table)

    result = read_feather(path, memory_map=False)
    assert_frame_equal(table.to_pandas(), result)
@pytest.mark.pandas
def test_float_nulls(version):
    """Round-trip float columns containing nulls (masked values)."""
    num_values = 100

    path = random_path()
    TEST_FILES.append(path)

    null_mask = np.random.randint(0, 10, size=num_values) < 3
    dtypes = ['f4', 'f8']
    expected_cols = []

    arrays = []
    for name in dtypes:
        values = np.random.randn(num_values).astype(name)
        arrays.append(pa.array(values, mask=null_mask))

        # NOTE(review): expected_cols is built here but never consulted
        # below; the real checks are the roundtrip helpers on `table`/`df`.
        values[null_mask] = np.nan

        expected_cols.append(values)

    table = pa.table(arrays, names=dtypes)
    _check_arrow_roundtrip(table)

    df = table.to_pandas()
    _check_pandas_roundtrip(df, version=version)
@pytest.mark.pandas
def test_integer_no_nulls(version):
    """Round-trip all fixed-width integer dtypes without nulls."""
    data, arr = {}, []

    numpy_dtypes = ['i1', 'i2', 'i4', 'i8',
                    'u1', 'u2', 'u4', 'u8']
    num_values = 100

    for dtype in numpy_dtypes:
        values = np.random.randint(0, 100, size=num_values)
        data[dtype] = values.astype(dtype)
        arr.append(values.astype(dtype))

    df = pd.DataFrame(data)
    _check_pandas_roundtrip(df, version=version)

    table = pa.table(arr, names=numpy_dtypes)
    _check_arrow_roundtrip(table)
@pytest.mark.pandas
def test_platform_numpy_integers(version):
    """Round-trip platform-dependent numpy integer dtypes (longlong)."""
    data = {}

    numpy_dtypes = ['longlong']
    num_values = 100

    for dtype in numpy_dtypes:
        values = np.random.randint(0, 100, size=num_values)
        data[dtype] = values.astype(dtype)

    df = pd.DataFrame(data)
    _check_pandas_roundtrip(df, version=version)
@pytest.mark.pandas
def test_integer_with_nulls(version):
    # pandas requires upcast to float dtype
    path = random_path()
    TEST_FILES.append(path)

    int_dtypes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8']
    num_values = 100

    arrays = []

    null_mask = np.random.randint(0, 10, size=num_values) < 3

    expected_cols = []
    for name in int_dtypes:
        values = np.random.randint(0, 100, size=num_values)
        arrays.append(pa.array(values, mask=null_mask))

        # NOTE(review): `expected`/`expected_cols` are dead code -- never
        # consulted below; correctness is checked via the roundtrip helpers.
        expected = values.astype('f8')
        expected[null_mask] = np.nan

        expected_cols.append(expected)

    table = pa.table(arrays, names=int_dtypes)
    _check_arrow_roundtrip(table)

    df = table.to_pandas()
    _check_pandas_roundtrip(df, version=version)
@pytest.mark.pandas
def test_boolean_no_nulls(version):
    """Round-trip a boolean column without nulls."""
    num_values = 100

    np.random.seed(0)

    df = pd.DataFrame({'bools': np.random.randn(num_values) > 0})
    _check_pandas_roundtrip(df, version=version)
@pytest.mark.pandas
def test_boolean_nulls(version):
    """Round-trip a boolean column with nulls."""
    # pandas requires upcast to object dtype
    path = random_path()
    TEST_FILES.append(path)

    num_values = 100
    np.random.seed(0)

    mask = np.random.randint(0, 10, size=num_values) < 3
    values = np.random.randint(0, 10, size=num_values) < 5

    table = pa.table([pa.array(values, mask=mask)], names=['bools'])
    _check_arrow_roundtrip(table)

    df = table.to_pandas()
    _check_pandas_roundtrip(df, version=version)
def test_buffer_bounds_error(version):
    """Regression test for ARROW-1676: leading null over many array lengths."""
    # ARROW-1676
    path = random_path()
    TEST_FILES.append(path)

    for i in range(16, 256):
        table = pa.Table.from_arrays(
            [pa.array([None] + list(range(i)), type=pa.float64())],
            names=["arr"]
        )
        _check_arrow_roundtrip(table)
def test_boolean_object_nulls(version):
    """Round-trip an object-dtype boolean array containing None."""
    repeats = 100
    table = pa.Table.from_arrays(
        [np.array([False, None, True] * repeats, dtype=object)],
        names=["arr"]
    )
    _check_arrow_roundtrip(table)
@pytest.mark.pandas
def test_delete_partial_file_on_error(version):
    """A failed write must not leave a partial file behind on disk."""
    if sys.platform == 'win32':
        pytest.skip('Windows hangs on to file handle for some reason')

    class CustomClass:
        pass

    # strings will fail
    df = pd.DataFrame(
        {
            'numbers': range(5),
            'strings': [b'foo', None, 'bar', CustomClass(), np.nan]},
        columns=['numbers', 'strings'])

    # Deliberately NOT added to TEST_FILES: the file should not exist.
    path = random_path()
    try:
        write_feather(df, path, version=version)
    except Exception:
        pass

    assert not os.path.exists(path)
@pytest.mark.pandas
def test_strings(version):
    """Mixed bytes/unicode coerces to binary; all-text columns stay unicode."""
    repeats = 1000

    # Mixed bytes, unicode, strings coerced to binary
    values = [b'foo', None, 'bar', 'qux', np.nan]
    df = pd.DataFrame({'strings': values * repeats})

    ex_values = [b'foo', None, b'bar', b'qux', np.nan]
    expected = pd.DataFrame({'strings': ex_values * repeats})
    _check_pandas_roundtrip(df, expected, version=version)

    # embedded nulls are ok
    values = ['foo', None, 'bar', 'qux', None]
    df = pd.DataFrame({'strings': values * repeats})
    expected = pd.DataFrame({'strings': values * repeats})
    _check_pandas_roundtrip(df, expected, version=version)

    values = ['foo', None, 'bar', 'qux', np.nan]
    df = pd.DataFrame({'strings': values * repeats})
    expected = pd.DataFrame({'strings': values * repeats})
    _check_pandas_roundtrip(df, expected, version=version)
@pytest.mark.pandas
def test_empty_strings(version):
    """A column of empty strings round-trips unchanged."""
    df = pd.DataFrame({'strings': [''] * 10})
    _check_pandas_roundtrip(df, version=version)
@pytest.mark.pandas
def test_all_none(version):
    """A column that is entirely None round-trips unchanged."""
    df = pd.DataFrame({'all_none': [None] * 10})
    _check_pandas_roundtrip(df, version=version)
@pytest.mark.pandas
def test_all_null_category(version):
    """Regression test for ARROW-1188: all-null categorical column."""
    # ARROW-1188
    df = pd.DataFrame({"A": (1, 2, 3), "B": (None, None, None)})
    df = df.assign(B=df.B.astype("category"))
    _check_pandas_roundtrip(df, version=version)
@pytest.mark.pandas
def test_multithreaded_read(version):
    """Reading 100 columns with use_threads=True matches the input."""
    data = {'c{}'.format(i): [''] * 10
            for i in range(100)}
    df = pd.DataFrame(data)
    _check_pandas_roundtrip(df, use_threads=True, version=version)
@pytest.mark.pandas
def test_nan_as_null(version):
    """A float NaN that is not the np.nan singleton is still stored as null."""
    # Create a nan that is not numpy.nan
    values = np.array(['foo', np.nan, np.nan * 2, 'bar'] * 10)
    df = pd.DataFrame({'strings': values})
    _check_pandas_roundtrip(df, version=version)
@pytest.mark.pandas
def test_category(version):
    """Categorical column round-trips; NaN entries come back as None codes."""
    repeats = 1000
    values = ['foo', None, 'bar', 'qux', np.nan]
    df = pd.DataFrame({'strings': values * repeats})
    df['strings'] = df['strings'].astype('category')

    values = ['foo', None, 'bar', 'qux', None]
    expected = pd.DataFrame({'strings': pd.Categorical(values * repeats)})
    _check_pandas_roundtrip(df, expected, version=version)
@pytest.mark.pandas
def test_timestamp(version):
    """Naive and timezone-aware timestamp columns round-trip."""
    df = pd.DataFrame({'naive': pd.date_range('2016-03-28', periods=10)})
    df['with_tz'] = (df.naive.dt.tz_localize('utc')
                     .dt.tz_convert('America/Los_Angeles'))
    _check_pandas_roundtrip(df, version=version)
@pytest.mark.pandas
def test_timestamp_with_nulls(version):
    """Timestamp columns containing None round-trip (naive and tz-aware)."""
    df = pd.DataFrame({'test': [pd.Timestamp(2016, 1, 1),
                                None,
                                pd.Timestamp(2016, 1, 3)]})
    df['with_tz'] = df.test.dt.tz_localize('utc')

    _check_pandas_roundtrip(df, version=version)
@pytest.mark.pandas
@pytest.mark.xfail(reason="not supported", raises=TypeError)
def test_timedelta_with_nulls_v1():
    """Timedelta columns are not representable in Feather V1 (expected fail)."""
    df = pd.DataFrame({'test': [pd.Timedelta('1 day'),
                                None,
                                pd.Timedelta('3 day')]})
    _check_pandas_roundtrip(df, version=1)
@pytest.mark.pandas
def test_timedelta_with_nulls():
    """Timedelta columns with nulls round-trip in Feather V2."""
    df = pd.DataFrame({'test': [pd.Timedelta('1 day'),
                                None,
                                pd.Timedelta('3 day')]})
    _check_pandas_roundtrip(df, version=2)
@pytest.mark.pandas
def test_out_of_float64_timestamp_with_nulls(version):
    """Nanosecond timestamps too precise for float64 still round-trip."""
    df = pd.DataFrame(
        {'test': pd.DatetimeIndex([1451606400000000001,
                                   None, 14516064000030405])})
    df['with_tz'] = df.test.dt.tz_localize('utc')
    _check_pandas_roundtrip(df, version=version)
@pytest.mark.pandas
def test_non_string_columns(version):
    """Integer column labels are stringified on write."""
    df = pd.DataFrame({0: [1, 2, 3, 4],
                       1: [True, False, True, False]})

    expected = df.rename(columns=str)
    _check_pandas_roundtrip(df, expected, version=version)
@pytest.mark.pandas
@pytest.mark.skipif(not os.path.supports_unicode_filenames,
                    reason='unicode filenames not supported')
def test_unicode_filename(version):
    """Regression test for GH #209: non-ASCII filename."""
    # GH #209
    name = (b'Besa_Kavaj\xc3\xab.feather').decode('utf-8')
    df = pd.DataFrame({'foo': [1, 2, 3, 4]})
    _check_pandas_roundtrip(df, path=random_path(prefix=name),
                            version=version)
@pytest.mark.pandas
def test_read_columns(version):
    """Selecting a subset of columns on read returns only those columns."""
    df = pd.DataFrame({
        'foo': [1, 2, 3, 4],
        'boo': [5, 6, 7, 8],
        'woo': [1, 3, 5, 7]
    })
    expected = df[['boo', 'woo']]

    _check_pandas_roundtrip(df, expected, version=version,
                            columns=['boo', 'woo'])
def test_overwritten_file(version):
    """Writing a new (smaller) table to an existing path fully replaces it."""
    path = random_path()
    TEST_FILES.append(path)

    num_values = 100
    np.random.seed(0)

    values = np.random.randint(0, 10, size=num_values)

    table = pa.table({'ints': values})
    write_feather(table, path)

    table = pa.table({'more_ints': values[0:num_values//2]})
    _check_arrow_roundtrip(table, path=path)
@pytest.mark.pandas
def test_filelike_objects(version):
    """Writing to and reading from an in-memory BytesIO works."""
    buf = io.BytesIO()

    # the copy makes it non-strided
    df = pd.DataFrame(np.arange(12).reshape(4, 3),
                      columns=['a', 'b', 'c']).copy()
    write_feather(df, buf, version=version)

    buf.seek(0)

    result = read_feather(buf)
    assert_frame_equal(result, df)
@pytest.mark.pandas
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
@pytest.mark.filterwarnings("ignore:DataFrame.to_sparse:FutureWarning")
def test_sparse_dataframe(version):
    """Regression test for GH #221: SparseDataFrame is densified on write."""
    if not pa.pandas_compat._pandas_api.has_sparse:
        pytest.skip("version of pandas does not support SparseDataFrame")
    # GH #221
    data = {'A': [0, 1, 2],
            'B': [1, 0, 1]}
    df = pd.DataFrame(data).to_sparse(fill_value=1)
    expected = df.to_dense()
    _check_pandas_roundtrip(df, expected, version=version)
@pytest.mark.pandas
def test_duplicate_columns_pandas():
    """Duplicate column names in a DataFrame are rejected (feather #53)."""
    # https://github.com/wesm/feather/issues/53
    # not currently able to handle duplicate columns
    df = pd.DataFrame(np.arange(12).reshape(4, 3),
                      columns=list('aaa')).copy()
    _assert_error_on_write(df, ValueError)
def test_duplicate_columns():
    """Duplicate column names round-trip in V2 but are rejected in V1."""
    # only works for version 2
    table = pa.table([[1, 2, 3], [4, 5, 6], [7, 8, 9]], names=['a', 'a', 'b'])
    _check_arrow_roundtrip(table)
    _assert_error_on_write(table, ValueError, version=1)
@pytest.mark.pandas
def test_unsupported():
    """Columns of arbitrary Python objects cannot be serialized."""
    # https://github.com/wesm/feather/issues/240
    # serializing actual python objects

    # custom python objects
    class A:
        pass

    df = pd.DataFrame({'a': [A(), A()]})
    _assert_error_on_write(df, ValueError)

    # non-strings
    df = pd.DataFrame({'a': ['a', 1, 2.0]})
    _assert_error_on_write(df, TypeError)
@pytest.mark.pandas
def test_v2_set_chunksize():
    """chunksize splits a V2 file into record batches of that many rows."""
    df = pd.DataFrame({'A': np.arange(1000)})
    table = pa.table(df)

    buf = io.BytesIO()
    write_feather(table, buf, chunksize=250, version=2)

    result = buf.getvalue()

    # A V2 feather file is an Arrow IPC file, so it can be inspected as one.
    ipc_file = pa.ipc.open_file(pa.BufferReader(result))
    assert ipc_file.num_record_batches == 4
    assert len(ipc_file.get_batch(0)) == 250
@pytest.mark.pandas
def test_v2_compression_options():
    """Valid V2 compression options round-trip; invalid combinations raise."""
    df = pd.DataFrame({'A': np.arange(1000)})

    cases = [
        # compression, compression_level
        ('uncompressed', None),
        ('lz4', None),
        ('zstd', 1),
        ('zstd', 10)
    ]

    for compression, compression_level in cases:
        _check_pandas_roundtrip(df, compression=compression,
                                compression_level=compression_level)

    buf = io.BytesIO()

    # LZ4 doesn't support compression_level
    with pytest.raises(pa.ArrowInvalid,
                       match="doesn't support setting a compression level"):
        write_feather(df, buf, compression='lz4', compression_level=10)

    # Trying to compress with V1
    with pytest.raises(
            ValueError,
            match="Feather V1 files do not support compression option"):
        write_feather(df, buf, compression='lz4', version=1)

    # Trying to set chunksize with V1
    with pytest.raises(
            ValueError,
            match="Feather V1 files do not support chunksize option"):
        write_feather(df, buf, chunksize=4096, version=1)

    # Unsupported compressor
    with pytest.raises(ValueError,
                       match='compression="snappy" not supported'):
        write_feather(df, buf, compression='snappy')
def test_v2_lz4_default_compression():
    """ARROW-8750: compression=None defaults to LZ4 when it is available."""
    # ARROW-8750: Make sure that the compression=None option selects lz4 if
    # it's available
    if not pa.Codec.is_available('lz4_frame'):
        pytest.skip("LZ4 compression support is not built in C++")

    # some highly compressible data
    t = pa.table([np.repeat(0, 100000)], names=['f0'])

    buf = io.BytesIO()
    write_feather(t, buf)
    default_result = buf.getvalue()

    buf = io.BytesIO()
    write_feather(t, buf, compression='uncompressed')
    uncompressed_result = buf.getvalue()

    # Compressed-by-default output must be smaller than the raw data.
    assert len(default_result) < len(uncompressed_result)
def test_v1_unsupported_types():
    """Nested (list) types raise a helpful TypeError for V1 files."""
    table = pa.table([pa.array([[1, 2, 3], [], None])], names=['f0'])

    buf = io.BytesIO()
    with pytest.raises(TypeError,
                       match=("Unsupported Feather V1 type: "
                              "list<item: int64>. "
                              "Use V2 format to serialize all Arrow types.")):
        write_feather(table, buf, version=1)
@pytest.mark.slow
@pytest.mark.pandas
def test_large_dataframe(version):
    """Round-trip a 400-million-row frame (slow; exercises very large columns)."""
    df = pd.DataFrame({'A': np.arange(400000000)})
    _check_pandas_roundtrip(df, version=version)
@pytest.mark.large_memory
@pytest.mark.pandas
def test_chunked_binary_error_message():
    """ARROW-3058: V1 gives a clear error for binary columns over 2GB."""
    # ARROW-3058: As Feather does not yet support chunked columns, we at least
    # make sure it's clear to the user what is going on

    # 2^31 + 1 bytes
    values = [b'x'] + [
        b'x' * (1 << 20)
    ] * 2 * (1 << 10)

    df = pd.DataFrame({'byte_col': values})

    # Works fine with version 2
    buf = io.BytesIO()
    write_feather(df, buf, version=2)
    result = read_feather(pa.BufferReader(buf.getvalue()))
    assert_frame_equal(result, df)

    with pytest.raises(ValueError, match="'byte_col' exceeds 2GB maximum "
                       "capacity of a Feather binary column. This restriction "
                       "may be lifted in the future"):
        write_feather(df, io.BytesIO(), version=1)
def test_feather_without_pandas(tempdir, version):
    """ARROW-8345: the Arrow-table API works without touching pandas."""
    # ARROW-8345
    table = pa.table([pa.array([1, 2, 3])], names=['f0'])
    path = str(tempdir / "data.feather")
    _check_arrow_roundtrip(table, path)
@pytest.mark.pandas
def test_read_column_selection(version):
    """ARROW-8641: columns may be selected by name or index, in any order."""
    # ARROW-8641
    df = pd.DataFrame(np.arange(12).reshape(4, 3), columns=['a', 'b', 'c'])

    # select columns as string names or integer indices
    _check_pandas_roundtrip(
        df, columns=['a', 'c'], expected=df[['a', 'c']], version=version)
    _check_pandas_roundtrip(
        df, columns=[0, 2], expected=df[['a', 'c']], version=version)

    # different order is followed
    _check_pandas_roundtrip(
        df, columns=['b', 'a'], expected=df[['b', 'a']], version=version)
    _check_pandas_roundtrip(
        df, columns=[1, 0], expected=df[['b', 'a']], version=version)
def test_read_column_duplicated_selection(tempdir, version):
    """Selecting the same column twice yields it twice in the result."""
    # duplicated columns in the column selection
    table = pa.table([[1, 2, 3], [4, 5, 6], [7, 8, 9]], names=['a', 'b', 'c'])
    path = str(tempdir / "data.feather")
    write_feather(table, path, version=version)

    expected = pa.table([[1, 2, 3], [4, 5, 6], [1, 2, 3]],
                        names=['a', 'b', 'a'])
    for col_selection in [['a', 'b', 'a'], [0, 1, 0]]:
        result = read_table(path, columns=col_selection)
        assert result.equals(expected)
def test_read_column_duplicated_in_file(tempdir):
    """Files with duplicate column names: index selection works, names raise."""
    # duplicated columns in feather file (only works for feather v2)
    table = pa.table([[1, 2, 3], [4, 5, 6], [7, 8, 9]], names=['a', 'b', 'a'])
    path = str(tempdir / "data.feather")
    write_feather(table, path, version=2)

    # no selection works fine
    result = read_table(path)
    assert result.equals(table)

    # selection with indices works
    result = read_table(path, columns=[0, 2])
    assert result.column_names == ['a', 'a']

    # selection with column names errors
    with pytest.raises(ValueError):
        read_table(path, columns=['a', 'b'])
def test_nested_types(compression):
    """ARROW-8860: struct and (nested) list types round-trip in V2."""
    # https://issues.apache.org/jira/browse/ARROW-8860
    table = pa.table({'col': pa.StructArray.from_arrays(
        [[0, 1, 2], [1, 2, 3]], names=["f1", "f2"])})
    _check_arrow_roundtrip(table, compression=compression)

    table = pa.table({'col': pa.array([[1, 2], [3, 4]])})
    _check_arrow_roundtrip(table, compression=compression)

    table = pa.table({'col': pa.array([[[1, 2], [3, 4]], [[5, 6], None]])})
    _check_arrow_roundtrip(table, compression=compression)
@h.given(past.all_tables, st.sampled_from(["uncompressed", "lz4", "zstd"]))
def test_roundtrip(table, compression):
    """Property-based: any hypothesis-generated table round-trips."""
    _check_arrow_roundtrip(table, compression=compression)
def test_feather_v017_experimental_compression_backward_compatibility(datadir):
    """ARROW-11163: read pre-1.0 files written with experimental compression."""
    # ARROW-11163 - ensure newer pyarrow versions can read the old feather
    # files from version 0.17.0 with experimental compression support (before
    # it was officially added to IPC format in 1.0.0)

    # file generated with:
    #     table = pa.table({'a': range(5)})
    #     from pyarrow import feather
    #     feather.write_feather(
    #         table, "v0.17.0.version=2-compression=lz4.feather",
    #         compression="lz4", version=2)
    expected = pa.table({'a': range(5)})
    result = read_table(datadir / "v0.17.0.version=2-compression=lz4.feather")
    assert result.equals(expected)
| apache-2.0 |
wenn/python_koans | python2/koans/about_tuples.py | 1 | 2479 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
class AboutTuples(Koan):
    """Koans about tuple behaviour: immutability, literals, and records."""

    def test_creating_a_tuple(self):
        count_of_three = (1, 2, 5)
        self.assertEqual(5, count_of_three[2])

    def test_tuples_are_immutable_so_item_assignment_is_not_possible(self):
        count_of_three = (1, 2, 5)
        try:
            count_of_three[2] = "three"
        except TypeError as ex:
            self.assertMatch("'tuple' object does not support item assignment", ex[0])

    def test_tuples_are_immutable_so_appending_is_not_possible(self):
        count_of_three = (1, 2, 5)
        try:
            count_of_three.append("boom")
        except Exception as ex:
            self.assertEqual(AttributeError, type(ex))
            # Note, assertMatch() uses regular expression pattern matching,
            # so you don't have to copy the whole message.
            self.assertMatch("'tuple' object has no attribute 'append'", ex[0])

    # Tuples are less flexible than lists, but faster.

    def test_tuples_can_only_be_changed_through_replacement(self):
        count_of_three = (1, 2, 5)
        list_count = list(count_of_three)
        list_count.append("boom")
        count_of_three = tuple(list_count)
        self.assertEqual((1, 2, 5, 'boom'), count_of_three)

    def test_tuples_of_one_look_peculiar(self):
        # Without the trailing comma, (1) is just a parenthesized int.
        self.assertEqual(int, (1).__class__)
        self.assertEqual(tuple, (1,).__class__)
        self.assertEqual(('Hello comma!',), ("Hello comma!", ))

    def test_tuple_constructor_can_be_surprising(self):
        # tuple() iterates its argument, so a string becomes its characters.
        self.assertEqual(('S', 'u', 'r', 'p', 'r', 'i', 's', 'e', '!'), tuple("Surprise!"))

    def test_creating_empty_tuples(self):
        self.assertEqual((), ())
        self.assertEqual((), tuple())  # Sometimes less confusing

    def test_tuples_can_be_embedded(self):
        lat = (37, 14, 6, 'N')
        lon = (115, 48, 40, 'W')
        place = ('Area 51', lat, lon)
        self.assertEqual(('Area 51', (37, 14, 6, 'N'), (115, 48, 40, 'W')), place)

    def test_tuples_are_good_for_representing_records(self):
        locations = [
            ("Illuminati HQ", (38, 52, 15.56, 'N'), (77, 3, 21.46, 'W')),
            ("Stargate B", (41, 10, 43.92, 'N'), (1, 49, 34.29, 'W')),
        ]
        locations.append(
            ("Cthulhu", (26, 40, 1, 'N'), (70, 45, 7, 'W'))
        )
        self.assertEqual('Cthulhu', locations[2][0])
        self.assertEqual(15.56, locations[0][1][2])
| mit |
Darkdadaah/pywikibot-core | tests/logentry_tests.py | 2 | 11005 | # -*- coding: utf-8 -*-
"""Test logentries module."""
#
# (C) Pywikibot team, 2015-2016
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
import datetime
import pywikibot
from pywikibot.logentries import LogEntryFactory
from pywikibot.tools import (
MediaWikiVersion,
UnicodeType as unicode,
)
from tests import unittest_print
from tests.aspects import (
unittest, MetaTestCaseClass, TestCase, DeprecationTestCase
)
from tests.utils import add_metaclass
class TestLogentriesBase(TestCase):

    """
    Base class for log entry tests.

    It uses the German Wikipedia for a current representation of the log entries
    and the test Wikipedia for the future representation. It also tests on a
    wiki with MW 1.19 or older to check that it can still read the older format.
    It currently uses lyricwiki which as of this commit uses 1.19.24.
    """

    # Per-site config consumed by the test framework; 'target' is the page
    # the site's main page was once moved to (None if never moved).
    sites = {
        'tewp': {
            'family': 'wikipedia',
            'code': 'test',
            'target': 'Main Page on wheels',
        },
        'dewp': {
            'family': 'wikipedia',
            'code': 'de',
            'target': 'Hauptseite',
        },
        'old': {
            'family': 'lyricwiki',
            'code': 'en',
            'target': None,
        }
    }

    def _get_logentry(self, logtype):
        """Retrieve a single log entry."""
        if self.site_key == 'old':
            # This is an assertion as the tests don't make sense with newer
            # MW versions and otherwise it might not be visible that the test
            # isn't run on an older wiki.
            self.assertLess(MediaWikiVersion(self.site.version()),
                            MediaWikiVersion('1.20'))
        return next(iter(self.site.logevents(logtype=logtype, total=1)))

    def _test_logevent(self, logtype):
        """Test a single logtype entry."""
        logentry = self._get_logentry(logtype)
        # Entries with a dedicated subclass record their expected type.
        if logtype in LogEntryFactory.logtypes:
            self.assertEqual(logentry._expectedType, logtype)
        else:
            self.assertIsNone(logentry._expectedType)
        # Pre-1.20 wikis report log parameters differently (no 'params' key).
        if self.site_key == 'old':
            self.assertNotIn('params', logentry.data)
        else:
            self.assertNotIn(logentry.type(), logentry.data)
        self.assertIsInstance(logentry.action(), unicode)
        self.assertIsInstance(logentry.comment(), unicode)
        self.assertIsInstance(logentry.logid(), int)
        self.assertIsInstance(logentry.ns(), int)
        self.assertIsInstance(logentry.pageid(), int)
        self.assertIsInstance(logentry.timestamp(), pywikibot.Timestamp)
        if 'title' in logentry.data:  # title may be missing
            # Autoblock removals expose the block ID (an int), not a page.
            if logtype == 'block' and logentry.isAutoblockRemoval:
                self.assertIsInstance(logentry.page(), int)
            else:
                self.assertIsInstance(logentry.page(), pywikibot.Page)
        else:
            self.assertRaises(KeyError, logentry.page)
        self.assertEqual(logentry.type(), logtype)
        self.assertIsInstance(logentry.user(), unicode)
        self.assertGreaterEqual(logentry.logid(), 0)
        self.assertGreaterEqual(logentry.ns(), -2)
        self.assertGreaterEqual(logentry.pageid(), 0)
class TestLogentriesMeta(MetaTestCaseClass):

    """Test meta class for TestLogentries."""

    def __new__(cls, name, bases, dct):
        """Create the new class."""
        def test_method(logtype):
            # Factory function: binds `logtype` now, so each generated test
            # closes over its own value instead of the shared loop variable.
            def test_logevent(self, key):
                """Test a single logtype entry."""
                self._test_logevent(logtype)

            return test_logevent

        # create test methods for the supported logtype classes
        for logtype in LogEntryFactory.logtypes:
            cls.add_method(dct, 'test_%sEntry' % logtype.title(),
                           test_method(logtype))

        return super(TestLogentriesMeta, cls).__new__(cls, name, bases, dct)
@add_metaclass
class TestLogentries(TestLogentriesBase):

    """Test general LogEntry properties."""

    # add_metaclass applies __metaclass__ under both Python 2 and 3, so the
    # metaclass can generate one test method per known log type.
    __metaclass__ = TestLogentriesMeta
class TestSimpleLogentries(TestLogentriesBase):

    """Test logentry classes without special classes."""

    def test_simple_entries(self, key):
        """Test those entries which don't have an extra LogEntry subclass."""
        # Unfortunately it's not possible to use the metaclass to create a
        # bunch of test methods for this too as the site instances haven't been
        # initialized yet.
        available_types = set(self.site._paraminfo.parameter(
            'query+logevents', 'type')['type'])
        for simple_type in available_types - set(LogEntryFactory.logtypes):
            if not simple_type:
                # paraminfo also reports an empty string as a type
                continue
            try:
                self._test_logevent(simple_type)
            except StopIteration:
                # _get_logentry found no entry of this type on this wiki.
                unittest_print(
                    'Unable to test "{0}" on "{1}" because there are no log '
                    'entries with that type.'.format(simple_type, key))
class TestLogentryParams(TestLogentriesBase):

    """Test LogEntry properties specific to their action."""

    def test_BlockEntry(self, key):
        """Test BlockEntry methods."""
        # only 'block' entries can be tested
        for logentry in self.site.logevents(logtype='block', total=5):
            if logentry.action() == 'block':
                self.assertIsInstance(logentry.flags(), list)
                # Check that there are no empty strings
                self.assertTrue(all(logentry.flags()))
                if logentry.expiry() is not None:
                    self.assertIsInstance(logentry.expiry(), pywikibot.Timestamp)
                    self.assertIsInstance(logentry.duration(), datetime.timedelta)
                    # expiry must equal the block's start plus its duration
                    self.assertEqual(logentry.timestamp() + logentry.duration(),
                                     logentry.expiry())
                else:
                    # Indefinite blocks have no expiry and thus no duration.
                    self.assertIsNone(logentry.duration())
                break

    def test_RightsEntry(self, key):
        """Test RightsEntry methods."""
        logentry = self._get_logentry('rights')
        self.assertIsInstance(logentry.oldgroups, list)
        self.assertIsInstance(logentry.newgroups, list)

    def test_MoveEntry(self, key):
        """Test MoveEntry methods."""
        logentry = self._get_logentry('move')
        self.assertIsInstance(logentry.target_ns, pywikibot.site.Namespace)
        self.assertEqual(logentry.target_page.namespace(),
                         logentry.target_ns.id)
        self.assertIsInstance(logentry.target_title, unicode)
        self.assertIsInstance(logentry.target_page, pywikibot.Page)
        self.assertIsInstance(logentry.suppressedredirect(), bool)

    def test_PatrolEntry(self, key):
        """Test PatrolEntry methods."""
        logentry = self._get_logentry('patrol')
        self.assertIsInstance(logentry.current_id, int)
        self.assertIsInstance(logentry.previous_id, int)
        self.assertIsInstance(logentry.auto, bool)

    def test_moved_target(self, key):
        """Test moved_target method."""
        # main page was moved around
        mainpage = self.get_mainpage(self.site)
        if self.sites[key]['target'] is None:
            raise unittest.SkipTest('No moved target')
        target = mainpage.moved_target()
        self.assertIsInstance(target, pywikibot.Page)
        self.assertEqual(target.title(),
                         self.sites[key]['target'])
        # main page was moved back again, we test it.
        self.assertEqual(mainpage, target.moved_target())

    def test_moved_target_fail_old(self):
        """Test moved_target method failing on older wiki."""
        site = self.get_site('old')
        with self.assertRaises(pywikibot.NoMoveTarget):
            self.get_mainpage(site).moved_target()

    def test_moved_target_fail_de(self):
        """Test moved_target method failing on de-wiki."""
        page = pywikibot.Page(self.get_site('dewp'), 'Main Page')
        with self.assertRaises(pywikibot.NoMoveTarget):
            page.moved_target()
class TestDeprecatedMethods(TestLogentriesBase, DeprecationTestCase):

    """Test cases for deprecated logentry methods."""

    def test_MoveEntry(self, key):
        """Test deprecated MoveEntry methods."""
        logentry = self._get_logentry('move')
        self.assertIsInstance(logentry.new_ns(), int)
        # each deprecated accessor must emit exactly one deprecation
        # warning pointing at its replacement
        self.assertOneDeprecationParts('pywikibot.logentries.MoveEntry.new_ns',
                                       'target_ns.id')
        self.assertEqual(logentry.new_title(), logentry.target_page)
        self.assertOneDeprecationParts(
            'pywikibot.logentries.MoveEntry.new_title', 'target_page')

    def test_LogEntry_title(self, key):
        """Test title and page return the same instance."""
        # Request multiple log entries in the hope that one might have no
        # title entry
        for logentry in self.site.logevents(total=5):
            if 'title' in logentry.data:  # title may be missing
                self.assertIsInstance(logentry.title(), pywikibot.Page)
                self.assertIs(logentry.title(), logentry.page())
                # title() and page() each emit one deprecation warning
                self.assertOneDeprecation(count=2)
            else:
                self.assertRaises(KeyError, logentry.title)
                self.assertOneDeprecation()

    def test_getMovedTarget(self, key):
        """Test getMovedTarget method."""
        # main page was moved around
        if self.sites[key]['target'] is None:
            raise unittest.SkipTest('No moved target')
        mainpage = self.get_mainpage(self.site)
        target = mainpage.getMovedTarget()
        self.assertIsInstance(target, pywikibot.Page)
        self.assertEqual(target.title(),
                         self.sites[key]['target'])
        # main page was moved back again, we test it.
        self.assertEqual(mainpage, target.getMovedTarget())
        # getMovedTarget() was called twice above, hence count=2
        self.assertOneDeprecationParts(
            'pywikibot.page.BasePage.getMovedTarget', 'moved_target()', 2)

    def test_moved_target_fail_old(self):
        """Test getMovedTarget method failing on older wiki."""
        site = self.get_site('old')
        # the deprecated method raises NoPage (not NoMoveTarget)
        with self.assertRaises(pywikibot.NoPage):
            self.get_mainpage(site).getMovedTarget()
        self.assertOneDeprecationParts('pywikibot.page.BasePage.getMovedTarget',
                                       'moved_target()')

    def test_moved_target_fail_de(self):
        """Test getMovedTarget method failing on de-wiki."""
        page = pywikibot.Page(self.get_site('dewp'), 'Main Page')
        with self.assertRaises(pywikibot.NoPage):
            page.getMovedTarget()
        self.assertOneDeprecationParts('pywikibot.page.BasePage.getMovedTarget',
                                       'moved_target()')
if __name__ == '__main__':  # pragma: no cover
    try:
        unittest.main()
    except SystemExit:
        # unittest.main() calls sys.exit(); swallow it so the interpreter
        # exits cleanly when the module is run as a script.
        pass
| mit |
JohnFrazier/pacman | test/pacman/tests/sync306.py | 23 | 1479 | self.description = "install with recursive/cascading deps"
# (name, version, dependencies) for every package in the sync database,
# listed in the same order the packages are registered.
sync_packages = [
    ("pacman", "4.0.1-2", ["glibc>=2.15", "curl"]),
    ("glibc", "2.15-1", None),
    ("gcc-libs", "4.6.2-5", ["glibc>=2.14"]),
    ("curl", "7.23.1-2", ["openssl"]),
    ("openssl", "1.0.0.e-1", ["perl"]),
    ("gcc", "4.6.2-5", ["gcc-libs=4.6.2-5"]),
    ("perl", "5.14.2-5", ["db"]),
    ("db", "5.2.36-2", ["gcc-libs"]),
]
for name, version, deps in sync_packages:
    pkg = pmpkg(name, version)
    if deps is not None:
        pkg.depends = deps
    self.addpkg2db("sync", pkg)

# Installed packages: an older pacman whose upgrade pulls in the new
# glibc, which in turn cascades through the dependency chain above.
local_packages = [
    ("pacman", "4.0.1-1", ["glibc>=2.14", "curl"]),
    ("glibc", "2.14-2", None),
    ("curl", "7.23.1-2", None),
    ("gcc-libs", "4.6.2-3", None),
    ("gcc", "4.6.2-3", ["gcc-libs=4.6.2-3"]),
    ("perl", "5.14.2-5", ["db"]),
    ("db", "5.2.36-2", ["gcc-libs"]),
]
for name, version, deps in local_packages:
    pkg = pmpkg(name, version)
    if deps is not None:
        pkg.depends = deps
    self.addpkg2db("local", pkg)

self.args = "-S pacman"

self.addrule("PACMAN_RETCODE=0")
self.addrule("PKG_VERSION=pacman|4.0.1-2")
| gpl-2.0 |
jorge2703/scikit-learn | sklearn/tree/tree.py | 113 | 34767 | """
This module gathers tree-based methods, including decision, regression and
randomized trees. Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <g.louppe@gmail.com>
# Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Brian Holt <bdholt1@gmail.com>
# Noel Dawe <noel@dawe.me>
# Satrajit Gosh <satrajit.ghosh@gmail.com>
# Joly Arnaud <arnaud.v.joly@gmail.com>
# Fares Hedayati <fares.hedayati@gmail.com>
#
# Licence: BSD 3 clause
from __future__ import division
import numbers
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from ..base import BaseEstimator, ClassifierMixin, RegressorMixin
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_array, check_random_state, compute_sample_weight
from ..utils.validation import NotFittedError
from ._tree import Criterion
from ._tree import Splitter
from ._tree import DepthFirstTreeBuilder, BestFirstTreeBuilder
from ._tree import Tree
from . import _tree
__all__ = ["DecisionTreeClassifier",
           "DecisionTreeRegressor",
           "ExtraTreeClassifier",
           "ExtraTreeRegressor"]


# =============================================================================
# Types and constants
# =============================================================================

# Input/target dtypes expected by the Cython tree implementation:
# DTYPE for the feature matrix X, DOUBLE for targets and sample weights.
DTYPE = _tree.DTYPE
DOUBLE = _tree.DOUBLE

# Map criterion names accepted in estimator constructors to the Cython
# impurity-criterion classes (classification vs. regression).
CRITERIA_CLF = {"gini": _tree.Gini, "entropy": _tree.Entropy}
CRITERIA_REG = {"mse": _tree.MSE, "friedman_mse": _tree.FriedmanMSE}

# Map splitter names to the Cython splitter classes; sparse input uses a
# dedicated set of splitters.
DENSE_SPLITTERS = {"best": _tree.BestSplitter,
                   "presort-best": _tree.PresortBestSplitter,
                   "random": _tree.RandomSplitter}

SPARSE_SPLITTERS = {"best": _tree.BestSparseSplitter,
                    "random": _tree.RandomSparseSplitter}
# =============================================================================
# Base decision tree
# =============================================================================
class BaseDecisionTree(six.with_metaclass(ABCMeta, BaseEstimator,
                                          _LearntSelectorMixin)):
    """Base class for decision trees.

    Warning: This class should not be used directly.
    Use derived classes instead.
    """

    @abstractmethod
    def __init__(self,
                 criterion,
                 splitter,
                 max_depth,
                 min_samples_split,
                 min_samples_leaf,
                 min_weight_fraction_leaf,
                 max_features,
                 max_leaf_nodes,
                 random_state,
                 class_weight=None):
        self.criterion = criterion
        self.splitter = splitter
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.min_weight_fraction_leaf = min_weight_fraction_leaf
        self.max_features = max_features
        self.random_state = random_state
        self.max_leaf_nodes = max_leaf_nodes
        self.class_weight = class_weight

        # The attributes below are populated by fit(); while they are None
        # the estimator is considered unfitted (see _validate_X_predict).
        self.n_features_ = None
        self.n_outputs_ = None
        self.classes_ = None
        self.n_classes_ = None
        self.tree_ = None
        self.max_features_ = None

    def fit(self, X, y, sample_weight=None, check_input=True):
        """Build a decision tree from the training set (X, y).

        Parameters
        ----------
        X : array-like or sparse matrix, shape = [n_samples, n_features]
            The training input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csc_matrix``.

        y : array-like, shape = [n_samples] or [n_samples, n_outputs]
            The target values (class labels in classification, real numbers in
            regression). In the regression case, use ``dtype=np.float64`` and
            ``order='C'`` for maximum efficiency.

        sample_weight : array-like, shape = [n_samples] or None
            Sample weights. If None, then samples are equally weighted. Splits
            that would create child nodes with net zero or negative weight are
            ignored while searching for a split in each node. In the case of
            classification, splits are also ignored if they would result in any
            single class carrying a negative weight in either child node.

        check_input : boolean, (default=True)
            Allow to bypass several input checking.
            Don't use this parameter unless you know what you do.

        Returns
        -------
        self : object
            Returns self.
        """
        random_state = check_random_state(self.random_state)
        if check_input:
            X = check_array(X, dtype=DTYPE, accept_sparse="csc")
            if issparse(X):
                X.sort_indices()

                # The Cython splitters index with np.intc only.
                if X.indices.dtype != np.intc or X.indptr.dtype != np.intc:
                    raise ValueError("No support for np.int64 index based "
                                     "sparse matrices")

        # Determine output settings
        n_samples, self.n_features_ = X.shape
        is_classification = isinstance(self, ClassifierMixin)

        y = np.atleast_1d(y)
        expanded_class_weight = None

        if y.ndim == 1:
            # reshape is necessary to preserve the data contiguity against vs
            # [:, np.newaxis] that does not.
            y = np.reshape(y, (-1, 1))

        self.n_outputs_ = y.shape[1]

        if is_classification:
            # Re-encode each output's labels as indices 0..n_classes-1;
            # the original labels are kept in self.classes_ for decoding
            # at predict time.
            y = np.copy(y)

            self.classes_ = []
            self.n_classes_ = []

            if self.class_weight is not None:
                # Keep the original labels around: class weights are keyed
                # on them, not on the encoded indices.
                y_original = np.copy(y)

            y_store_unique_indices = np.zeros(y.shape, dtype=np.int)
            for k in range(self.n_outputs_):
                classes_k, y_store_unique_indices[:, k] = np.unique(y[:, k], return_inverse=True)
                self.classes_.append(classes_k)
                self.n_classes_.append(classes_k.shape[0])
            y = y_store_unique_indices

            if self.class_weight is not None:
                expanded_class_weight = compute_sample_weight(
                    self.class_weight, y_original)

        else:
            self.classes_ = [None] * self.n_outputs_
            self.n_classes_ = [1] * self.n_outputs_

        self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)

        if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
            y = np.ascontiguousarray(y, dtype=DOUBLE)

        # Check parameters
        max_depth = ((2 ** 31) - 1 if self.max_depth is None
                     else self.max_depth)
        max_leaf_nodes = (-1 if self.max_leaf_nodes is None
                          else self.max_leaf_nodes)

        if isinstance(self.max_features, six.string_types):
            if self.max_features == "auto":
                # "auto" means sqrt(n_features) for classification but all
                # features for regression.
                if is_classification:
                    max_features = max(1, int(np.sqrt(self.n_features_)))
                else:
                    max_features = self.n_features_
            elif self.max_features == "sqrt":
                max_features = max(1, int(np.sqrt(self.n_features_)))
            elif self.max_features == "log2":
                max_features = max(1, int(np.log2(self.n_features_)))
            else:
                raise ValueError(
                    'Invalid value for max_features. Allowed string '
                    'values are "auto", "sqrt" or "log2".')
        elif self.max_features is None:
            max_features = self.n_features_
        elif isinstance(self.max_features, (numbers.Integral, np.integer)):
            max_features = self.max_features
        else:  # float
            if self.max_features > 0.0:
                max_features = max(1, int(self.max_features * self.n_features_))
            else:
                max_features = 0

        self.max_features_ = max_features

        if len(y) != n_samples:
            raise ValueError("Number of labels=%d does not match "
                             "number of samples=%d" % (len(y), n_samples))
        if self.min_samples_split <= 0:
            raise ValueError("min_samples_split must be greater than zero.")
        if self.min_samples_leaf <= 0:
            raise ValueError("min_samples_leaf must be greater than zero.")
        if not 0 <= self.min_weight_fraction_leaf <= 0.5:
            raise ValueError("min_weight_fraction_leaf must in [0, 0.5]")
        if max_depth <= 0:
            raise ValueError("max_depth must be greater than zero. ")
        if not (0 < max_features <= self.n_features_):
            raise ValueError("max_features must be in (0, n_features]")
        if not isinstance(max_leaf_nodes, (numbers.Integral, np.integer)):
            raise ValueError("max_leaf_nodes must be integral number but was "
                             "%r" % max_leaf_nodes)
        if -1 < max_leaf_nodes < 2:
            raise ValueError(("max_leaf_nodes {0} must be either smaller than "
                              "0 or larger than 1").format(max_leaf_nodes))

        if sample_weight is not None:
            if (getattr(sample_weight, "dtype", None) != DOUBLE or
                    not sample_weight.flags.contiguous):
                sample_weight = np.ascontiguousarray(
                    sample_weight, dtype=DOUBLE)
            if len(sample_weight.shape) > 1:
                raise ValueError("Sample weights array has more "
                                 "than one dimension: %d" %
                                 len(sample_weight.shape))
            if len(sample_weight) != n_samples:
                raise ValueError("Number of weights=%d does not match "
                                 "number of samples=%d" %
                                 (len(sample_weight), n_samples))

        if expanded_class_weight is not None:
            # class weights combine multiplicatively with user-provided
            # sample weights
            if sample_weight is not None:
                sample_weight = sample_weight * expanded_class_weight
            else:
                sample_weight = expanded_class_weight

        # Set min_weight_leaf from min_weight_fraction_leaf
        if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
            min_weight_leaf = (self.min_weight_fraction_leaf *
                               np.sum(sample_weight))
        else:
            min_weight_leaf = 0.

        # Set min_samples_split sensibly
        min_samples_split = max(self.min_samples_split,
                                2 * self.min_samples_leaf)

        # Build tree
        criterion = self.criterion
        if not isinstance(criterion, Criterion):
            if is_classification:
                criterion = CRITERIA_CLF[self.criterion](self.n_outputs_,
                                                         self.n_classes_)
            else:
                criterion = CRITERIA_REG[self.criterion](self.n_outputs_)

        SPLITTERS = SPARSE_SPLITTERS if issparse(X) else DENSE_SPLITTERS

        splitter = self.splitter
        if not isinstance(self.splitter, Splitter):
            splitter = SPLITTERS[self.splitter](criterion,
                                                self.max_features_,
                                                self.min_samples_leaf,
                                                min_weight_leaf,
                                                random_state)

        self.tree_ = Tree(self.n_features_, self.n_classes_, self.n_outputs_)

        # Use BestFirst if max_leaf_nodes given; use DepthFirst otherwise
        if max_leaf_nodes < 0:
            builder = DepthFirstTreeBuilder(splitter, min_samples_split,
                                            self.min_samples_leaf,
                                            min_weight_leaf,
                                            max_depth)
        else:
            builder = BestFirstTreeBuilder(splitter, min_samples_split,
                                           self.min_samples_leaf,
                                           min_weight_leaf,
                                           max_depth,
                                           max_leaf_nodes)

        builder.build(self.tree_, X, y, sample_weight)

        # For single-output problems expose scalars/arrays directly instead
        # of one-element lists.
        if self.n_outputs_ == 1:
            self.n_classes_ = self.n_classes_[0]
            self.classes_ = self.classes_[0]

        return self

    def _validate_X_predict(self, X, check_input):
        """Validate X whenever one tries to predict, apply, predict_proba"""
        if self.tree_ is None:
            raise NotFittedError("Estimator not fitted, "
                                 "call `fit` before exploiting the model.")

        if check_input:
            X = check_array(X, dtype=DTYPE, accept_sparse="csr")
            if issparse(X) and (X.indices.dtype != np.intc or
                                X.indptr.dtype != np.intc):
                raise ValueError("No support for np.int64 index based "
                                 "sparse matrices")

        n_features = X.shape[1]
        if self.n_features_ != n_features:
            raise ValueError("Number of features of the model must "
                             " match the input. Model n_features is %s and "
                             " input n_features is %s "
                             % (self.n_features_, n_features))

        return X

    def predict(self, X, check_input=True):
        """Predict class or regression value for X.

        For a classification model, the predicted class for each sample in X is
        returned. For a regression model, the predicted value based on X is
        returned.

        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.

        check_input : boolean, (default=True)
            Allow to bypass several input checking.
            Don't use this parameter unless you know what you do.

        Returns
        -------
        y : array of shape = [n_samples] or [n_samples, n_outputs]
            The predicted classes, or the predict values.
        """
        X = self._validate_X_predict(X, check_input)
        proba = self.tree_.predict(X)
        n_samples = X.shape[0]

        # Classification
        if isinstance(self, ClassifierMixin):
            if self.n_outputs_ == 1:
                # argmax over leaf class counts, decoded back to the
                # original labels stored in classes_
                return self.classes_.take(np.argmax(proba, axis=1), axis=0)

            else:
                predictions = np.zeros((n_samples, self.n_outputs_))

                for k in range(self.n_outputs_):
                    predictions[:, k] = self.classes_[k].take(
                        np.argmax(proba[:, k], axis=1),
                        axis=0)

                return predictions

        # Regression
        else:
            if self.n_outputs_ == 1:
                return proba[:, 0]

            else:
                return proba[:, :, 0]

    def apply(self, X, check_input=True):
        """
        Returns the index of the leaf that each sample is predicted as.

        Parameters
        ----------
        X : array_like or sparse matrix, shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.

        check_input : boolean, (default=True)
            Allow to bypass several input checking.
            Don't use this parameter unless you know what you do.

        Returns
        -------
        X_leaves : array_like, shape = [n_samples,]
            For each datapoint x in X, return the index of the leaf x
            ends up in. Leaves are numbered within
            ``[0; self.tree_.node_count)``, possibly with gaps in the
            numbering.
        """
        X = self._validate_X_predict(X, check_input)
        return self.tree_.apply(X)

    @property
    def feature_importances_(self):
        """Return the feature importances.

        The importance of a feature is computed as the (normalized) total
        reduction of the criterion brought by that feature.
        It is also known as the Gini importance.

        Returns
        -------
        feature_importances_ : array, shape = [n_features]
        """
        if self.tree_ is None:
            raise NotFittedError("Estimator not fitted, call `fit` before"
                                 " `feature_importances_`.")

        return self.tree_.compute_feature_importances()
# =============================================================================
# Public estimators
# =============================================================================
class DecisionTreeClassifier(BaseDecisionTree, ClassifierMixin):
    """A decision tree classifier.

    Read more in the :ref:`User Guide <tree>`.

    Parameters
    ----------
    criterion : string, optional (default="gini")
        The function to measure the quality of a split. Supported criteria are
        "gini" for the Gini impurity and "entropy" for the information gain.

    splitter : string, optional (default="best")
        The strategy used to choose the split at each node. Supported
        strategies are "best" to choose the best split and "random" to choose
        the best random split.

    max_features : int, float, string or None, optional (default=None)
        The number of features to consider when looking for the best split:
            - If int, then consider `max_features` features at each split.
            - If float, then `max_features` is a percentage and
              `int(max_features * n_features)` features are considered at each
              split.
            - If "auto", then `max_features=sqrt(n_features)`.
            - If "sqrt", then `max_features=sqrt(n_features)`.
            - If "log2", then `max_features=log2(n_features)`.
            - If None, then `max_features=n_features`.

        Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires to
        effectively inspect more than ``max_features`` features.

    max_depth : int or None, optional (default=None)
        The maximum depth of the tree. If None, then nodes are expanded until
        all leaves are pure or until all leaves contain less than
        min_samples_split samples.
        Ignored if ``max_leaf_nodes`` is not None.

    min_samples_split : int, optional (default=2)
        The minimum number of samples required to split an internal node.

    min_samples_leaf : int, optional (default=1)
        The minimum number of samples required to be at a leaf node.

    min_weight_fraction_leaf : float, optional (default=0.)
        The minimum weighted fraction of the input samples required to be at a
        leaf node.

    max_leaf_nodes : int or None, optional (default=None)
        Grow a tree with ``max_leaf_nodes`` in best-first fashion.
        Best nodes are defined as relative reduction in impurity.
        If None then unlimited number of leaf nodes.
        If not None then ``max_depth`` will be ignored.

    class_weight : dict, list of dicts, "balanced" or None, optional
                   (default=None)
        Weights associated with classes in the form ``{class_label: weight}``.
        If not given, all classes are supposed to have weight one. For
        multi-output problems, a list of dicts can be provided in the same
        order as the columns of y.

        The "balanced" mode uses the values of y to automatically adjust
        weights inversely proportional to class frequencies in the input data
        as ``n_samples / (n_classes * np.bincount(y))``

        For multi-output, the weights of each column of y will be multiplied.

        Note that these weights will be multiplied with sample_weight (passed
        through the fit method) if sample_weight is specified.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Attributes
    ----------
    classes_ : array of shape = [n_classes] or a list of such arrays
        The classes labels (single output problem),
        or a list of arrays of class labels (multi-output problem).

    feature_importances_ : array of shape = [n_features]
        The feature importances. The higher, the more important the
        feature. The importance of a feature is computed as the (normalized)
        total reduction of the criterion brought by that feature.  It is also
        known as the Gini importance [4]_.

    max_features_ : int,
        The inferred value of max_features.

    n_classes_ : int or list
        The number of classes (for single output problems),
        or a list containing the number of classes for each
        output (for multi-output problems).

    n_features_ : int
        The number of features when ``fit`` is performed.

    n_outputs_ : int
        The number of outputs when ``fit`` is performed.

    tree_ : Tree object
        The underlying Tree object.

    See also
    --------
    DecisionTreeRegressor

    References
    ----------

    .. [1] http://en.wikipedia.org/wiki/Decision_tree_learning

    .. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
           and Regression Trees", Wadsworth, Belmont, CA, 1984.

    .. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
           Learning", Springer, 2009.

    .. [4] L. Breiman, and A. Cutler, "Random Forests",
           http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm

    Examples
    --------
    >>> from sklearn.datasets import load_iris
    >>> from sklearn.cross_validation import cross_val_score
    >>> from sklearn.tree import DecisionTreeClassifier
    >>> clf = DecisionTreeClassifier(random_state=0)
    >>> iris = load_iris()
    >>> cross_val_score(clf, iris.data, iris.target, cv=10)
    ...                             # doctest: +SKIP
    ...
    array([ 1.     ,  0.93...,  0.86...,  0.93...,  0.93...,
            0.93...,  0.93...,  1.     ,  0.93...,  1.      ])
    """
    def __init__(self,
                 criterion="gini",
                 splitter="best",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features=None,
                 random_state=None,
                 max_leaf_nodes=None,
                 class_weight=None):
        # All parameters are forwarded verbatim to BaseDecisionTree; fit()
        # there interprets and validates them.
        super(DecisionTreeClassifier, self).__init__(
            criterion=criterion,
            splitter=splitter,
            max_depth=max_depth,
            min_samples_split=min_samples_split,
            min_samples_leaf=min_samples_leaf,
            min_weight_fraction_leaf=min_weight_fraction_leaf,
            max_features=max_features,
            max_leaf_nodes=max_leaf_nodes,
            class_weight=class_weight,
            random_state=random_state)

    def predict_proba(self, X, check_input=True):
        """Predict class probabilities of the input samples X.

        The predicted class probability is the fraction of samples of the same
        class in a leaf.

        check_input : boolean, (default=True)
            Allow to bypass several input checking.
            Don't use this parameter unless you know what you do.

        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.

        Returns
        -------
        p : array of shape = [n_samples, n_classes], or a list of n_outputs
            such arrays if n_outputs > 1.
            The class probabilities of the input samples. The order of the
            classes corresponds to that in the attribute `classes_`.
        """
        X = self._validate_X_predict(X, check_input)
        proba = self.tree_.predict(X)

        if self.n_outputs_ == 1:
            proba = proba[:, :self.n_classes_]
            # Normalize per-leaf class counts into probabilities; a zero
            # normalizer is replaced by 1 to avoid division by zero.
            normalizer = proba.sum(axis=1)[:, np.newaxis]
            normalizer[normalizer == 0.0] = 1.0
            proba /= normalizer

            return proba

        else:
            # Multi-output: return one probability array per output.
            all_proba = []

            for k in range(self.n_outputs_):
                proba_k = proba[:, k, :self.n_classes_[k]]
                normalizer = proba_k.sum(axis=1)[:, np.newaxis]
                normalizer[normalizer == 0.0] = 1.0
                proba_k /= normalizer
                all_proba.append(proba_k)

            return all_proba

    def predict_log_proba(self, X):
        """Predict class log-probabilities of the input samples X.

        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.

        Returns
        -------
        p : array of shape = [n_samples, n_classes], or a list of n_outputs
            such arrays if n_outputs > 1.
            The class log-probabilities of the input samples. The order of the
            classes corresponds to that in the attribute `classes_`.
        """
        proba = self.predict_proba(X)

        if self.n_outputs_ == 1:
            return np.log(proba)

        else:
            # take the log of each output's probability array in place
            for k in range(self.n_outputs_):
                proba[k] = np.log(proba[k])

            return proba
class DecisionTreeRegressor(BaseDecisionTree, RegressorMixin):
    """A decision tree regressor.

    Read more in the :ref:`User Guide <tree>`.

    Parameters
    ----------
    criterion : string, optional (default="mse")
        The function to measure the quality of a split. The only supported
        criterion is "mse" for the mean squared error, which is equal to
        variance reduction as feature selection criterion.

    splitter : string, optional (default="best")
        The strategy used to choose the split at each node. Supported
        strategies are "best" to choose the best split and "random" to choose
        the best random split.

    max_features : int, float, string or None, optional (default=None)
        The number of features to consider when looking for the best split:
          - If int, then consider `max_features` features at each split.
          - If float, then `max_features` is a percentage and
            `int(max_features * n_features)` features are considered at each
            split.
          - If "auto", then `max_features=n_features`.
          - If "sqrt", then `max_features=sqrt(n_features)`.
          - If "log2", then `max_features=log2(n_features)`.
          - If None, then `max_features=n_features`.

        Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires to
        effectively inspect more than ``max_features`` features.

    max_depth : int or None, optional (default=None)
        The maximum depth of the tree. If None, then nodes are expanded until
        all leaves are pure or until all leaves contain less than
        min_samples_split samples.
        Ignored if ``max_leaf_nodes`` is not None.

    min_samples_split : int, optional (default=2)
        The minimum number of samples required to split an internal node.

    min_samples_leaf : int, optional (default=1)
        The minimum number of samples required to be at a leaf node.

    min_weight_fraction_leaf : float, optional (default=0.)
        The minimum weighted fraction of the input samples required to be at a
        leaf node.

    max_leaf_nodes : int or None, optional (default=None)
        Grow a tree with ``max_leaf_nodes`` in best-first fashion.
        Best nodes are defined as relative reduction in impurity.
        If None then unlimited number of leaf nodes.
        If not None then ``max_depth`` will be ignored.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Attributes
    ----------
    feature_importances_ : array of shape = [n_features]
        The feature importances.
        The higher, the more important the feature.
        The importance of a feature is computed as the
        (normalized) total reduction of the criterion brought
        by that feature. It is also known as the Gini importance [4]_.

    max_features_ : int,
        The inferred value of max_features.

    n_features_ : int
        The number of features when ``fit`` is performed.

    n_outputs_ : int
        The number of outputs when ``fit`` is performed.

    tree_ : Tree object
        The underlying Tree object.

    See also
    --------
    DecisionTreeClassifier

    References
    ----------

    .. [1] http://en.wikipedia.org/wiki/Decision_tree_learning

    .. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
           and Regression Trees", Wadsworth, Belmont, CA, 1984.

    .. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
           Learning", Springer, 2009.

    .. [4] L. Breiman, and A. Cutler, "Random Forests",
           http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm

    Examples
    --------
    >>> from sklearn.datasets import load_boston
    >>> from sklearn.cross_validation import cross_val_score
    >>> from sklearn.tree import DecisionTreeRegressor
    >>> boston = load_boston()
    >>> regressor = DecisionTreeRegressor(random_state=0)
    >>> cross_val_score(regressor, boston.data, boston.target, cv=10)
    ...                    # doctest: +SKIP
    ...
    array([ 0.61..., 0.57..., -0.34..., 0.41..., 0.75...,
            0.07..., 0.29..., 0.33..., -1.42..., -1.77...])
    """
    def __init__(self,
                 criterion="mse",
                 splitter="best",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features=None,
                 random_state=None,
                 max_leaf_nodes=None):
        # Regression has no class_weight; everything else is forwarded
        # verbatim to BaseDecisionTree.
        super(DecisionTreeRegressor, self).__init__(
            criterion=criterion,
            splitter=splitter,
            max_depth=max_depth,
            min_samples_split=min_samples_split,
            min_samples_leaf=min_samples_leaf,
            min_weight_fraction_leaf=min_weight_fraction_leaf,
            max_features=max_features,
            max_leaf_nodes=max_leaf_nodes,
            random_state=random_state)
class ExtraTreeClassifier(DecisionTreeClassifier):
    """An extremely randomized tree classifier.

    Extra-trees differ from classic decision trees in the way they are built.
    When looking for the best split to separate the samples of a node into two
    groups, random splits are drawn for each of the `max_features` randomly
    selected features and the best split among those is chosen. When
    `max_features` is set 1, this amounts to building a totally random
    decision tree.

    Warning: Extra-trees should only be used within ensemble methods.

    Read more in the :ref:`User Guide <tree>`.

    See also
    --------
    ExtraTreeRegressor, ExtraTreesClassifier, ExtraTreesRegressor

    References
    ----------

    .. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
           Machine Learning, 63(1), 3-42, 2006.
    """
    def __init__(self,
                 criterion="gini",
                 splitter="random",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features="auto",
                 random_state=None,
                 max_leaf_nodes=None,
                 class_weight=None):
        # Same parameters as DecisionTreeClassifier, but the defaults select
        # the "random" splitter and max_features="auto" — that is what makes
        # the tree "extremely randomized".
        super(ExtraTreeClassifier, self).__init__(
            criterion=criterion,
            splitter=splitter,
            max_depth=max_depth,
            min_samples_split=min_samples_split,
            min_samples_leaf=min_samples_leaf,
            min_weight_fraction_leaf=min_weight_fraction_leaf,
            max_features=max_features,
            max_leaf_nodes=max_leaf_nodes,
            class_weight=class_weight,
            random_state=random_state)
class ExtraTreeRegressor(DecisionTreeRegressor):
    """An extremely randomized tree regressor.

    Extra-trees differ from classic decision trees in the way they are built.
    When looking for the best split to separate the samples of a node into two
    groups, random splits are drawn for each of the `max_features` randomly
    selected features and the best split among those is chosen. When
    `max_features` is set 1, this amounts to building a totally random
    decision tree.

    Warning: Extra-trees should only be used within ensemble methods.

    Read more in the :ref:`User Guide <tree>`.

    See also
    --------
    ExtraTreeClassifier, ExtraTreesClassifier, ExtraTreesRegressor

    References
    ----------

    .. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
           Machine Learning, 63(1), 3-42, 2006.
    """
    def __init__(self,
                 criterion="mse",
                 splitter="random",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features="auto",
                 random_state=None,
                 max_leaf_nodes=None):
        # Same parameters as DecisionTreeRegressor, but the defaults select
        # the "random" splitter and max_features="auto" — that is what makes
        # the tree "extremely randomized".
        super(ExtraTreeRegressor, self).__init__(
            criterion=criterion,
            splitter=splitter,
            max_depth=max_depth,
            min_samples_split=min_samples_split,
            min_samples_leaf=min_samples_leaf,
            min_weight_fraction_leaf=min_weight_fraction_leaf,
            max_features=max_features,
            max_leaf_nodes=max_leaf_nodes,
            random_state=random_state)
| bsd-3-clause |
hologram-io/hologram-python | tests/MessageMode/test_CustomCloud.py | 1 | 1993 | # Author: Hologram <support@hologram.io>
#
# Copyright 2016 - Hologram (Konekt, Inc.)
#
# LICENSE: Distributed under the terms of the MIT License
#
# test_CustomCloud.py - This file implements unit tests for the CustomCloud class.
import pytest
import sys
sys.path.append(".")
sys.path.append("..")
sys.path.append("../..")
from Hologram.Authentication import *
from Hologram.CustomCloud import CustomCloud
class TestCustomCloud:
    """Unit tests covering CustomCloud send/receive configuration."""

    def test_create_send(self):
        # Outbound-only setup: the receive side keeps its defaults.
        cloud = CustomCloud(None, send_host='127.0.0.1',
                            send_port=9999, enable_inbound=False)
        assert cloud.send_host == '127.0.0.1'
        assert cloud.send_port == 9999
        assert cloud.receive_host == ''
        assert cloud.receive_port == 0

    def test_create_receive(self):
        # Inbound-only setup: the send side keeps its defaults.
        cloud = CustomCloud(None, receive_host='127.0.0.1',
                            receive_port=9999, enable_inbound=False)
        assert cloud.send_host == ''
        assert cloud.send_port == 0
        assert cloud.receive_host == '127.0.0.1'
        assert cloud.receive_port == 9999

    def test_enable_inbound(self):
        # Enabling inbound without a receive host/port must fail loudly.
        with pytest.raises(Exception, match='Must set receive host and port for inbound connection'):
            CustomCloud(None, send_host='receive.com',
                        send_port=9999, enable_inbound=True)

    def test_invalid_send_host_and_port(self):
        cloud = CustomCloud(None, receive_host='receive.com', receive_port=9999)
        with pytest.raises(Exception, match = 'Send host and port must be set before making this operation'):
            cloud.sendMessage("hello")

    def test_invalid_send_sms(self):
        cloud = CustomCloud(None, 'test.com', 9999)
        with pytest.raises(NotImplementedError, match='Cannot send SMS via custom cloud'):
            cloud.sendSMS('+1234567890', "hello")
| mit |
markap/TravelMap | boilerplate/external/requests/packages/charade/latin1prober.py | 950 | 5241 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetprober import CharSetProber
from .constants import eNotMe
from .compat import wrap_ord
# Number of frequency categories tracked per character-pair transition.
FREQ_CAT_NUM = 4
# Character classes for windows-1252 bytes.
UDF = 0 # undefined
OTH = 1 # other
ASC = 2 # ascii capital letter
ASS = 3 # ascii small letter
ACV = 4 # accent capital vowel
ACO = 5 # accent capital other
ASV = 6 # accent small vowel
ASO = 7 # accent small other
CLASS_NUM = 8 # total classes
# Maps each windows-1252 byte value (0x00-0xFF) to one of the classes above.
Latin1_CharToClass = (
 OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 00 - 07
 OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 08 - 0F
 OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 10 - 17
 OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 18 - 1F
 OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 20 - 27
 OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 28 - 2F
 OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 30 - 37
 OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 38 - 3F
 OTH, ASC, ASC, ASC, ASC, ASC, ASC, ASC, # 40 - 47
 ASC, ASC, ASC, ASC, ASC, ASC, ASC, ASC, # 48 - 4F
 ASC, ASC, ASC, ASC, ASC, ASC, ASC, ASC, # 50 - 57
 ASC, ASC, ASC, OTH, OTH, OTH, OTH, OTH, # 58 - 5F
 OTH, ASS, ASS, ASS, ASS, ASS, ASS, ASS, # 60 - 67
 ASS, ASS, ASS, ASS, ASS, ASS, ASS, ASS, # 68 - 6F
 ASS, ASS, ASS, ASS, ASS, ASS, ASS, ASS, # 70 - 77
 ASS, ASS, ASS, OTH, OTH, OTH, OTH, OTH, # 78 - 7F
 OTH, UDF, OTH, ASO, OTH, OTH, OTH, OTH, # 80 - 87
 OTH, OTH, ACO, OTH, ACO, UDF, ACO, UDF, # 88 - 8F
 UDF, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 90 - 97
 OTH, OTH, ASO, OTH, ASO, UDF, ASO, ACO, # 98 - 9F
 OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # A0 - A7
 OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # A8 - AF
 OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # B0 - B7
 OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # B8 - BF
 ACV, ACV, ACV, ACV, ACV, ACV, ACO, ACO, # C0 - C7
 ACV, ACV, ACV, ACV, ACV, ACV, ACV, ACV, # C8 - CF
 ACO, ACO, ACV, ACV, ACV, ACV, ACV, OTH, # D0 - D7
 ACV, ACV, ACV, ACV, ACV, ACO, ACO, ACO, # D8 - DF
 ASV, ASV, ASV, ASV, ASV, ASV, ASO, ASO, # E0 - E7
 ASV, ASV, ASV, ASV, ASV, ASV, ASV, ASV, # E8 - EF
 ASO, ASO, ASV, ASV, ASV, ASV, ASV, OTH, # F0 - F7
 ASV, ASV, ASV, ASV, ASV, ASO, ASO, ASO, # F8 - FF
)
# Likelihood that class B follows class A (row = previous class,
# column = current class):
# 0 : illegal
# 1 : very unlikely
# 2 : normal
# 3 : very likely
Latin1ClassModel = (
 # UDF OTH ASC ASS ACV ACO ASV ASO
 0, 0, 0, 0, 0, 0, 0, 0, # UDF
 0, 3, 3, 3, 3, 3, 3, 3, # OTH
 0, 3, 3, 3, 3, 3, 3, 3, # ASC
 0, 3, 3, 3, 1, 1, 3, 3, # ASS
 0, 3, 3, 3, 1, 2, 1, 2, # ACV
 0, 3, 3, 3, 3, 3, 3, 3, # ACO
 0, 3, 1, 3, 1, 1, 1, 3, # ASV
 0, 3, 1, 3, 1, 1, 3, 3, # ASO
)
class Latin1Prober(CharSetProber):
    """Frequency-based prober for windows-1252 (Latin-1-like) text.

    Tracks how plausible each consecutive character-class transition is
    and derives a (deliberately low) confidence from the tallies.
    """

    def __init__(self):
        CharSetProber.__init__(self)
        self.reset()

    def reset(self):
        # Start from an "other" character and clear the frequency buckets.
        self._mLastCharClass = OTH
        self._mFreqCounter = [0 for _ in range(FREQ_CAT_NUM)]
        CharSetProber.reset(self)

    def get_charset_name(self):
        return "windows-1252"

    def feed(self, aBuf):
        aBuf = self.filter_with_english_letters(aBuf)
        for char in aBuf:
            char_class = Latin1_CharToClass[wrap_ord(char)]
            freq = Latin1ClassModel[(self._mLastCharClass * CLASS_NUM)
                                    + char_class]
            if freq == 0:
                # Illegal transition: this is definitely not windows-1252.
                self._mState = eNotMe
                break
            self._mFreqCounter[freq] += 1
            self._mLastCharClass = char_class
        return self.get_state()

    def get_confidence(self):
        if self.get_state() == eNotMe:
            return 0.01
        total = sum(self._mFreqCounter)
        if total < 0.01:
            confidence = 0.0
        else:
            confidence = ((self._mFreqCounter[3] / total)
                          - (self._mFreqCounter[1] * 20.0 / total))
        confidence = max(confidence, 0.0)
        # Halve the result so that more accurate probers win ties.
        return confidence * 0.5
| lgpl-3.0 |
arenadata/ambari | ambari-server/src/main/resources/stacks/HDPWIN/2.1/hooks/before-ANY/scripts/setup_jdk.py | 4 | 1602 | """
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import urllib2
from ambari_commons.inet_utils import download_file
from resource_management import *
_install_cmd = '{0} /s INSTALLDIR={1} ADDLOCAL="ToolsFeature,SourceFeature"'
def _check_installed():
    """Return True when java.exe already exists under the configured JAVA_HOME."""
    import params
    java_exe = os.path.join(params.java_home, 'bin', 'java.exe')
    return os.path.exists(java_exe)
def setup_jdk():
    """Download and silently install the JDK unless it is already present."""
    import params
    # Nothing to do when no JDK artifact is configured.
    if not params.jdk_name:
        return
    # Already installed: skip the download entirely.
    if _check_installed():
        return
    if not os.path.exists(params.java_home):
        os.makedirs(params.java_home)
    setup_savepath = os.path.join(params.java_home, params.jdk_name)
    setup_url = "{0}/{1}".format(params.jdk_location, params.jdk_name)
    download_file(setup_url, setup_savepath)
    Execute(_install_cmd.format(setup_savepath, params.java_home))
    if not _check_installed():
        raise Fail("Error when installing jdk")
repotvsupertuga/tvsupertuga.repository | plugin.video.SportsDevil/service/livestreamerXBMCLocalProxy.py | 6 | 8735 | """
XBMCLocalProxy 0.1
Copyright 2011 Torben Gerkensmeyer
Modified for Livestreamer by your mom 2k15
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301, USA.
"""
import xbmc
import base64
import urlparse
import sys
import socket
from SocketServer import ThreadingMixIn
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
import struct
import livestreamer
from livestreamer.exceptions import StreamError
from urlparse import urljoin
##aes stuff
# Pick an AES-CBC backend, in order of preference: PyCrypto, PyCryptodome,
# Android's libcrypto (via androidsslPy), then oscrypto.  CAN_DECRYPT stays
# True only if one of them imported successfully.
_android_ssl = False
_oscrypto = False
CAN_DECRYPT = True
try:
    from Crypto.Cipher import AES
except ImportError:
    try:
        from Cryptodome.Cipher import AES
    except ImportError:
        try:
            # Android fallback: load AES routines straight from libcrypto.
            import androidsslPy
            enc = androidsslPy._load_crypto_libcrypto()
            _android_ssl = True
        except:
            try:
                # oscrypto fallback, wrapped in a PyCrypto-like interface
                # so the decryptor call sites stay uniform.
                from oscrypto.symmetric import aes_cbc_no_padding_decrypt
                class AES(object):
                    def __init__(self,key,iv):
                        self.key=key
                        self.iv=iv
                    def decrypt(self, data):
                        return aes_cbc_no_padding_decrypt(self.key, data, self.iv)
                _oscrypto = True
            except:
                # No usable crypto library at all.
                CAN_DECRYPT = False
def num_to_iv(n):
    """Build a 16-byte AES IV: 8 zero bytes followed by *n* packed as a
    big-endian signed 64-bit integer (the HLS media-sequence IV layout)."""
    return struct.pack(">8x") + struct.pack(">q", n)
def create_decryptor(self, key, sequence):
    """Return an AES-CBC decryptor for an encrypted HLS segment.

    Monkey-patched onto HLSStreamWriter.  For streams configured with a
    "zoom-key" / "saw-key" / "your-key" session option, the playlist's key
    URI is rewritten to the matching key-proxy endpoint before fetching.

    :param key: playlist key descriptor (method, uri, iv)
    :param sequence: media sequence number, used to derive the IV when the
        playlist does not carry one
    :raises StreamError: for non-AES-128 ciphers or a missing key URI
    """
    if key.method != "AES-128":
        raise StreamError("Unable to decrypt cipher {0}", key.method)

    if not key.uri:
        raise StreamError("Missing URI to decryption key")

    if self.key_uri != key.uri:
        zoom_key = self.reader.stream.session.options.get("zoom-key")
        saw_key = self.reader.stream.session.options.get("saw-key")
        your_key = self.reader.stream.session.options.get("your-key")
        # Default to the playlist's own key URI; the provider branches below
        # may override it.  Previously `uri` was left unassigned when a
        # provider branch matched none of its URL patterns, which raised
        # UnboundLocalError at the http.get() call.
        uri = key.uri
        if zoom_key:
            uri = ('http://www.zoomtv.me/k.php?q=' +
                   base64.urlsafe_b64encode(zoom_key + base64.urlsafe_b64encode(key.uri)))
        elif saw_key:
            if 'foxsportsgo' in key.uri:
                _tmp = key.uri.split('/')
                uri = urljoin(saw_key, '/m/fream?p=' + _tmp[-4] + '&k=' + _tmp[-1])
            elif 'nlsk.neulion' in key.uri:
                _tmp = key.uri.split('?')
                uri = urljoin(saw_key, '/m/stream?' + _tmp[-1])
            elif 'nhl.com' in key.uri:
                _tmp = key.uri.split('/')
                uri = urljoin(saw_key, '/m/streams?ci=' + _tmp[-3] + '&k=' + _tmp[-1])
        elif your_key:
            if 'mlb.com' in key.uri:
                _tmp = key.uri.split('?')
                uri = urljoin(your_key, '/mlb/get_key/' + _tmp[-1])
            elif 'espn3/auth' in key.uri:
                _tmp = key.uri.split('?')
                uri = urljoin(your_key, '/ncaa/get_key/' + _tmp[-1])
            elif 'nhl.com' in key.uri:
                _tmp = key.uri.split('nhl.com/')
                uri = urljoin(your_key, '/nhl/get_key/' + _tmp[-1])

        res = self.session.http.get(uri, exception=StreamError,
                                    **self.reader.request_params)
        self.key_data = res.content
        self.key_uri = key.uri

    iv = key.iv or num_to_iv(sequence)
    # Dispatch to whichever AES backend was detected at import time.
    if _android_ssl:
        return enc(self.key_data, iv)
    elif _oscrypto:
        return AES(self.key_data, iv)
    else:
        return AES.new(self.key_data, AES.MODE_CBC, iv)
def process_sequences(self, playlist, sequences):
    """Monkey-patch for HLSStreamWorker.process_sequences.

    Records playlist state (encryption, reload interval, end marker) and
    selects the first media-sequence number to fetch.
    """
    first_sequence, last_sequence = sequences[0], sequences[-1]
    if first_sequence.segment.key and first_sequence.segment.key.method != "NONE":
        self.logger.debug("Segments in this playlist are encrypted")
        # Fail fast when no AES backend could be imported at module load.
        if not CAN_DECRYPT:
            raise StreamError("No crypto lib installed to decrypt this stream")
    # Did the sequence numbers move since the last reload?
    self.playlist_changed = ([s.num for s in self.playlist_sequences] !=
                             [s.num for s in sequences])
    self.playlist_reload_time = (playlist.target_duration or
                                 last_sequence.segment.duration)
    self.playlist_sequences = sequences
    # Unchanged playlist: poll twice as fast, but at least every second.
    if not self.playlist_changed:
        self.playlist_reload_time = max(self.playlist_reload_time / 2, 1)
    if playlist.is_endlist:
        self.playlist_end = last_sequence.num
    if self.playlist_sequence < 0:
        if self.playlist_end is None:
            # Live stream: start a few segments back from the live edge.
            edge_index = -(min(len(sequences), max(int(self.live_edge), 1)))
            edge_sequence = sequences[edge_index]
            self.playlist_sequence = edge_sequence.num
        else:
            self.playlist_sequence = first_sequence.num
class MyHandler(BaseHTTPRequestHandler):
    """HTTP handler that proxies livestreamer streams to XBMC.

    URL layout:
      /stop                 -- shut the proxy down
      /version              -- report proxy status and version
      /livestreamer/<b64>   -- resolve the urlsafe-base64-encoded URL and
                               pipe the "best" quality stream to the client
    """

    def log_message(self, format, *args):
        # Silence the default per-request stderr logging.
        pass

    def do_HEAD(self):
        """Serve a HEAD request (headers only, no stream data)."""
        self.answer_request(0)

    def do_GET(self):
        """Serve a GET request (headers plus stream data)."""
        self.answer_request(1)

    def answer_request(self, sendData):
        """Dispatch on the request path; sendData selects HEAD vs GET behaviour."""
        try:
            request_path = self.path[1:]
            if request_path == "stop":
                sys.exit()
            elif request_path == "version":
                self.send_response(200)
                self.end_headers()
                self.wfile.write("Proxy: Running\r\n")
                self.wfile.write("Version: 0.1\r\n")
            elif request_path[0:13] == "livestreamer/":
                realpath = request_path[13:]
                fURL = base64.urlsafe_b64decode(realpath)
                self.serveFile(fURL, sendData)
            else:
                self.send_response(403)
                self.end_headers()
        finally:
            return

    def serveFile(self, fURL, sendData):
        """Resolve fURL with livestreamer and stream it to the client.

        fURL may carry urlencoded HTTP headers after a '|' separator;
        certain Referer values additionally select a key-proxy option.
        """
        session = livestreamer.session.Livestreamer()
        # Install the key-proxy-aware decryptor/worker defined above.
        livestreamer.stream.hls.HLSStreamWriter.create_decryptor = create_decryptor
        livestreamer.stream.hls.HLSStreamWorker.process_sequences = process_sequences
        if '|' in fURL:
            sp = fURL.split('|')
            fURL = sp[0]
            headers = dict(urlparse.parse_qsl(sp[1]))
            session.set_option("http-headers", headers)
            session.set_option("http-ssl-verify", False)
            session.set_option("hls-segment-threads", 1)
            # NOTE(review): assumes a Referer header is always present in
            # the encoded header blob -- confirm against callers.
            if 'zoomtv' in headers['Referer']:
                session.set_option("zoom-key", headers['Referer'].split('?')[1])
            elif 'sawlive' in headers['Referer']:
                session.set_option("saw-key", headers['Referer'])
            elif 'yoursportsinhd' in headers['Referer']:
                session.set_option("your-key", headers['Referer'])
        try:
            streams = session.streams(fURL)
        except Exception:
            # Resolution failed: report 403 and stop.  The old code fell
            # through and dereferenced the unassigned `streams` variable.
            self.send_response(403)
            self.end_headers()
            return
        self.send_response(200)
        self.end_headers()
        if (sendData):
            with streams["best"].open() as stream:
                buf = 'INIT'
                while (len(buf) > 0):
                    buf = stream.read(1000 * 1024)
                    self.wfile.write(buf)
class Server(HTTPServer):
    """HTTPServer whose handle_request() gives up after five seconds,
    letting the main loop re-check the XBMC abort flag periodically."""

    timeout = 5
class ThreadedHTTPServer(ThreadingMixIn, Server):
    """Handle requests in a separate thread."""

# Loopback address/port the proxy listens on; XBMC add-ons connect to
# http://127.0.0.1:19000/.
#HOST_NAME = '127.1.2.3'
#PORT_NUMBER = 45678
HOST_NAME = '127.0.0.1'
PORT_NUMBER = 19000

if __name__ == '__main__':
    sys.stderr = sys.stdout
    server_class = ThreadedHTTPServer
    httpd = server_class((HOST_NAME, PORT_NUMBER), MyHandler)
    # Serve one request at a time until XBMC signals shutdown.
    while not xbmc.abortRequested:
        httpd.handle_request()
    httpd.server_close()
    httpd.stopped = True
sursum/buckanjaren | buckanjaren/lib/python3.5/site-packages/django/db/backends/oracle/schema.py | 58 | 5290 | import binascii
import copy
import datetime
import re
from django.db.backends.base.schema import BaseDatabaseSchemaEditor
from django.db.utils import DatabaseError
from django.utils import six
from django.utils.text import force_text
class DatabaseSchemaEditor(BaseDatabaseSchemaEditor):
    """Schema editor for Oracle.

    Oracle spells column changes as MODIFY (not ALTER COLUMN) and refuses
    certain direct type conversions; those are worked around with an
    add/copy/drop/rename dance in _alter_field_type_workaround().
    """

    sql_create_column = "ALTER TABLE %(table)s ADD %(column)s %(definition)s"
    sql_alter_column_type = "MODIFY %(column)s %(type)s"
    sql_alter_column_null = "MODIFY %(column)s NULL"
    sql_alter_column_not_null = "MODIFY %(column)s NOT NULL"
    sql_alter_column_default = "MODIFY %(column)s DEFAULT %(default)s"
    sql_alter_column_no_default = "MODIFY %(column)s DEFAULT NULL"
    sql_delete_column = "ALTER TABLE %(table)s DROP COLUMN %(column)s"
    sql_delete_table = "DROP TABLE %(table)s CASCADE CONSTRAINTS"

    def quote_value(self, value):
        """Return *value* rendered as an inline Oracle SQL literal."""
        if isinstance(value, (datetime.date, datetime.time, datetime.datetime)):
            return "'%s'" % value
        elif isinstance(value, six.string_types):
            # Escape embedded single quotes by doubling them.
            return "'%s'" % six.text_type(value).replace("\'", "\'\'")
        elif isinstance(value, six.buffer_types):
            # Binary data is inlined as a hex string.
            return "'%s'" % force_text(binascii.hexlify(value))
        elif isinstance(value, bool):
            return "1" if value else "0"
        else:
            return str(value)

    def delete_model(self, model):
        """Drop the model's table and its autoincrement sequence, if any."""
        # Run superclass action
        super(DatabaseSchemaEditor, self).delete_model(model)
        # Clean up any autoincrement trigger
        self.execute("""
            DECLARE
                i INTEGER;
            BEGIN
                SELECT COUNT(1) INTO i FROM USER_SEQUENCES
                    WHERE SEQUENCE_NAME = '%(sq_name)s';
                IF i = 1 THEN
                    EXECUTE IMMEDIATE 'DROP SEQUENCE "%(sq_name)s"';
                END IF;
            END;
        /""" % {'sq_name': self.connection.ops._get_sequence_name(model._meta.db_table)})

    def alter_field(self, model, old_field, new_field, strict=False):
        """Alter a field, falling back to the copy-based workaround when
        Oracle rejects the direct type change."""
        try:
            super(DatabaseSchemaEditor, self).alter_field(model, old_field, new_field, strict)
        except DatabaseError as e:
            description = str(e)
            # If we're changing type to an unsupported type we need a
            # SQLite-ish workaround
            # (ORA-22858 / ORA-22859: invalid alteration of datatype).
            if 'ORA-22858' in description or 'ORA-22859' in description:
                self._alter_field_type_workaround(model, old_field, new_field)
            else:
                raise

    def _alter_field_type_workaround(self, model, old_field, new_field):
        """
        Oracle refuses to change from some type to other type.
        What we need to do instead is:
        - Add a nullable version of the desired field with a temporary name
        - Update the table to transfer values from old to new
        - Drop old column
        - Rename the new column and possibly drop the nullable property
        """
        # Make a new field that's like the new one but with a temporary
        # column name.
        new_temp_field = copy.deepcopy(new_field)
        new_temp_field.null = True
        new_temp_field.column = self._generate_temp_name(new_field.column)
        # Add it
        self.add_field(model, new_temp_field)
        # Explicit data type conversion
        # https://docs.oracle.com/cd/B19306_01/server.102/b14200/sql_elements002.htm#sthref340
        new_value = self.quote_name(old_field.column)
        old_type = old_field.db_type(self.connection)
        if re.match('^N?CLOB', old_type):
            # CLOBs must first be flattened to VARCHAR2 before conversion.
            new_value = "TO_CHAR(%s)" % new_value
            old_type = 'VARCHAR2'
        if re.match('^N?VARCHAR2', old_type):
            new_internal_type = new_field.get_internal_type()
            if new_internal_type == 'DateField':
                new_value = "TO_DATE(%s, 'YYYY-MM-DD')" % new_value
            elif new_internal_type == 'DateTimeField':
                new_value = "TO_TIMESTAMP(%s, 'YYYY-MM-DD HH24:MI:SS.FF')" % new_value
            elif new_internal_type == 'TimeField':
                # TimeField are stored as TIMESTAMP with a 1900-01-01 date part.
                new_value = "TO_TIMESTAMP(CONCAT('1900-01-01 ', %s), 'YYYY-MM-DD HH24:MI:SS.FF')" % new_value
        # Transfer values across
        self.execute("UPDATE %s set %s=%s" % (
            self.quote_name(model._meta.db_table),
            self.quote_name(new_temp_field.column),
            new_value,
        ))
        # Drop the old field
        self.remove_field(model, old_field)
        # Rename and possibly make the new field NOT NULL
        super(DatabaseSchemaEditor, self).alter_field(model, new_temp_field, new_field)

    def normalize_name(self, name):
        """
        Get the properly shortened and uppercased identifier as returned by
        quote_name(), but without the actual quotes.
        """
        nn = self.quote_name(name)
        if nn[0] == '"' and nn[-1] == '"':
            nn = nn[1:-1]
        return nn

    def _generate_temp_name(self, for_name):
        """
        Generates temporary names for workarounds that need temp columns
        """
        # NOTE(review): str hashes are randomized per process under
        # PYTHONHASHSEED, so the suffix is only stable within one run --
        # sufficient here because the temp column lives and dies in the
        # same migration operation.
        suffix = hex(hash(for_name)).upper()[1:]
        return self.normalize_name(for_name + "_" + suffix)

    def prepare_default(self, value):
        """Render *value* for inlining into a DEFAULT clause."""
        return self.quote_value(value)
| mit |
ahamilton55/ansible | lib/ansible/modules/cloud/openstack/os_security_group_rule.py | 22 | 11653 | #!/usr/bin/python
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2013, Benno Joy <benno@ansible.com>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_security_group_rule
short_description: Add/Delete rule from an existing security group
author: "Benno Joy (@bennojoy)"
extends_documentation_fragment: openstack
version_added: "2.0"
description:
- Add or Remove rule from an existing security group
options:
security_group:
description:
- Name or ID of the security group
required: true
protocol:
description:
- IP protocols TCP UDP ICMP 112 (VRRP)
choices: ['tcp', 'udp', 'icmp', '112', None]
default: None
port_range_min:
description:
- Starting port
required: false
default: None
port_range_max:
description:
- Ending port
required: false
default: None
remote_ip_prefix:
description:
- Source IP address(es) in CIDR notation (exclusive with remote_group)
required: false
remote_group:
description:
- Name or ID of the Security group to link (exclusive with
remote_ip_prefix)
required: false
ethertype:
description:
- Must be IPv4 or IPv6, and addresses represented in CIDR must
match the ingress or egress rules. Not all providers support IPv6.
choices: ['IPv4', 'IPv6']
default: IPv4
direction:
description:
- The direction in which the security group rule is applied. Not
all providers support egress.
choices: ['egress', 'ingress']
default: ingress
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
default: present
availability_zone:
description:
- Ignored. Present for backwards compatability
required: false
requirements: ["shade"]
'''
EXAMPLES = '''
# Create a security group rule
- os_security_group_rule:
cloud: mordred
security_group: foo
protocol: tcp
port_range_min: 80
port_range_max: 80
remote_ip_prefix: 0.0.0.0/0
# Create a security group rule for ping
- os_security_group_rule:
cloud: mordred
security_group: foo
protocol: icmp
remote_ip_prefix: 0.0.0.0/0
# Another way to create the ping rule
- os_security_group_rule:
cloud: mordred
security_group: foo
protocol: icmp
port_range_min: -1
port_range_max: -1
remote_ip_prefix: 0.0.0.0/0
# Create a TCP rule covering all ports
- os_security_group_rule:
cloud: mordred
security_group: foo
protocol: tcp
port_range_min: 1
port_range_max: 65535
remote_ip_prefix: 0.0.0.0/0
# Another way to create the TCP rule above (defaults to all ports)
- os_security_group_rule:
cloud: mordred
security_group: foo
protocol: tcp
remote_ip_prefix: 0.0.0.0/0
# Create a rule for VRRP with numbered protocol 112
- os_security_group_rule:
security_group: loadbalancer_sg
protocol: 112
remote_group: loadbalancer-node_sg
'''
RETURN = '''
id:
description: Unique rule UUID.
type: string
returned: state == present
direction:
description: The direction in which the security group rule is applied.
type: string
sample: 'egress'
returned: state == present
ethertype:
description: One of IPv4 or IPv6.
type: string
sample: 'IPv4'
returned: state == present
port_range_min:
description: The minimum port number in the range that is matched by
the security group rule.
type: int
sample: 8000
returned: state == present
port_range_max:
description: The maximum port number in the range that is matched by
the security group rule.
type: int
sample: 8000
returned: state == present
protocol:
description: The protocol that is matched by the security group rule.
type: string
sample: 'tcp'
returned: state == present
remote_ip_prefix:
description: The remote IP prefix to be associated with this security group rule.
type: string
sample: '0.0.0.0/0'
returned: state == present
security_group_id:
description: The security group ID to associate with this security group rule.
type: string
returned: state == present
'''
# shade is an optional dependency: record its availability so main() can
# fail with a clear message instead of an ImportError at module load.
try:
    import shade
    HAS_SHADE = True
except ImportError:
    HAS_SHADE = False
def _ports_match(protocol, module_min, module_max, rule_min, rule_max):
    """
    Capture the complex port matching logic.

    The module may pass -1 (ICMP, Nova-style) or None (Neutron-style);
    shade always reports ports as None, so -1 values are normalised to
    None before comparing.  For TCP/UDP, a (None, None) pair from the
    module is equivalent to the full 1-65535 range that Nova stores, so
    that pair is accepted against a 1-65535 rule as well.
    """
    # ICMP: a -1 sentinel means "unset", which shade reports as None.
    if protocol == 'icmp':
        if module_min and int(module_min) == -1:
            module_min = None
        if module_max and int(module_max) == -1:
            module_max = None

    # TCP/UDP: (None, None) from the module matches Nova's stored 1-65535.
    if protocol in ('tcp', 'udp') and module_min is None and module_max is None:
        if (rule_min and int(rule_min) == 1
                and rule_max and int(rule_max) == 65535):
            # (None, None) == (1, 65535)
            return True

    def _as_int(value):
        # Normalise truthy values to int to avoid type-mismatch compares;
        # falsy values (None, 0) are compared as-is.
        return int(value) if value else value

    return (_as_int(module_min) == _as_int(rule_min)
            and _as_int(module_max) == _as_int(rule_max))
def _find_matching_rule(module, secgroup, remotegroup):
    """
    Find a rule in the group that matches the module parameters.

    :returns: The matching rule dict, or None if no matches.
    """
    params = module.params
    wanted = (params['protocol'],
              params['remote_ip_prefix'],
              params['ethertype'],
              params['direction'],
              remotegroup['id'])
    for rule in secgroup['security_group_rules']:
        actual = (rule['protocol'],
                  rule['remote_ip_prefix'],
                  rule['ethertype'],
                  rule['direction'],
                  rule['remote_group_id'])
        if actual != wanted:
            continue
        # Ports need the Nova/Neutron normalisation logic.
        if _ports_match(params['protocol'],
                        params['port_range_min'],
                        params['port_range_max'],
                        rule['port_range_min'],
                        rule['port_range_max']):
            return rule
    return None
def _system_state_change(module, secgroup, remotegroup):
    """Report whether applying the module parameters would change anything."""
    if not secgroup:
        return False
    state = module.params['state']
    matched = _find_matching_rule(module, secgroup, remotegroup)
    if state == 'present' and not matched:
        return True
    if state == 'absent' and matched:
        return True
    # Falls through (returning None) when nothing would change, matching
    # the historical behaviour of this function.
def main():
    """Entry point: create or delete one security group rule, honouring
    Ansible check mode."""
    argument_spec = openstack_full_argument_spec(
        security_group = dict(required=True),
        # NOTE(Shrews): None is an acceptable protocol value for
        # Neutron, but Nova will balk at this.
        protocol = dict(default=None,
                        choices=[None, 'tcp', 'udp', 'icmp', '112']),
        port_range_min = dict(required=False, type='int'),
        port_range_max = dict(required=False, type='int'),
        remote_ip_prefix = dict(required=False, default=None),
        remote_group = dict(required=False, default=None),
        ethertype = dict(default='IPv4',
                         choices=['IPv4', 'IPv6']),
        direction = dict(default='ingress',
                         choices=['egress', 'ingress']),
        state = dict(default='present',
                     choices=['absent', 'present']),
    )

    module_kwargs = openstack_module_kwargs(
        mutually_exclusive=[
            ['remote_ip_prefix', 'remote_group'],
        ]
    )

    module = AnsibleModule(argument_spec,
                           supports_check_mode=True,
                           **module_kwargs)

    if not HAS_SHADE:
        module.fail_json(msg='shade is required for this module')

    state = module.params['state']
    security_group = module.params['security_group']
    remote_group = module.params['remote_group']
    changed = False

    try:
        cloud = shade.openstack_cloud(**module.params)
        secgroup = cloud.get_security_group(security_group)

        if remote_group:
            remotegroup = cloud.get_security_group(remote_group)
        else:
            # Sentinel meaning "no remote group" in rule comparisons.
            remotegroup = { 'id' : None }

        if module.check_mode:
            # Check mode: only report whether anything would change.
            module.exit_json(changed=_system_state_change(module, secgroup, remotegroup))

        if state == 'present':
            if not secgroup:
                module.fail_json(msg='Could not find security group %s' %
                                 security_group)

            # Idempotency: only create the rule when no equivalent exists.
            rule = _find_matching_rule(module, secgroup, remotegroup)
            if not rule:
                rule = cloud.create_security_group_rule(
                    secgroup['id'],
                    port_range_min=module.params['port_range_min'],
                    port_range_max=module.params['port_range_max'],
                    protocol=module.params['protocol'],
                    remote_ip_prefix=module.params['remote_ip_prefix'],
                    remote_group_id=remotegroup['id'],
                    direction=module.params['direction'],
                    ethertype=module.params['ethertype']
                )
                changed = True
            module.exit_json(changed=changed, rule=rule, id=rule['id'])

        if state == 'absent' and secgroup:
            rule = _find_matching_rule(module, secgroup, remotegroup)
            if rule:
                cloud.delete_security_group_rule(rule['id'])
                changed = True

            module.exit_json(changed=changed)

    except shade.OpenStackCloudException as e:
        module.fail_json(msg=str(e))
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()
| gpl-3.0 |
Bozhidariv/statisticum | statisticum/common/templatetags/common_tags.py | 1 | 2517 | import os
import re
from django.conf import settings
from django import template
from datetime import datetime
from statisticum.common import timezone
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext
register = template.Library()
@register.filter("share_twitter")
def share_twitter(item):
    """Return the Twitter intent URL prefix for *item*; the page URL is
    appended by the template after the empty `url=` parameter."""
    base = "https://twitter.com/intent/tweet?text="
    return base + item.title + " - &url="
@register.filter("share_facebook")
def share_facebook(item):
    """Return the Facebook sharer URL prefix; the page URL is appended
    in the template."""
    # *item* is unused; the parameter keeps filter signatures symmetric.
    return "https://www.facebook.com/sharer/sharer.php?u="
@register.filter("share_linkedin")
def share_linkedin(item):
    """Return the LinkedIn share URL prefix; the page URL is appended
    in the template."""
    # *item* is unused; the parameter keeps filter signatures symmetric.
    return "http://www.linkedin.com/shareArticle?mini=true&url="
@register.filter("slugify")
def slugify(text):
    # Delegates to slughifi for unicode-aware slug generation.
    # NOTE(review): `slughifi` is not imported anywhere in this module's
    # visible imports -- calling this filter would raise NameError.
    # Confirm it is provided elsewhere (e.g. the django-slughifi package)
    # before relying on it.
    return slughifi(text)
@register.filter("title")
def title(title):
    """Strip any (...) or [...] bracketed segments from *title*."""
    bracketed = re.compile(r"[\(\[].*?[\)\]]")
    return bracketed.sub("", title)
@register.filter("game_url")
def game_url(game):
    # Concatenate the game's displayable fields (currently just the title)
    # and slugify the result; the single-item list leaves room to append
    # more fields later.
    result = ""
    if game:
        for param in [game.title]:
            if param:
                result = result + " " + param
    # NOTE(review): `slughifi` is not imported in this module's visible
    # imports -- calling this filter would raise NameError; confirm it is
    # provided elsewhere before relying on it.
    return slughifi(result)
def pretty_date(time, short=True):
    """
    Get a datetime object or a int() Epoch timestamp and return a
    pretty string like 'an hour ago', 'Yesterday', '3 months ago',
    'just now', etc

    :param time: datetime (or, historically, an epoch int) to describe
    :param short: True for compact units ("5 m"), False for verbose
        ("5 minutes ago")
    :returns: '' for unsupported types and for times in the future
    """
    if type(time) is not int and type(time) != datetime:
        return ""
    # NOTE(review): int inputs pass the guard above but would raise
    # AttributeError on .replace() below -- the int branch looks vestigial.
    time = time.replace(tzinfo=timezone.utc)
    now = timezone.now()
    diff = now - time
    second_diff = diff.seconds
    day_diff = diff.days

    if day_diff < 0:
        return ''

    # The unit suffix is parenthesised in every branch below: the old
    # `str(x) + " m" if short else " minutes ago"` parsed as
    # `(str(x) + " m") if short else " minutes ago"`, so all the verbose
    # messages silently dropped the number.
    if day_diff == 0:
        if second_diff < 10:
            return "now" if short else "just now"
        if second_diff < 60:
            return str(second_diff) + ("s" if short else " seconds ago")
        if second_diff < 120:
            return "1 m" if short else "a minute ago"
        if second_diff < 3600:
            return str(second_diff // 60) + (" m" if short else " minutes ago")
        if second_diff < 7200:
            return "1 h" if short else "an hour ago"
        if second_diff < 86400:
            return str(second_diff // 3600) + (" h" if short else " hours ago")
    if day_diff == 1:
        return "1 d" if short else ugettext("Yesterday")
    if day_diff < 7:
        return str(day_diff) + (" d" if short else " " + ugettext("days ago"))
    if day_diff < 31:
        return str(int(day_diff // 7)) + (" w" if short else " weeks ago")
    if day_diff < 365:
        # The old code had no short form here; " mo" keeps the compact
        # style consistent with the other branches.
        return str(int(day_diff // 30)) + (" mo" if short else " months ago")
    return str(int(day_diff // 365)) + (" y" if short else " years ago")
| gpl-2.0 |
fyfcauc/android_external_chromium-org | third_party/android_testrunner/logger.py | 171 | 2449 | #!/usr/bin/python2.4
#
#
# Copyright 2007, The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple logging utility. Dumps log messages to stdout, and optionally, to a
log file.
Init(path) must be called to enable logging to a file
"""
import datetime
_LOG_FILE = None  # path of the log file; None means file logging is disabled until Init() is called
_verbose = False  # when True, SilentLog() also echoes messages to stdout (see SetVerbose)
_log_time = True  # when True, each log line is prefixed with a timestamp (see SetTimestampLogging)
def Init(log_file_path):
  """Set the path to the log file and enable file logging.

  Args:
    log_file_path: path of the file that subsequent Log()/SilentLog() calls
      append to.
  """
  global _LOG_FILE
  _LOG_FILE = log_file_path
  # call form of print works identically on Python 2 (single argument) and 3
  print("Using log file: %s" % _LOG_FILE)
def GetLogFilePath():
  """Return the path of the current log file (None if file logging is off)."""
  # reading a module global needs no 'global' declaration
  return _LOG_FILE
def Log(new_str):
  """Append new_str to the end of _LOG_FILE and print it to stdout.

  The message is timestamped via _PrependTimeStamp() unless timestamp
  logging was disabled with SetTimestampLogging(False).

  Args:
    # new_str is a string.
    new_str: 'some message to log'
  """
  msg = _PrependTimeStamp(new_str)
  # call form of print works identically on Python 2 and 3
  print(msg)
  _WriteLog(msg)
def _WriteLog(msg):
  """Append msg to the log file, if a log file was configured via Init()."""
  if _LOG_FILE is not None:
    # open() instead of the deprecated file() builtin (removed in Python 3);
    # a context manager guarantees the handle is closed even if write() raises
    with open(_LOG_FILE, 'a') as file_handle:
      file_handle.write('\n' + str(msg))
def _PrependTimeStamp(log_string):
  """Return log_string prepended with the current timestamp, if enabled."""
  if not _log_time:
    # timestamp logging disabled
    return log_string
  stamp = datetime.datetime.now().strftime("%m/%d/%y %H:%M:%S")
  return "# %s: %s" % (stamp, log_string)
def SilentLog(new_str):
  """Silently log new_str. Unless verbose mode is enabled, will log new_str
  only to the log file.

  Args:
    # new_str is a string.
    new_str: 'some message to log'
  """
  msg = _PrependTimeStamp(new_str)
  if _verbose:
    # call form of print works identically on Python 2 and 3
    print(msg)
  _WriteLog(msg)
def SetVerbose(new_verbose=True):
  """Enable or disable echoing of SilentLog() messages to stdout."""
  global _verbose
  _verbose = new_verbose
def SetTimestampLogging(new_timestamp=True):
  """Enable or disable prefixing each log entry with a timestamp."""
  global _log_time
  _log_time = new_timestamp
def main():
  # intentional no-op: this module is a library and has no standalone behavior
  pass

if __name__ == '__main__':
  main()
| bsd-3-clause |
repotvsupertuga/repo | instal/script.module.stream.tvsupertuga.addon/resources/lib/sources/en/miradetodo.py | 8 | 6979 | # -*- coding: utf-8 -*-
'''
Exodus Add-on
Copyright (C) 2016 Exodus
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,urlparse,json
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import directstream
class source:
    """Scraper for miradetodo.io (a Kodi add-on content source).

    Implements the add-on's standard source-provider interface:
    movie()/tvshow()/episode() resolve site URLs, sources() extracts stream
    links, resolve() finalizes a playable URL.
    """
    def __init__(self):
        self.priority = 1
        self.language = ['en']
        self.domains = ['miradetodo.io']
        self.base_link = 'http://miradetodo.io'
        self.search_link = '/?s=%s'
        self.episode_link = '/episodio/%s-%sx%s'
        self.tvshow_link = '/series/%s/'
    def movie(self, imdb, title, localtitle, aliases, year):
        """Return the site-relative URL for a movie, matched by title and year."""
        try:
            # fetch the localized (es-AR) title from IMDb, since the site indexes
            # by the Spanish title
            t = 'http://www.imdb.com/title/%s' % imdb
            t = client.request(t, headers={'Accept-Language': 'es-AR'})
            t = client.parseDOM(t, 'title')[0]
            # strip the trailing "(YYYY) ..." portion from the page title
            t = re.sub('(?:\(|\s)\d{4}.+', '', t).strip().encode('utf-8')
            q = self.search_link % urllib.quote_plus(t)
            q = urlparse.urljoin(self.base_link, q)
            r = client.request(q)
            r = client.parseDOM(r, 'div', attrs = {'class': 'item'})
            r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'span', attrs = {'class': 'tt'}), client.parseDOM(i, 'span', attrs = {'class': 'year'})) for i in r]
            r = [(i[0][0], i[1][0], i[2][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0 and len(i[2]) > 0]
            # keep the first result whose normalized title and year both match
            r = [i[0] for i in r if cleantitle.get(t) == cleantitle.get(i[1]) and year == i[2]][0]
            url = re.findall('(?://.+?|)(/.+)', r)[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            pass
    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
        """Return a urlencoded payload identifying a TV show on the site."""
        try:
            t = cleantitle.geturl(tvshowtitle)
            q = self.tvshow_link % t
            q = urlparse.urljoin(self.base_link, q)
            # first try the direct /series/<slug>/ URL
            r = client.request(q, output='geturl')
            if not r:
                # fall back to searching with the localized IMDb title
                t = 'http://www.imdb.com/title/%s' % imdb
                t = client.request(t, headers={'Accept-Language': 'es-AR'})
                t = client.parseDOM(t, 'title')[0]
                t = re.sub('(?:\(|\s)\(TV Series.+', '', t).strip().encode('utf-8')
                q = self.search_link % urllib.quote_plus(t)
                q = urlparse.urljoin(self.base_link, q)
                r = client.request(q)
                r = client.parseDOM(r, 'div', attrs = {'class': 'item'})
                r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'span', attrs = {'class': 'tt'}), client.parseDOM(r, 'span', attrs = {'class': 'year'}))
                r = [(i[0], re.sub('(?:\(|\s)\('+year+'.+', '', i[1]).strip().encode('utf-8'), i[2]) for i in r if len(i[0]) > 0 and '/series/' in i[0] and len(i[1]) > 0 and len(i[2]) > 0]
                r = [i[0] for i in r if year == i[2]][0]
            url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year, 'url': r}
            url = urllib.urlencode(url)
            return url
        except:
            return
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        """Build the site-relative episode URL from the tvshow() payload."""
        try:
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            # the show slug is the 5th path segment of the stored series URL
            show = data['url'].split('/')[4]
            r = urlparse.urljoin(self.base_link, self.episode_link % (show, season, episode))
            url = re.findall('(?://.+?|)(/.+)', r)[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            pass
    def sources(self, url, hostDict, hostprDict):
        """Scrape the page for hoster iframes and return a list of source dicts."""
        try:
            sources = []
            if url == None: return sources
            r = urlparse.urljoin(self.base_link, url)
            result = client.request(r)
            f = client.parseDOM(result, 'div', attrs = {'class': 'movieplay'})
            if not f:
                # alternate page layout
                f = client.parseDOM(result, 'div', attrs={'class': 'embed2'})
                f = client.parseDOM(f, 'div')
            f = client.parseDOM(f, 'iframe', ret='data-lazy-src')
            # skip iframes whose ?id= query value was already processed
            dupes = []
            for u in f:
                try:
                    sid = urlparse.parse_qs(urlparse.urlparse(u).query)['id'][0]
                    if sid in dupes: raise Exception()
                    dupes.append(sid)
                    if 'stream/ol.php' in u:
                        # openload embed: resolve the inner iframe src
                        url = client.request(u, timeout='10', XHR=True, referer=u)
                        url = client.parseDOM(url, 'iframe', ret='src')[0]
                        sources.append({'source': 'openload.co', 'quality': 'HD', 'language': 'en', 'url': url, 'direct': False, 'debridonly': False})
                    if 'stream/play.php' in u:
                        # gvideo-style embed: follow the .php link, then parse the
                        # JS 'sources: [...]' array as JSON
                        url = client.request(u, timeout='10', XHR=True, referer=u)
                        url = client.parseDOM(url, 'a', ret='href')
                        url = [i for i in url if '.php' in i][0]
                        url = 'http:' + url if url.startswith('//') else url
                        url = client.request(url, timeout='10', XHR=True, referer=u)
                        url = re.findall('sources\s*:\s*\[(.+?)\]', url)[0]
                        links = json.loads('[' + url + ']')
                        for i in links:
                            try:
                                # map the numeric label (e.g. '720') to a quality bucket
                                quality = re.findall('(\d+)', i['label'])[0]
                                if int(quality) >= 1080: quality = '1080p'
                                elif 720 <= int(quality) < 1080: quality = 'HD'
                                else: quality = 'SD'
                                try:
                                    # prefer the quality reported by googletag, if resolvable
                                    quality = directstream.googletag(i['file'])[0]['quality']
                                except:
                                    pass
                                sources.append({'source': 'gvideo', 'quality': quality, 'language': 'en', 'url': i['file'], 'direct': True, 'debridonly': False})
                            except:
                                pass
                except:
                    pass
            return sources
        except:
            return sources
    def resolve(self, url):
        """Resolve a gvideo URL into a final playable URL."""
        return directstream.googlepass(url)
| gpl-2.0 |
h3llrais3r/Auto-Subliminal | lib/enzyme/parsers/ebml/core.py | 74 | 9965 | # -*- coding: utf-8 -*-
from ...exceptions import ReadError
from .readers import *
from pkg_resources import resource_stream # @UnresolvedImport
from xml.dom import minidom
import logging
__all__ = ['INTEGER', 'UINTEGER', 'FLOAT', 'STRING', 'UNICODE', 'DATE', 'MASTER', 'BINARY',
           'SPEC_TYPES', 'READERS', 'Element', 'MasterElement', 'parse', 'parse_element',
           'get_matroska_specs']
logger = logging.getLogger(__name__)
# EBML types
INTEGER, UINTEGER, FLOAT, STRING, UNICODE, DATE, MASTER, BINARY = range(8)
# Spec types to EBML types mapping (spec XML uses these type-name strings)
SPEC_TYPES = {
    'integer': INTEGER,
    'uinteger': UINTEGER,
    'float': FLOAT,
    'string': STRING,
    'utf-8': UNICODE,
    'date': DATE,
    'master': MASTER,
    'binary': BINARY
}
# Readers to use per EBML type.
# NOTE: MASTER has no reader on purpose — master elements are containers and
# are parsed recursively by parse()/parse_element() instead.
READERS = {
    INTEGER: read_element_integer,
    UINTEGER: read_element_uinteger,
    FLOAT: read_element_float,
    STRING: read_element_string,
    UNICODE: read_element_unicode,
    DATE: read_element_date,
    BINARY: read_element_binary
}
class Element(object):
    """A single parsed EBML element.

    :param int id: id of the element, best represented as hexadecimal
        (0x18538067 for the Matroska Segment element)
    :param type: type of the element
    :type type: :data:`INTEGER`, :data:`UINTEGER`, :data:`FLOAT`, :data:`STRING`,
        :data:`UNICODE`, :data:`DATE`, :data:`MASTER` or :data:`BINARY`
    :param string name: name of the element
    :param int level: nesting level of the element
    :param int position: position of the element's data in the stream
    :param int size: size of the element's data in bytes
    :param data: data as read by the corresponding :data:`READERS`
    """
    def __init__(self, id=None, type=None, name=None, level=None, position=None, size=None, data=None):  # @ReservedAssignment
        self.id = id
        self.type = type
        self.name = name
        self.level = level
        self.position = position
        self.size = size
        self.data = data

    def __repr__(self):
        # e.g. <Element [DocType, 'matroska']>
        return '<{0} [{1}, {2!r}]>'.format(type(self).__name__, self.name, self.data)
class MasterElement(Element):
    """Element of type :data:`MASTER` that has a list of :class:`Element` as its data

    :param int id: id of the element, best represented as hexadecimal (0x18538067 for Matroska Segment element)
    :param string name: name of the element
    :param int level: level of the element
    :param int position: position of element's data
    :param int size: size of element's data
    :param data: child elements
    :type data: list of :class:`Element`

    :class:`MasterElement` implements some magic methods to ease manipulation. Thus, a MasterElement supports
    the `in` keyword to test for the presence of a child element by its name and gives access to it
    with a container getter::

        >>> ebml_element = parse(open('test1.mkv', 'rb'), get_matroska_specs())[0]
        >>> 'EBMLVersion' in ebml_element
        False
        >>> 'DocType' in ebml_element
        True
        >>> ebml_element['DocType']
        <Element [DocType, u'matroska']>

    """
    def __init__(self, id=None, name=None, level=None, position=None, size=None, data=None):  # @ReservedAssignment
        # a MasterElement is always of type MASTER
        super(MasterElement, self).__init__(id, MASTER, name, level, position, size, data)
    def load(self, stream, specs, ignore_element_types=None, ignore_element_names=None, max_level=None):
        """Load children :class:`Elements <Element>` with level lower or equal to the `max_level`
        from the `stream` according to the `specs`

        :param stream: file-like object from which to read
        :param dict specs: see :ref:`specs`
        :param list ignore_element_types: list of element types to ignore
        :param list ignore_element_names: list of element names to ignore
        :param int max_level: maximum level for children elements
        """
        self.data = parse(stream, specs, self.size, ignore_element_types, ignore_element_names, max_level)
    def get(self, name, default=None):
        """Convenience method for ``master_element[name].data if name in master_element else default``

        :param string name: the name of the child to get
        :param default: default value if `name` is not in the :class:`MasterElement`
        :return: the data of the child :class:`Element` or `default`
        :raises ValueError: if the named child is itself a :class:`MasterElement`
        """
        if name not in self:
            return default
        element = self[name]
        if element.type == MASTER:
            raise ValueError('%s is a MasterElement' % name)
        return element.data
    def __getitem__(self, key):
        # integer key: positional access into the child list
        if isinstance(key, int):
            return self.data[key]
        # string key: lookup by child name; must match exactly one child
        children = [e for e in self.data if e.name == key]
        if not children:
            raise KeyError(key)
        if len(children) > 1:
            raise KeyError('More than 1 child with key %s (%d)' % (key, len(children)))
        return children[0]
    def __contains__(self, item):
        # membership test by child element name
        return len([e for e in self.data if e.name == item]) > 0
    def __iter__(self):
        return iter(self.data)
def parse(stream, specs, size=None, ignore_element_types=None, ignore_element_names=None, max_level=None):
    """Parse a stream for `size` bytes according to the `specs`

    :param stream: file-like object from which to read
    :param size: maximum number of bytes to read, None to read all the stream
    :type size: int or None
    :param dict specs: see :ref:`specs`
    :param list ignore_element_types: list of element types to ignore
    :param list ignore_element_names: list of element names to ignore
    :param int max_level: maximum level of elements
    :return: parsed data as a tree of :class:`~enzyme.parsers.ebml.core.Element`
    :rtype: list

    .. note::
        If `size` is reached in a middle of an element, reading will continue
        until the element is fully parsed.

    """
    ignore_element_types = ignore_element_types if ignore_element_types is not None else []
    ignore_element_names = ignore_element_names if ignore_element_names is not None else []
    start = stream.tell()
    elements = []
    while size is None or stream.tell() - start < size:
        try:
            element = parse_element(stream, specs)
            # parse_element returns None for ids missing from the specs (it
            # already skipped over the unknown element's data)
            if element is None:
                continue
            logger.debug('%s %s parsed', element.__class__.__name__, element.name)
            if element.type in ignore_element_types or element.name in ignore_element_names:
                logger.info('%s %s ignored', element.__class__.__name__, element.name)
                # an ignored MASTER element's children must be skipped as well
                if element.type == MASTER:
                    stream.seek(element.size, 1)
                continue
            if element.type == MASTER:
                if max_level is not None and element.level >= max_level:
                    logger.info('Maximum level %d reached for children of %s %s', max_level, element.__class__.__name__, element.name)
                    stream.seek(element.size, 1)
                else:
                    logger.debug('Loading child elements for %s %s with size %d', element.__class__.__name__, element.name, element.size)
                    # recurse into the container, bounded by its declared size
                    element.data = parse(stream, specs, element.size, ignore_element_types, ignore_element_names, max_level)
            elements.append(element)
        except ReadError:
            # when no size bound was given, end-of-stream terminates parsing;
            # with a size bound a short read is a real error
            if size is not None:
                raise
            break
    return elements
def parse_element(stream, specs, load_children=False, ignore_element_types=None, ignore_element_names=None, max_level=None):
    """Extract a single :class:`Element` from the `stream` according to the `specs`

    :param stream: file-like object from which to read
    :param dict specs: see :ref:`specs`
    :param bool load_children: load children elements if the parsed element is a :class:`MasterElement`
    :param list ignore_element_types: list of element types to ignore
    :param list ignore_element_names: list of element names to ignore
    :param int max_level: maximum level for children elements
    :return: the parsed element, or None if the element id is not in `specs`
        (the element's data is skipped in that case)
    :rtype: :class:`Element`
    :raises ReadError: if the element id or size cannot be read
    """
    ignore_element_types = ignore_element_types if ignore_element_types is not None else []
    ignore_element_names = ignore_element_names if ignore_element_names is not None else []
    element_id = read_element_id(stream)
    if element_id is None:
        raise ReadError('Cannot read element id')
    element_size = read_element_size(stream)
    if element_size is None:
        raise ReadError('Cannot read element size')
    if element_id not in specs:
        logger.error('Element with id 0x%x is not in the specs' % element_id)
        # skip over the unknown element's payload so parsing can continue
        stream.seek(element_size, 1)
        return None
    element_type, element_name, element_level = specs[element_id]
    if element_type == MASTER:
        element = MasterElement(element_id, element_name, element_level, stream.tell(), element_size)
        if load_children:
            element.data = parse(stream, specs, element.size, ignore_element_types, ignore_element_names, max_level)
    else:
        # non-master elements are read immediately with the type's reader
        element = Element(element_id, element_type, element_name, element_level, stream.tell(), element_size)
        element.data = READERS[element_type](stream, element_size)
    return element
def get_matroska_specs(webm_only=False):
    """Get the Matroska specs

    :param bool webm_only: load *only* WebM specs
    :return: the specs in the appropriate format. See :ref:`specs`
        (maps element id -> (type, name, level))
    :rtype: dict
    """
    specs = {}
    # the spec is bundled as package data: specs/matroska.xml
    with resource_stream(__name__, 'specs/matroska.xml') as resource:
        xmldoc = minidom.parse(resource)
        for element in xmldoc.getElementsByTagName('element'):
            # precedence: (not webm_only) or (hasAttribute and == '1')
            if not webm_only or element.hasAttribute('webm') and element.getAttribute('webm') == '1':
                specs[int(element.getAttribute('id'), 16)] = (SPEC_TYPES[element.getAttribute('type')], element.getAttribute('name'), int(element.getAttribute('level')))
    return specs
| gpl-3.0 |
kc-lab/dms2dfe | dms2dfe/lib/plot_mut_data_scatter.py | 2 | 17844 | #!usr/bin/python
# Copyright 2016, Rohan Dandage <rraadd_8@hotmail.com,rohan@igib.in>
# This program is distributed under General Public License v. 3.
"""
================================
``plot_mut_data_scatter``
================================
"""
import sys
from os.path import splitext,exists,basename
from os import makedirs,stat
import pandas as pd
import numpy as np
import matplotlib
matplotlib.style.use('ggplot')
matplotlib.rcParams['axes.unicode_minus']=False
# backend selected before pyplot is imported, so no X server is needed
matplotlib.use('Agg') # no Xwindows
import matplotlib.pyplot as plt
# matplotlib.style.use('ggplot')
import logging
# NOTE: configures the root logger at import time (module side effect)
logging.basicConfig(format='[%(asctime)s] %(levelname)s\tfrom %(filename)s in %(funcName)s(..): %(message)s',level=logging.DEBUG) #
# from dms2dfe.lib.io_strs import make_pathable_string
from dms2dfe.lib.io_plots import saveplot,get_axlims
from dms2dfe.lib.io_dfs import set_index,denanrows
def gettopnlastdiff(data,col1,col2,zcol=None,rows=5,zcol_threshold=None,
                    col_classes=None,classes=[]):
    """
    Get the mutants with the largest negative and positive difference
    (``col2 - col1``).

    :param data: pandas dataframe (indexed by mutation ids)
    :param col1: name of column1 (subtrahend)
    :param col2: name of column2 (minuend)
    :param zcol: optional column used as a secondary sort key (e.g. p-values)
    :param rows: number of rows to take from each end of the sorted diff
    :param zcol_threshold: if given together with `zcol`, keep only rows with
        ``data[zcol] < zcol_threshold`` before ranking
    :param col_classes: optional column holding class labels used to filter
        heads/tails
    :param classes: two-item list ``[head_class, tail_class]`` used with
        `col_classes`
    :returns: tuple ``(heads_index, tails_index)`` of row indices
    """
    # work on a copy so the caller's dataframe is not mutated by the
    # temporary 'diff' column (the original wrote into `data` in place)
    data=data.copy()
    data.loc[:,'diff']=data.loc[:,col2]-data.loc[:,col1]
    if zcol is None:
        data_heads=data.sort_values(by='diff',ascending=True).head(rows)#.index
        data_tails=data.sort_values(by='diff',ascending=True).tail(rows)#.index
    else:
        if not zcol_threshold is None:
            data=data.loc[(data.loc[:,zcol]<zcol_threshold),:]
        # secondary sort on zcol: ties at the head keep the smallest zcol,
        # ties at the tail keep the smallest zcol as well (descending tail)
        data_heads=data.sort_values(by=['diff',zcol],ascending=[True,True]).head(rows)#.index
        data_tails=data.sort_values(by=['diff',zcol],ascending=[True,False]).tail(rows)#.index
    if not col_classes is None:
        data_heads=data_heads.loc[(data_heads.loc[:,col_classes]==classes[0]),:]
        data_tails=data_tails.loc[(data_tails.loc[:,col_classes]==classes[1]),:]
    return data_heads.index,data_tails.index,
from dms2dfe.lib.plot_mut_data import data2mut_matrix,data2sub_matrix
# from dms2dfe.lib.io_mut_files import concat_cols
from dms2dfe.lib.io_plots import repel_labels
def plot_sc(data,ax,xcol,ycol,ylabel='',
            heads=[],tails=[],repel=0.045,
            annot_headtails=True,
            color_sca=None,
            color_dots='both',
            color_heads='r',color_tails='b',
            zcol=None,
            zcol_threshold=None,
            diagonal=True,
            space=0.2,
            axlims=None,
            ):
    """
    Plot a scatter of ycol versus xcol, highlighting and labeling the
    head/tail (most-changed) points.

    :param data: pandas dataframe
    :param ax: axes object
    :param xcol: column name of x data
    :param ycol: column name of y data
    :param ylabel: label for the y axis
    :param heads: indices to highlight at the low end (auto-picked if empty)
    :param tails: indices to highlight at the high end (auto-picked if empty)
    :param repel: spring constant for the label-repelling layout
    :param annot_headtails: auto-annotate heads/tails when none are given
    :param zcol: optional column used to color the points
    :param zcol_threshold: passed through to gettopnlastdiff
    :param diagonal: draw the y=x reference line
    :param space: padding fraction for auto axis limits
    :param axlims: [xlims, ylims] to reuse; computed if None
    :returns: (ax, heads, tails, axlims)
    """
    # disable z-coloring if the z column is entirely NaN
    if (not zcol is None) and (sum(~pd.isnull(data.loc[:,zcol]))==0):
        zcol=None
    if zcol is None:
        ax=data.plot.scatter(xcol,ycol,edgecolor='none',alpha=0.6,
                             c='yellowgreen',
                             ax=ax)
    else:
        # sort so low-z (more significant) points are drawn on top
        data=data.sort_values(by=zcol,ascending=False)
        ax.scatter(x=data.loc[:,xcol],
                   y=data.loc[:,ycol],
                   edgecolor='none',
                   alpha=0.6,
                   c=data.loc[:,zcol],
                   cmap='summer_r',
                   )
    ax.set_xlabel(xcol)
    if len(heads)==0 and len(tails)==0:
        if annot_headtails:
            heads,tails=gettopnlastdiff(data,ycol,xcol,zcol=zcol,zcol_threshold=zcol_threshold)
    # highlighted points are drawn hollow with a royalblue edge
    color_sca='none'
    color_edge='royalblue'
    if (color_dots=='heads') or (color_dots=='both'):
        ax.scatter(x=data.loc[heads,xcol],y=data.loc[heads,ycol],
                   edgecolor=color_edge,
                   facecolors=color_sca,
                   )
        try:
            # repel_labels may fail (e.g. layout not converging); fall back to plain text
            repel_labels(ax, data.loc[heads, xcol], data.loc[heads, ycol], heads, k=repel,label_color=color_heads)
        except:
            for s in heads:
                ax.text(data.loc[s, xcol], data.loc[s, ycol], s,color=color_heads)
    if (color_dots=='tails') or (color_dots=='both'):
        ax.scatter(x=data.loc[tails,xcol],y=data.loc[tails,ycol],
                   edgecolor=color_edge,
                   facecolors=color_sca,
                   )
        try:
            repel_labels(ax, data.loc[tails, xcol], data.loc[tails, ycol], tails, k=repel,label_color=color_tails)
        except:
            for s in tails:
                if s in data.index:
                    ax.text(data.loc[s, xcol], data.loc[s, ycol], s,color=color_tails)
    ax.set_ylabel(ylabel)
    if diagonal:
        # y = x reference line, drawn behind the data
        ax.plot([100,-100],[100,-100],linestyle='-',color='darkgray',zorder=0)
    if axlims is None:
        xlims,ylims=get_axlims(data.loc[:,xcol],data.loc[:,ycol],space=space)
        ax.set_xlim(xlims)
        ax.set_ylim(ylims)
        axlims=[xlims,ylims]
    else:
        ax.set_xlim(axlims[0])
        ax.set_ylim(axlims[1])
    return ax,heads,tails,axlims
def mutids2refrei(mutid):
    """Strip the mutant amino acid from a mutation ID, leaving the
    reference residue and position (e.g. 'A24G' -> 'A24').

    :param mutid: mutation ID
    """
    return mutid[:len(mutid) - 1]
def mutids2subid(mutid):
    """Collapse a mutation ID to its substitution ID, i.e. the first and
    last characters (e.g. 'A24G' -> 'AG').

    :param mutid: mutation ID
    """
    return '%s%s' % (mutid[0], mutid[-1])
def plot_scatter_mutilayered(data_all,xcol,ycol,
                             mutids_heads=[],mutids_tails=[],
                             repel=0.045,
                             color_dots='both',
                             annot_headtails=True,
                             color_heads='r',color_tails='b',
                             note_text='',
                             stds_sub_pos=None,
                             col_z_mutations=None,
                             zcol_threshold=None,
                             errorbars=False,
                             diagonal=True,
                             space=0.2,
                             figsize=[8.5,6],
                             plot_fh=None,):
    """
    Plot a three-panel scatter of ycol vs xcol at mutation, substitution and
    position level (the latter two aggregate the per-mutation data).

    :param data_all: pandas dataframe (per-mutation, with a 'mutids' column/index)
    :param xcol: column name of x data
    :param ycol: column name of y data
    :param col_z_mutations: optional column used to color mutation points
    :param errorbars: draw std error bars on the aggregated panels
    :param plot_fh: output file path passed to saveplot()
    :returns: (data_all_mut, data_all_sub, data_all_pos)
    """
    # print data_all.shape
    if 'mutids' in data_all:
        data_all=data_all.set_index('mutids')
    data_all_mut=data_all.copy()
    if not col_z_mutations is None:
        # the z column must not enter the aggregated (sub/pos) matrices
        data_all=data_all.drop(col_z_mutations,axis=1)
        # sum(~pd.isnull(data_all_mut.loc[:,col_z_mutations]))
    # per-position means and stds across mutants
    data_all_pos=pd.concat([data2mut_matrix(data_all.reset_index(),xcol,'mut','aas').mean(),
                            data2mut_matrix(data_all.reset_index(),ycol,'mut','aas').mean(),
                            data2mut_matrix(data_all.reset_index(),xcol,'mut','aas').std(),
                            data2mut_matrix(data_all.reset_index(),ycol,'mut','aas').std(),
                            ],axis=1)
    data_all_pos.columns=[xcol,ycol,xcol+'std',ycol+'std']
    # per-substitution (wild type -> mutant amino acid) means and stds
    data_all_sub=pd.concat([data2sub_matrix(data_all,xcol,'mut','aas',aggfunc='mean').unstack(),
                            data2sub_matrix(data_all,ycol,'mut','aas',aggfunc='mean').unstack(),
                            data2sub_matrix(data_all,xcol,'mut','aas',aggfunc='std').unstack(),
                            data2sub_matrix(data_all,ycol,'mut','aas',aggfunc='std').unstack(),
                            ],axis=1)
    data_all_sub.columns=[xcol,ycol,xcol+'std',ycol+'std']
    data_all_sub=denanrows(data_all_sub)
    if not 'Wild type' in data_all_sub:
        # build substitution ids ('<wt><mut>') from the unstacked index levels
        data_all_sub=data_all_sub.reset_index()
        mutids=[]
        for i in data_all_sub.index:
            mutids.append('%s%s' % (data_all_sub.reset_index().loc[i,'Wild type'],
                                    data_all_sub.reset_index().loc[i,'Mutation to']))
        data_all_sub.loc[:,'mutids']=mutids
        data_all_sub=data_all_sub.set_index('mutids')
    fig=plt.figure(figsize=figsize)
    ax1=plt.subplot(131)
    ax2=plt.subplot(132)
    ax3=plt.subplot(133)
    if errorbars:
        # faint std error bars behind the aggregated scatters
        ax2.errorbar(data_all_sub.loc[:,xcol],data_all_sub.loc[:,ycol],
                     xerr=data_all_sub.loc[:,xcol+'std'],
                     yerr=data_all_sub.loc[:,ycol+'std'],
                     fmt="none",ecolor='gray',alpha=0.15,
                     capthick=0,
                     zorder=0)
        ax3.errorbar(data_all_pos.loc[:,xcol],data_all_pos.loc[:,ycol],
                     xerr=data_all_pos.loc[:,xcol+'std'],
                     yerr=data_all_pos.loc[:,ycol+'std'],
                     fmt="none",ecolor='gray',alpha=0.15,
                     capthick=0,
                     zorder=0)
    ax1,mutids_heads,mutids_tails,axlims=plot_sc(data_all_mut,ax1,xcol,ycol,ylabel=ycol,
                                                 heads=mutids_heads,tails=mutids_tails,
                                                 annot_headtails=False,
                                                 zcol=col_z_mutations,zcol_threshold=zcol_threshold,
                                                 repel=repel,
                                                 color_dots=color_dots,color_heads=color_heads,color_tails=color_tails,diagonal=diagonal,space=space,)
    # scale the repel constant down for the smaller substitution panel
    repel_sub=repel*len(data_all_sub)/(len(data_all_mut))*5
    if repel_sub>repel:
        repel_sub=repel
    # print repel_sub
    # print data_all_sub.columns
    # data_all_sub=denanrows(data_all_sub)
    data_all_sub=data_all_sub.loc[denanrows(data_all_sub.loc[:,[xcol,ycol]]).index.tolist(),:]
    # print data_all_sub.shape
    ax2,_,_,_=plot_sc(data_all_sub,ax2,xcol,ycol,
                      heads=[mutids2subid(i) for i in mutids_heads],tails=[mutids2subid(i) for i in mutids_tails],
                      annot_headtails=False,
                      repel=repel_sub,
                      color_dots=color_dots,color_heads=color_heads,color_tails=color_tails,diagonal=diagonal,space=space,
                      # axlims=axlims
                      )
    # scale the repel constant for the position panel
    repel_pos=repel*len(data_all_pos)/(len(data_all_mut))*12.5
    if repel_pos>repel:
        repel_pos=repel
    # print repel_pos
    ax3,_,_,_=plot_sc(data_all_pos,ax3,xcol,ycol,
                      heads=[mutids2refrei(i) for i in mutids_heads],tails=[mutids2refrei(i) for i in mutids_tails],
                      annot_headtails=False,
                      repel=repel_pos,
                      color_dots=color_dots,color_heads=color_heads,color_tails=color_tails,diagonal=diagonal,space=space,
                      # axlims=axlims
                      )
    ax1.set_title('Mutations',color="gray")
    ax2.set_title('Substitutions',color="gray")
    ax3.set_title('Positions',color="gray")
    fig.suptitle(note_text, fontsize=15,color="k")
    saveplot(plot_fh,form='both',transparent=False)
    return data_all_mut,data_all_sub,data_all_pos
def set_title_higher(axes, labels, height=1.2, color='k'):
    """Place a text label above each axes at a configurable height.

    :param axes: list of axes objects (one per label)
    :param labels: list of label strings for titles
    :param height: fraction of the y-range above the axis bottom at which
        to place the text
    :param color: text color
    """
    for i, label in enumerate(labels):
        ax = axes[i]
        xmin = ax.get_xlim()[0]
        ymin, ymax = ax.get_ylim()
        ax.text(xmin, ymin + (ymax - ymin) * height, label,
                color=color, fontsize=15)
def data_comparison2scatter_mutilayered(data,data_label,color_dots=None,
                                        mutids_heads=[],mutids_tails=[],
                                        col_filter=None,
                                        note_text=None,
                                        col_pvals=None,
                                        repel=0.045,
                                        figsize=[15,5],
                                        plot_fh=None):
    """
    Wrapper to plot multi layered scatter plot from a data_comparison table.

    :param data: pandas dataframe with 'Fi_test'/'Fi_ctrl' columns and mutids
    :param data_label: label of the data; split on ' versus ' into the
        test/control axis labels
    :param color_dots: 'heads'/'tails'/'both', passed through to plot_sc
    :param col_pvals: optional p-value column; log10-transformed and used to
        color the mutation panel
    :param plot_fh: output file path
    """
    from dms2dfe.lib.io_strs import splitlabel
    # print data.shape
    data=set_index(data,'mutids')
    labels=splitlabel(data_label,splitby=' versus ',ctrl='$37^{0}$C')
    if not note_text is None:
        labels=["%s (%s)" % (l,note_text) for l in labels]
    # labels[0] = test condition, labels[1] = control condition
    data.loc[:,labels[0]]=data.loc[:,'Fi_test']
    data.loc[:,labels[1]]=data.loc[:,'Fi_ctrl']
    if not col_pvals is None:
        # color scale works on log10 p-values
        data.loc[:,col_pvals]=np.log10(data.loc[:,col_pvals])
    if not data.index.name=='mutids':
        data.index.name='mutids'
    # print data.index
    zcol_threshold=np.log10(0.01)
    if not col_filter is None:
        # NOTE(review): result discarded — this statement has no effect;
        # presumably `data = data.loc[...]` was intended. Confirm before relying
        # on col_filter.
        data.loc[data.loc[:,col_filter],labels]
    cols=['mut','ref']+labels
    if not col_pvals is None:
        cols=cols+[col_pvals]
    data=denanrows(data.loc[:,cols])
    # print data.shape
    # print data.index.name
    # print data.columns.tolist()
    plot_scatter_mutilayered(data,labels[1],labels[0],
                             plot_fh=plot_fh,
                             color_dots=color_dots,
                             mutids_heads=mutids_heads,
                             mutids_tails=mutids_tails,
                             color_heads='b',color_tails='b',
                             col_z_mutations=col_pvals,
                             zcol_threshold=0.05,
                             repel=repel,
                             figsize=figsize,#[6.375,4.5],
                             )
def plot_mulitilayered_scatter_per_class_comparison(prj_dh,
                                                    data_fns,data_labels,
                                                    filter_selection=None,
                                                    data_sections_pvals=None,
                                                    fns2sides=None,
                                                    filter_sections=None,
                                                    filter_signi=True,
                                                    col_pvals=None,
                                                    col_filter=None,
                                                    figsize=[9,3],
                                                    force=False):
    """
    Wrapper to plot multi layered scatter from data_comparison files.

    :param prj_dh: path to the project directory
    :param data_fns: list of filenames (under <prj_dh>/data_comparison/aas/)
    :param data_labels: list of corresponding labels
    :param filter_selection: 'by_side' (use fns2sides) or otherwise use
        data_sections_pvals to pick the selection class
    :param data_sections_pvals: dataframe with per-label 'selection' and
        'Significant section' columns
    :param filter_sections: True / 'signi' / falsy — how to subset sections
    :param filter_signi: keep only rows flagged 'Significant'
    :param force: re-generate plots even if the output file exists
    :returns: dataframe of selected head/tail mutants across all inputs
    """
    plot_type='scatter_mutilayered_per_class_comparison'
    dtype='data_comparison'
    data_mutants_select=pd.DataFrame()
    for i in range(len(data_labels)):
        data_fn=data_fns[i]
        data_label=data_labels[i]
        data_fh='%s/data_comparison/aas/%s' % (prj_dh,data_fn)
        data_comparison=pd.read_csv(data_fh).set_index('mutids')
        # keep an unfiltered copy for plotting
        data_plot=data_comparison.copy()
        print len(denanrows(data_plot.loc[:,'class_comparison']))
        if (filter_selection=='by_side'):
            selection=fns2sides[data_fn]
        else:# (filter_selection=='signi'):
            selection=data_sections_pvals.loc[data_label,'selection']
            # pval=data_sections_pvals.loc[data_label,'All']
        # selection class decides which end of the scatter gets highlighted
        if selection=='positive':
            color_dots='heads'
        elif selection=='negative':
            color_dots='tails'
        print color_dots
        if ((filter_selection=='signi') or (filter_selection=='by_side')):
            data_comparison=data_comparison.loc[(data_comparison.loc[:,'class_comparison']==selection),:]
        else:
            data_comparison=data_comparison.loc[((data_comparison.loc[:,'class_comparison']=='positive')\
                | (data_comparison.loc[:,'class_comparison']=='negative')),:]
        # data_plot=data_comparison.copy()
        if filter_sections==True:
            data_comparison=data_comparison.loc[~pd.isnull(data_comparison.loc[:,'sectionn']),:]
            sectionn='True'
        elif filter_sections=='signi':
            sectionn=data_sections_pvals.loc[data_label,'Significant section']
            data_comparison=data_comparison.loc[(data_comparison.loc[:,'sectionn']==sectionn),:]
            # get intersect of (mutids significant section) and (class of selection)
        else:
            sectionn='all'
        if filter_signi:
            # NaN significance counts as not significant
            data_comparison.loc[pd.isnull(data_comparison.loc[:,'Significant']),'Significant']=False
            data_comparison=data_comparison.loc[data_comparison.loc[:,'Significant'],:]
        # by lowest of multiplication of pvals (only empiric)
        zcol='z'
        xcol='FiA_ctrl'
        ycol='FiA_test'
        data_comparison.loc[:,zcol]=data_comparison.loc[:,'padj_test']*data_comparison.loc[:,'padj_ctrl']
        # get top 5
        # data_comparison.to_csv('test.csv')
        mutids_heads,mutids_tails=gettopnlastdiff(data_comparison,ycol,xcol,
                                                  # zcol=zcol
                                                  col_classes='class_comparison',
                                                  classes=['positive','negative']
                                                  )
        data_comparison.loc[:,'data_label']=data_label
        data_comparison.loc[:,'data_fn']=data_fn
        # NOTE(review): `+` on two pandas Index objects — relies on old pandas
        # union semantics; on newer pandas this is element-wise. Confirm the
        # pinned pandas version before upgrading.
        data_mutants_select=data_mutants_select.append(data_comparison.loc[mutids_heads+mutids_tails,:])
        plot_fh='%s/plots/aas/fig_%s_section_%s_%s.pdf' % (prj_dh,plot_type,sectionn.replace(',','_'),data_fn)
        print plot_fh
        print mutids_heads
        print mutids_tails
        if not exists(plot_fh) or force:
            note_text=None
            data_comparison2scatter_mutilayered(data_plot,data_label,
                                                color_dots,note_text=note_text,
                                                plot_fh=plot_fh,
                                                mutids_heads=mutids_heads,
                                                mutids_tails=mutids_tails,
                                                col_pvals=col_pvals,
                                                repel=0.08,
                                                figsize=figsize,
                                                )
    return data_mutants_select
wileeam/airflow | airflow/providers/apache/cassandra/hooks/cassandra.py | 4 | 7914 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module contains hook to integrate with Apache Cassandra.
"""
from typing import Any, Dict, Union
from cassandra.auth import PlainTextAuthProvider
from cassandra.cluster import Cluster, Session
from cassandra.policies import (
DCAwareRoundRobinPolicy, RoundRobinPolicy, TokenAwarePolicy, WhiteListRoundRobinPolicy,
)
from airflow.hooks.base_hook import BaseHook
from airflow.utils.log.logging_mixin import LoggingMixin
# Union of the load-balancing policy types that get_lb_policy() can return
Policy = Union[DCAwareRoundRobinPolicy, RoundRobinPolicy, TokenAwarePolicy, WhiteListRoundRobinPolicy]
class CassandraHook(BaseHook, LoggingMixin):
"""
Hook used to interact with Cassandra
Contact points can be specified as a comma-separated string in the 'hosts'
field of the connection.
Port can be specified in the port field of the connection.
If SSL is enabled in Cassandra, pass in a dict in the extra field as kwargs for
``ssl.wrap_socket()``. For example::
{
'ssl_options' : {
'ca_certs' : PATH_TO_CA_CERTS
}
}
Default load balancing policy is RoundRobinPolicy. To specify a different
LB policy::
- DCAwareRoundRobinPolicy
{
'load_balancing_policy': 'DCAwareRoundRobinPolicy',
'load_balancing_policy_args': {
'local_dc': LOCAL_DC_NAME, // optional
'used_hosts_per_remote_dc': SOME_INT_VALUE, // optional
}
}
- WhiteListRoundRobinPolicy
{
'load_balancing_policy': 'WhiteListRoundRobinPolicy',
'load_balancing_policy_args': {
'hosts': ['HOST1', 'HOST2', 'HOST3']
}
}
- TokenAwarePolicy
{
'load_balancing_policy': 'TokenAwarePolicy',
'load_balancing_policy_args': {
'child_load_balancing_policy': CHILD_POLICY_NAME, // optional
'child_load_balancing_policy_args': { ... } // optional
}
}
For details of the Cluster config, see cassandra.cluster.
"""
    def __init__(self, cassandra_conn_id: str = 'cassandra_default'):
        """Build the :class:`cassandra.cluster.Cluster` from the Airflow
        connection's host(s), port, credentials and extras.

        :param cassandra_conn_id: id of the Airflow connection to use
        """
        # NOTE(review): no super().__init__() call here — confirm whether the
        # BaseHook constructor needs to run for this Airflow version.
        conn = self.get_connection(cassandra_conn_id)
        conn_config = {}
        if conn.host:
            # contact points are stored as a comma-separated string in 'hosts'
            conn_config['contact_points'] = conn.host.split(',')
        if conn.port:
            conn_config['port'] = int(conn.port)
        if conn.login:
            conn_config['auth_provider'] = PlainTextAuthProvider(
                username=conn.login, password=conn.password)
        # optional extras: load balancing policy, CQL version, SSL options
        policy_name = conn.extra_dejson.get('load_balancing_policy', None)
        policy_args = conn.extra_dejson.get('load_balancing_policy_args', {})
        lb_policy = self.get_lb_policy(policy_name, policy_args)
        if lb_policy:
            conn_config['load_balancing_policy'] = lb_policy
        cql_version = conn.extra_dejson.get('cql_version', None)
        if cql_version:
            conn_config['cql_version'] = cql_version
        ssl_options = conn.extra_dejson.get('ssl_options', None)
        if ssl_options:
            conn_config['ssl_options'] = ssl_options
        self.cluster = Cluster(**conn_config)
        self.keyspace = conn.schema
        # session is created lazily by get_conn()
        self.session = None
def get_conn(self) -> Session:
    """Return a live cassandra Session, creating one if none is cached."""
    # Reuse the cached session unless it is missing or already shut down.
    if self.session is None or self.session.is_shutdown:
        self.session = self.cluster.connect(self.keyspace)
    return self.session
def get_cluster(self) -> Cluster:
    """Return the Cluster object built from the Airflow connection."""
    return self.cluster
def shutdown_cluster(self) -> None:
    """Close all sessions and connections of this cluster (idempotent)."""
    if self.cluster.is_shutdown:
        return
    self.cluster.shutdown()
@staticmethod
def get_lb_policy(policy_name: str, policy_args: Dict[str, Any]) -> Policy:
    """Build a load balancing policy from its name and argument dict.

    :param policy_name: Name of the policy to use.
    :type policy_name: str
    :param policy_args: Parameters for the policy.
    :type policy_args: Dict
    """
    if policy_name == 'DCAwareRoundRobinPolicy':
        return DCAwareRoundRobinPolicy(
            policy_args.get('local_dc', ''),
            int(policy_args.get('used_hosts_per_remote_dc', 0)))
    if policy_name == 'WhiteListRoundRobinPolicy':
        hosts = policy_args.get('hosts')
        if not hosts:
            raise Exception('Hosts must be specified for WhiteListRoundRobinPolicy')
        return WhiteListRoundRobinPolicy(hosts)
    if policy_name == 'TokenAwarePolicy':
        child_name = policy_args.get('child_load_balancing_policy',
                                     'RoundRobinPolicy')
        if child_name not in ('RoundRobinPolicy',
                              'DCAwareRoundRobinPolicy',
                              'WhiteListRoundRobinPolicy'):
            # Unsupported child policy: silently fall back to round robin.
            return TokenAwarePolicy(RoundRobinPolicy())
        child_args = policy_args.get('child_load_balancing_policy_args', {})
        return TokenAwarePolicy(
            CassandraHook.get_lb_policy(child_name, child_args))
    # Unknown or unset policy name: use the default RoundRobinPolicy.
    return RoundRobinPolicy()
def table_exists(self, table: str) -> bool:
    """Check whether a table exists in Cassandra.

    :param table: Target Cassandra table; use ``keyspace.table`` dot
        notation to target a keyspace other than the connection default.
    """
    keyspace = self.keyspace
    if '.' in table:
        keyspace, table = table.split('.', 1)
    metadata = self.get_conn().cluster.metadata
    if keyspace not in metadata.keyspaces:
        return False
    return table in metadata.keyspaces[keyspace].tables
def record_exists(self, table: str, keys: Dict[str, str]) -> bool:
    """Check whether a row matching ``keys`` exists in ``table``.

    :param table: Target Cassandra table; use ``keyspace.table`` dot
        notation to target a keyspace other than the connection default.
    :type table: str
    :param keys: The keys and their values to check the existence.
    :type keys: dict
    """
    keyspace = self.keyspace
    if '.' in table:
        keyspace, table = table.split('.', 1)
    predicate = " AND ".join(f"{key}=%({key})s" for key in keys.keys())
    cql = f"SELECT * FROM {keyspace}.{table} WHERE {predicate}"
    try:
        rows = self.get_conn().execute(cql, keys)
        return rows.one() is not None
    except Exception:  # pylint: disable=broad-except
        # Deliberate best effort: any failure is reported as "not found".
        return False
| apache-2.0 |
shrikantgond/ecView | node_modules/node-gyp/gyp/pylib/gyp/MSVSSettings.py | 1361 | 45045 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
r"""Code to validate and convert settings of the Microsoft build tools.
This file contains code to validate and convert settings of the Microsoft
build tools. The function ConvertToMSBuildSettings(), ValidateMSVSSettings(),
and ValidateMSBuildSettings() are the entry points.
This file was created by comparing the projects created by Visual Studio 2008
and Visual Studio 2010 for all available settings through the user interface.
The MSBuild schemas were also considered. They are typically found in the
MSBuild install directory, e.g. c:\Program Files (x86)\MSBuild
"""
import sys
import re
# Dictionaries of settings validators. The key is the tool name, the value is
# a dictionary mapping setting names to validation functions.
# Populated as a side effect of _AddTool() and the _Same/_Renamed/... helpers
# defined below.
_msvs_validators = {}
_msbuild_validators = {}
# A dictionary of settings converters. The key is the tool name, the value is
# a dictionary mapping setting names to conversion functions.
_msvs_to_msbuild_converters = {}
# Tool name mapping from MSVS to MSBuild.
_msbuild_name_of_tool = {}
class _Tool(object):
"""Represents a tool used by MSVS or MSBuild.
Attributes:
msvs_name: The name of the tool in MSVS.
msbuild_name: The name of the tool in MSBuild.
"""
def __init__(self, msvs_name, msbuild_name):
self.msvs_name = msvs_name
self.msbuild_name = msbuild_name
def _AddTool(tool):
  """Registers a tool in the four settings-processing dictionaries.

  Only the tool itself is registered; each of its settings must be added
  separately via the _Same/_Renamed/... helpers.

  Args:
    tool: The _Tool object to be added.
  """
  msvs, msbuild = tool.msvs_name, tool.msbuild_name
  _msvs_validators[msvs] = {}
  _msbuild_validators[msbuild] = {}
  _msvs_to_msbuild_converters[msvs] = {}
  _msbuild_name_of_tool[msvs] = msbuild
def _GetMSBuildToolSettings(msbuild_settings, tool):
"""Returns an MSBuild tool dictionary. Creates it if needed."""
return msbuild_settings.setdefault(tool.msbuild_name, {})
class _Type(object):
"""Type of settings (Base class)."""
def ValidateMSVS(self, value):
"""Verifies that the value is legal for MSVS.
Args:
value: the value to check for this type.
Raises:
ValueError if value is not valid for MSVS.
"""
def ValidateMSBuild(self, value):
"""Verifies that the value is legal for MSBuild.
Args:
value: the value to check for this type.
Raises:
ValueError if value is not valid for MSBuild.
"""
def ConvertToMSBuild(self, value):
"""Returns the MSBuild equivalent of the MSVS value given.
Args:
value: the MSVS value to convert.
Returns:
the MSBuild equivalent.
Raises:
ValueError if value is not valid.
"""
return value
class _String(_Type):
  """A setting whose value is a plain string."""

  def _Validate(self, value):
    if not isinstance(value, basestring):
      raise ValueError('expected string; got %r' % value)

  def ValidateMSVS(self, value):
    self._Validate(value)

  def ValidateMSBuild(self, value):
    self._Validate(value)

  def ConvertToMSBuild(self, value):
    # Translate any MSVS macros to their MSBuild form.
    return ConvertVCMacrosToMSBuild(value)
class _StringList(_Type):
  """A setting whose value is a list of strings (or a single string)."""

  def _Validate(self, value):
    if not isinstance(value, (basestring, list)):
      raise ValueError('expected string list; got %r' % value)

  def ValidateMSVS(self, value):
    self._Validate(value)

  def ValidateMSBuild(self, value):
    self._Validate(value)

  def ConvertToMSBuild(self, value):
    # Translate MSVS macros in each element (or in the single-string form).
    if isinstance(value, list):
      return [ConvertVCMacrosToMSBuild(item) for item in value]
    return ConvertVCMacrosToMSBuild(value)
class _Boolean(_Type):
  """A setting that accepts only the strings 'true' and 'false'."""

  def _Validate(self, value):
    if value not in ('true', 'false'):
      raise ValueError('expected bool; got %r' % value)

  def ValidateMSVS(self, value):
    self._Validate(value)

  def ValidateMSBuild(self, value):
    self._Validate(value)

  def ConvertToMSBuild(self, value):
    self._Validate(value)
    return value
class _Integer(_Type):
  """An integer setting, rendered in a configurable base for MSBuild."""

  def __init__(self, msbuild_base=10):
    _Type.__init__(self)
    self._msbuild_base = msbuild_base

  def ValidateMSVS(self, value):
    # Conversion raises ValueError on bad input, which is the contract.
    self.ConvertToMSBuild(value)

  def ValidateMSBuild(self, value):
    # Raises ValueError if `value` is not an integer in the chosen base.
    int(value, self._msbuild_base)

  def ConvertToMSBuild(self, value):
    if self._msbuild_base == 10:
      return '%d' % int(value)
    return '0x%04x' % int(value)
class _Enumeration(_Type):
  """A setting whose value is an enumeration.

  In MSVS, the values are indexes like '0', '1', and '2'.
  MSBuild uses text labels that are more representative, like 'Win32'.

  Constructor args:
    label_list: an array of MSBuild labels that correspond to the MSVS index.
        In the rare cases where MSVS has skipped an index value, None is
        used in the array to indicate the unused spot.
    new: an array of labels that are new to MSBuild.
  """

  def __init__(self, label_list, new=None):
    _Type.__init__(self)
    self._label_list = label_list
    self._msbuild_values = set(label for label in label_list
                               if label is not None)
    if new is not None:
      self._msbuild_values.update(new)

  def ValidateMSVS(self, value):
    # Conversion raises on invalid input.
    self.ConvertToMSBuild(value)

  def ValidateMSBuild(self, value):
    if value not in self._msbuild_values:
      raise ValueError('unrecognized enumerated value %s' % value)

  def ConvertToMSBuild(self, value):
    index = int(value)
    if not 0 <= index < len(self._label_list):
      raise ValueError('index value (%d) not in expected range [0, %d)' %
                       (index, len(self._label_list)))
    label = self._label_list[index]
    if label is None:
      raise ValueError('converted value for %s not specified.' % value)
    return label
# Instantiate the various generic types.
# These singleton instances are shared by all the setting definitions below.
_boolean = _Boolean()
_integer = _Integer()
# For now, we don't do any special validation on these types:
_string = _String()
_file_name = _String()
_folder_name = _String()
_file_list = _StringList()
_folder_list = _StringList()
_string_list = _StringList()
# Some boolean settings went from numerical values to boolean. The
# mapping is 0: default, 1: false, 2: true.
_newly_boolean = _Enumeration(['', 'false', 'true'])
def _Same(tool, name, setting_type):
  """Defines a setting whose name is identical in MSVS and MSBuild.

  Args:
    tool: a dictionary that gives the names of the tool for MSVS and MSBuild.
    name: the name of the setting.
    setting_type: the type of this setting.
  """
  # Delegate to _Renamed with the same name on both sides.
  _Renamed(tool, name, name, setting_type)
def _Renamed(tool, msvs_name, msbuild_name, setting_type):
  """Defines a setting whose name differs between MSVS and MSBuild.

  Args:
    tool: a dictionary that gives the names of the tool for MSVS and MSBuild.
    msvs_name: the name of the MSVS setting.
    msbuild_name: the name of the MSBuild setting.
    setting_type: the type of this setting.
  """
  def _Translate(value, msbuild_settings):
    tool_settings = _GetMSBuildToolSettings(msbuild_settings, tool)
    tool_settings[msbuild_name] = setting_type.ConvertToMSBuild(value)

  _msvs_validators[tool.msvs_name][msvs_name] = setting_type.ValidateMSVS
  _msbuild_validators[tool.msbuild_name][msbuild_name] = (
      setting_type.ValidateMSBuild)
  _msvs_to_msbuild_converters[tool.msvs_name][msvs_name] = _Translate
def _Moved(tool, settings_name, msbuild_tool_name, setting_type):
  """Defines a setting that moved to another tool but kept its name."""
  _MovedAndRenamed(tool, settings_name, msbuild_tool_name, settings_name,
                   setting_type)
def _MovedAndRenamed(tool, msvs_settings_name, msbuild_tool_name,
                     msbuild_settings_name, setting_type):
  """Defines a setting that may have moved to a new section.

  Args:
    tool: a dictionary that gives the names of the tool for MSVS and MSBuild.
    msvs_settings_name: the MSVS name of the setting.
    msbuild_tool_name: the name of the MSBuild tool to place the setting under.
    msbuild_settings_name: the MSBuild name of the setting.
    setting_type: the type of this setting.
  """
  def _Translate(value, msbuild_settings):
    destination = msbuild_settings.setdefault(msbuild_tool_name, {})
    destination[msbuild_settings_name] = setting_type.ConvertToMSBuild(value)

  _msvs_validators[tool.msvs_name][msvs_settings_name] = (
      setting_type.ValidateMSVS)
  _msbuild_validators[msbuild_tool_name][msbuild_settings_name] = (
      setting_type.ValidateMSBuild)
  _msvs_to_msbuild_converters[tool.msvs_name][msvs_settings_name] = _Translate
def _MSVSOnly(tool, name, setting_type):
  """Defines a setting that exists only in MSVS.

  Args:
    tool: a dictionary that gives the names of the tool for MSVS and MSBuild.
    name: the name of the setting.
    setting_type: the type of this setting.
  """
  def _Translate(unused_value, unused_msbuild_settings):
    # MSVS-only settings have no MSBuild equivalent, so drop them.
    pass

  _msvs_validators[tool.msvs_name][name] = setting_type.ValidateMSVS
  _msvs_to_msbuild_converters[tool.msvs_name][name] = _Translate
def _MSBuildOnly(tool, name, setting_type):
  """Defines a setting that exists only in MSBuild.

  Args:
    tool: a dictionary that gives the names of the tool for MSVS and MSBuild.
    name: the name of the setting.
    setting_type: the type of this setting.
  """
  def _Translate(value, msbuild_settings):
    # Pass msbuild-only properties through as-is from msvs_settings.
    tool_settings = msbuild_settings.setdefault(tool.msbuild_name, {})
    tool_settings[name] = value

  _msbuild_validators[tool.msbuild_name][name] = setting_type.ValidateMSBuild
  _msvs_to_msbuild_converters[tool.msvs_name][name] = _Translate
def _ConvertedToAdditionalOption(tool, msvs_name, flag):
  """Defines a boolean MSVS setting handled as a command-line flag in MSBuild.

  Args:
    tool: a dictionary that gives the names of the tool for MSVS and MSBuild.
    msvs_name: the name of the MSVS setting that if 'true' becomes a flag
    flag: the flag to insert at the end of the AdditionalOptions
  """
  def _Translate(value, msbuild_settings):
    if value != 'true':
      return
    tool_settings = _GetMSBuildToolSettings(msbuild_settings, tool)
    if 'AdditionalOptions' in tool_settings:
      tool_settings['AdditionalOptions'] = '%s %s' % (
          tool_settings['AdditionalOptions'], flag)
    else:
      tool_settings['AdditionalOptions'] = flag

  _msvs_validators[tool.msvs_name][msvs_name] = _boolean.ValidateMSVS
  _msvs_to_msbuild_converters[tool.msvs_name][msvs_name] = _Translate
def _CustomGeneratePreprocessedFile(tool, msvs_name):
  """Defines the GeneratePreprocessedFile setting, which needs custom handling.

  The single MSVS enumeration value expands into the two MSBuild booleans
  PreprocessToFile and PreprocessSuppressLineNumbers.
  """
  def _Translate(value, msbuild_settings):
    tool_settings = _GetMSBuildToolSettings(msbuild_settings, tool)
    if value == '0':
      to_file, suppress = 'false', 'false'
    elif value == '1':  # /P
      to_file, suppress = 'true', 'false'
    elif value == '2':  # /EP /P
      to_file, suppress = 'true', 'true'
    else:
      raise ValueError('value must be one of [0, 1, 2]; got %s' % value)
    tool_settings['PreprocessToFile'] = to_file
    tool_settings['PreprocessSuppressLineNumbers'] = suppress

  # Create a bogus validator that looks for '0', '1', or '2'
  _msvs_validators[tool.msvs_name][msvs_name] = (
      _Enumeration(['a', 'b', 'c']).ValidateMSVS)
  msbuild_validator = _boolean.ValidateMSBuild
  msbuild_tool_validators = _msbuild_validators[tool.msbuild_name]
  msbuild_tool_validators['PreprocessToFile'] = msbuild_validator
  msbuild_tool_validators['PreprocessSuppressLineNumbers'] = msbuild_validator
  _msvs_to_msbuild_converters[tool.msvs_name][msvs_name] = _Translate
# Macros that carry a built-in trailing slash; FixVCMacroSlashes() strips any
# extra slashes that immediately follow them.
fix_vc_macro_slashes_regex_list = ('IntDir', 'OutDir')
fix_vc_macro_slashes_regex = re.compile(
  r'(\$\((?:%s)\))(?:[\\/]+)' % "|".join(fix_vc_macro_slashes_regex_list)
)
# Regular expression to detect keys that were generated by exclusion lists
# (e.g. 'SomeSetting_excluded'); group 1 captures the root setting name.
_EXCLUDED_SUFFIX_RE = re.compile('^(.*)_excluded$')
def _ValidateExclusionSetting(setting, settings, error_msg, stderr=sys.stderr):
  """Verify that 'setting' is valid if it is generated from an exclusion list.

  If the setting appears to be generated from an exclusion list, the root name
  is checked.

  Args:
      setting: A string that is the setting name to validate
      settings: A dictionary where the keys are valid settings
      error_msg: The message to emit in the event of error
      stderr: The stream receiving the error messages.
  """
  # A setting generated from an exclusion list carries an '_excluded' suffix;
  # in that case the root setting name is what must be recognized.
  match = _EXCLUDED_SUFFIX_RE.match(setting)
  if match and match.group(1) in settings:
    return
  # We don't know this setting. Give a warning.
  print >> stderr, error_msg
def FixVCMacroSlashes(s):
  """Replace macros which have excessive following slashes.

  These macros are known to have a built-in trailing slash. Furthermore, many
  scripts hiccup on processing paths with extra slashes in the middle.

  This list is probably not exhaustive. Add as needed.
  """
  # Cheap guard: no '$' means no macros at all.
  if '$' not in s:
    return s
  return fix_vc_macro_slashes_regex.sub(r'\1', s)
def ConvertVCMacrosToMSBuild(s):
  """Convert the MSVS macros found in the string to the MSBuild equivalent.

  This list is probably not exhaustive. Add as needed.
  """
  if '$' in s:
    replace_map = {
        '$(ConfigurationName)': '$(Configuration)',
        '$(InputDir)': '%(RelativeDir)',
        '$(InputExt)': '%(Extension)',
        '$(InputFileName)': '%(Filename)%(Extension)',
        '$(InputName)': '%(Filename)',
        '$(InputPath)': '%(Identity)',
        '$(ParentName)': '$(ProjectFileName)',
        '$(PlatformName)': '$(Platform)',
        '$(SafeInputName)': '%(Filename)',
    }
    for old, new in replace_map.iteritems():
      s = s.replace(old, new)
    # Normalize any doubled slashes left after macro substitution.
    s = FixVCMacroSlashes(s)
  return s
def ConvertToMSBuildSettings(msvs_settings, stderr=sys.stderr):
"""Converts MSVS settings (VS2008 and earlier) to MSBuild settings (VS2010+).
Args:
msvs_settings: A dictionary. The key is the tool name. The values are
themselves dictionaries of settings and their values.
stderr: The stream receiving the error messages.
Returns:
A dictionary of MSBuild settings. The key is either the MSBuild tool name
or the empty string (for the global settings). The values are themselves
dictionaries of settings and their values.
"""
msbuild_settings = {}
for msvs_tool_name, msvs_tool_settings in msvs_settings.iteritems():
if msvs_tool_name in _msvs_to_msbuild_converters:
msvs_tool = _msvs_to_msbuild_converters[msvs_tool_name]
for msvs_setting, msvs_value in msvs_tool_settings.iteritems():
if msvs_setting in msvs_tool:
# Invoke the translation function.
try:
msvs_tool[msvs_setting](msvs_value, msbuild_settings)
except ValueError, e:
print >> stderr, ('Warning: while converting %s/%s to MSBuild, '
'%s' % (msvs_tool_name, msvs_setting, e))
else:
_ValidateExclusionSetting(msvs_setting,
msvs_tool,
('Warning: unrecognized setting %s/%s '
'while converting to MSBuild.' %
(msvs_tool_name, msvs_setting)),
stderr)
else:
print >> stderr, ('Warning: unrecognized tool %s while converting to '
'MSBuild.' % msvs_tool_name)
return msbuild_settings
def ValidateMSVSSettings(settings, stderr=sys.stderr):
  """Validates that the names of the settings are valid for MSVS.

  Args:
      settings: A dictionary.  The key is the tool name.  The values are
          themselves dictionaries of settings and their values.
      stderr: The stream receiving the error messages.
  """
  _ValidateSettings(_msvs_validators, settings, stderr)
def ValidateMSBuildSettings(settings, stderr=sys.stderr):
  """Validates that the names of the settings are valid for MSBuild.

  Args:
      settings: A dictionary.  The key is the tool name.  The values are
          themselves dictionaries of settings and their values.
      stderr: The stream receiving the error messages.
  """
  _ValidateSettings(_msbuild_validators, settings, stderr)
def _ValidateSettings(validators, settings, stderr):
"""Validates that the settings are valid for MSBuild or MSVS.
We currently only validate the names of the settings, not their values.
Args:
validators: A dictionary of tools and their validators.
settings: A dictionary. The key is the tool name. The values are
themselves dictionaries of settings and their values.
stderr: The stream receiving the error messages.
"""
for tool_name in settings:
if tool_name in validators:
tool_validators = validators[tool_name]
for setting, value in settings[tool_name].iteritems():
if setting in tool_validators:
try:
tool_validators[setting](value)
except ValueError, e:
print >> stderr, ('Warning: for %s/%s, %s' %
(tool_name, setting, e))
else:
_ValidateExclusionSetting(setting,
tool_validators,
('Warning: unrecognized setting %s/%s' %
(tool_name, setting)),
stderr)
else:
print >> stderr, ('Warning: unrecognized tool %s' % tool_name)
# MSVS and MBuild names of the tools.
_compile = _Tool('VCCLCompilerTool', 'ClCompile')
_link = _Tool('VCLinkerTool', 'Link')
_midl = _Tool('VCMIDLTool', 'Midl')
_rc = _Tool('VCResourceCompilerTool', 'ResourceCompile')
_lib = _Tool('VCLibrarianTool', 'Lib')
_manifest = _Tool('VCManifestTool', 'Manifest')
_masm = _Tool('MASM', 'MASM')
_AddTool(_compile)
_AddTool(_link)
_AddTool(_midl)
_AddTool(_rc)
_AddTool(_lib)
_AddTool(_manifest)
_AddTool(_masm)
# Add sections only found in the MSBuild settings.
_msbuild_validators[''] = {}
_msbuild_validators['ProjectReference'] = {}
_msbuild_validators['ManifestResourceCompile'] = {}
# Descriptions of the compiler options, i.e. VCCLCompilerTool in MSVS and
# ClCompile in MSBuild.
# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\cl.xml" for
# the schema of the MSBuild ClCompile settings.
# Options that have the same name in MSVS and MSBuild
_Same(_compile, 'AdditionalIncludeDirectories', _folder_list) # /I
_Same(_compile, 'AdditionalOptions', _string_list)
_Same(_compile, 'AdditionalUsingDirectories', _folder_list) # /AI
_Same(_compile, 'AssemblerListingLocation', _file_name) # /Fa
_Same(_compile, 'BrowseInformationFile', _file_name)
_Same(_compile, 'BufferSecurityCheck', _boolean) # /GS
_Same(_compile, 'DisableLanguageExtensions', _boolean) # /Za
_Same(_compile, 'DisableSpecificWarnings', _string_list) # /wd
_Same(_compile, 'EnableFiberSafeOptimizations', _boolean) # /GT
_Same(_compile, 'EnablePREfast', _boolean) # /analyze Visible='false'
_Same(_compile, 'ExpandAttributedSource', _boolean) # /Fx
_Same(_compile, 'FloatingPointExceptions', _boolean) # /fp:except
_Same(_compile, 'ForceConformanceInForLoopScope', _boolean) # /Zc:forScope
_Same(_compile, 'ForcedIncludeFiles', _file_list) # /FI
_Same(_compile, 'ForcedUsingFiles', _file_list) # /FU
_Same(_compile, 'GenerateXMLDocumentationFiles', _boolean) # /doc
_Same(_compile, 'IgnoreStandardIncludePath', _boolean) # /X
_Same(_compile, 'MinimalRebuild', _boolean) # /Gm
_Same(_compile, 'OmitDefaultLibName', _boolean) # /Zl
_Same(_compile, 'OmitFramePointers', _boolean) # /Oy
_Same(_compile, 'PreprocessorDefinitions', _string_list) # /D
_Same(_compile, 'ProgramDataBaseFileName', _file_name) # /Fd
_Same(_compile, 'RuntimeTypeInfo', _boolean) # /GR
_Same(_compile, 'ShowIncludes', _boolean) # /showIncludes
_Same(_compile, 'SmallerTypeCheck', _boolean) # /RTCc
_Same(_compile, 'StringPooling', _boolean) # /GF
_Same(_compile, 'SuppressStartupBanner', _boolean) # /nologo
_Same(_compile, 'TreatWChar_tAsBuiltInType', _boolean) # /Zc:wchar_t
_Same(_compile, 'UndefineAllPreprocessorDefinitions', _boolean) # /u
_Same(_compile, 'UndefinePreprocessorDefinitions', _string_list) # /U
_Same(_compile, 'UseFullPaths', _boolean) # /FC
_Same(_compile, 'WholeProgramOptimization', _boolean) # /GL
_Same(_compile, 'XMLDocumentationFileName', _file_name)
_Same(_compile, 'AssemblerOutput',
_Enumeration(['NoListing',
'AssemblyCode', # /FA
'All', # /FAcs
'AssemblyAndMachineCode', # /FAc
'AssemblyAndSourceCode'])) # /FAs
_Same(_compile, 'BasicRuntimeChecks',
_Enumeration(['Default',
'StackFrameRuntimeCheck', # /RTCs
'UninitializedLocalUsageCheck', # /RTCu
'EnableFastChecks'])) # /RTC1
_Same(_compile, 'BrowseInformation',
_Enumeration(['false',
'true', # /FR
'true'])) # /Fr
_Same(_compile, 'CallingConvention',
_Enumeration(['Cdecl', # /Gd
'FastCall', # /Gr
'StdCall', # /Gz
'VectorCall'])) # /Gv
_Same(_compile, 'CompileAs',
_Enumeration(['Default',
'CompileAsC', # /TC
'CompileAsCpp'])) # /TP
_Same(_compile, 'DebugInformationFormat',
_Enumeration(['', # Disabled
'OldStyle', # /Z7
None,
'ProgramDatabase', # /Zi
'EditAndContinue'])) # /ZI
_Same(_compile, 'EnableEnhancedInstructionSet',
_Enumeration(['NotSet',
'StreamingSIMDExtensions', # /arch:SSE
'StreamingSIMDExtensions2', # /arch:SSE2
'AdvancedVectorExtensions', # /arch:AVX (vs2012+)
'NoExtensions', # /arch:IA32 (vs2012+)
# This one only exists in the new msbuild format.
'AdvancedVectorExtensions2', # /arch:AVX2 (vs2013r2+)
]))
_Same(_compile, 'ErrorReporting',
_Enumeration(['None', # /errorReport:none
'Prompt', # /errorReport:prompt
'Queue'], # /errorReport:queue
new=['Send'])) # /errorReport:send"
_Same(_compile, 'ExceptionHandling',
_Enumeration(['false',
'Sync', # /EHsc
'Async'], # /EHa
new=['SyncCThrow'])) # /EHs
_Same(_compile, 'FavorSizeOrSpeed',
_Enumeration(['Neither',
'Speed', # /Ot
'Size'])) # /Os
_Same(_compile, 'FloatingPointModel',
_Enumeration(['Precise', # /fp:precise
'Strict', # /fp:strict
'Fast'])) # /fp:fast
_Same(_compile, 'InlineFunctionExpansion',
_Enumeration(['Default',
'OnlyExplicitInline', # /Ob1
'AnySuitable'], # /Ob2
new=['Disabled'])) # /Ob0
_Same(_compile, 'Optimization',
_Enumeration(['Disabled', # /Od
'MinSpace', # /O1
'MaxSpeed', # /O2
'Full'])) # /Ox
_Same(_compile, 'RuntimeLibrary',
_Enumeration(['MultiThreaded', # /MT
'MultiThreadedDebug', # /MTd
'MultiThreadedDLL', # /MD
'MultiThreadedDebugDLL'])) # /MDd
_Same(_compile, 'StructMemberAlignment',
_Enumeration(['Default',
'1Byte', # /Zp1
'2Bytes', # /Zp2
'4Bytes', # /Zp4
'8Bytes', # /Zp8
'16Bytes'])) # /Zp16
_Same(_compile, 'WarningLevel',
_Enumeration(['TurnOffAllWarnings', # /W0
'Level1', # /W1
'Level2', # /W2
'Level3', # /W3
'Level4'], # /W4
new=['EnableAllWarnings'])) # /Wall
# Options found in MSVS that have been renamed in MSBuild.
_Renamed(_compile, 'EnableFunctionLevelLinking', 'FunctionLevelLinking',
_boolean) # /Gy
_Renamed(_compile, 'EnableIntrinsicFunctions', 'IntrinsicFunctions',
_boolean) # /Oi
_Renamed(_compile, 'KeepComments', 'PreprocessKeepComments', _boolean) # /C
_Renamed(_compile, 'ObjectFile', 'ObjectFileName', _file_name) # /Fo
_Renamed(_compile, 'OpenMP', 'OpenMPSupport', _boolean) # /openmp
_Renamed(_compile, 'PrecompiledHeaderThrough', 'PrecompiledHeaderFile',
_file_name) # Used with /Yc and /Yu
_Renamed(_compile, 'PrecompiledHeaderFile', 'PrecompiledHeaderOutputFile',
_file_name) # /Fp
_Renamed(_compile, 'UsePrecompiledHeader', 'PrecompiledHeader',
_Enumeration(['NotUsing', # VS recognized '' for this value too.
'Create', # /Yc
'Use'])) # /Yu
_Renamed(_compile, 'WarnAsError', 'TreatWarningAsError', _boolean) # /WX
_ConvertedToAdditionalOption(_compile, 'DefaultCharIsUnsigned', '/J')
# MSVS options not found in MSBuild.
_MSVSOnly(_compile, 'Detect64BitPortabilityProblems', _boolean)
_MSVSOnly(_compile, 'UseUnicodeResponseFiles', _boolean)
# MSBuild options not found in MSVS.
_MSBuildOnly(_compile, 'BuildingInIDE', _boolean)
_MSBuildOnly(_compile, 'CompileAsManaged',
_Enumeration([], new=['false',
'true'])) # /clr
_MSBuildOnly(_compile, 'CreateHotpatchableImage', _boolean) # /hotpatch
_MSBuildOnly(_compile, 'MultiProcessorCompilation', _boolean) # /MP
_MSBuildOnly(_compile, 'PreprocessOutputPath', _string) # /Fi
_MSBuildOnly(_compile, 'ProcessorNumber', _integer) # the number of processors
_MSBuildOnly(_compile, 'TrackerLogDirectory', _folder_name)
_MSBuildOnly(_compile, 'TreatSpecificWarningsAsErrors', _string_list) # /we
_MSBuildOnly(_compile, 'UseUnicodeForAssemblerListing', _boolean) # /FAu
# Defines a setting that needs very customized processing
_CustomGeneratePreprocessedFile(_compile, 'GeneratePreprocessedFile')
# Directives for converting MSVS VCLinkerTool to MSBuild Link.
# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\link.xml" for
# the schema of the MSBuild Link settings.
# Options that have the same name in MSVS and MSBuild
_Same(_link, 'AdditionalDependencies', _file_list)
_Same(_link, 'AdditionalLibraryDirectories', _folder_list) # /LIBPATH
# /MANIFESTDEPENDENCY:
_Same(_link, 'AdditionalManifestDependencies', _file_list)
_Same(_link, 'AdditionalOptions', _string_list)
_Same(_link, 'AddModuleNamesToAssembly', _file_list) # /ASSEMBLYMODULE
_Same(_link, 'AllowIsolation', _boolean) # /ALLOWISOLATION
_Same(_link, 'AssemblyLinkResource', _file_list) # /ASSEMBLYLINKRESOURCE
_Same(_link, 'BaseAddress', _string) # /BASE
_Same(_link, 'CLRUnmanagedCodeCheck', _boolean) # /CLRUNMANAGEDCODECHECK
_Same(_link, 'DelayLoadDLLs', _file_list) # /DELAYLOAD
_Same(_link, 'DelaySign', _boolean) # /DELAYSIGN
_Same(_link, 'EmbedManagedResourceFile', _file_list) # /ASSEMBLYRESOURCE
_Same(_link, 'EnableUAC', _boolean) # /MANIFESTUAC
_Same(_link, 'EntryPointSymbol', _string) # /ENTRY
_Same(_link, 'ForceSymbolReferences', _file_list) # /INCLUDE
_Same(_link, 'FunctionOrder', _file_name) # /ORDER
_Same(_link, 'GenerateDebugInformation', _boolean) # /DEBUG
_Same(_link, 'GenerateMapFile', _boolean) # /MAP
_Same(_link, 'HeapCommitSize', _string)
_Same(_link, 'HeapReserveSize', _string) # /HEAP
_Same(_link, 'IgnoreAllDefaultLibraries', _boolean) # /NODEFAULTLIB
_Same(_link, 'IgnoreEmbeddedIDL', _boolean) # /IGNOREIDL
_Same(_link, 'ImportLibrary', _file_name) # /IMPLIB
_Same(_link, 'KeyContainer', _file_name) # /KEYCONTAINER
_Same(_link, 'KeyFile', _file_name) # /KEYFILE
_Same(_link, 'ManifestFile', _file_name) # /ManifestFile
_Same(_link, 'MapExports', _boolean) # /MAPINFO:EXPORTS
_Same(_link, 'MapFileName', _file_name)
_Same(_link, 'MergedIDLBaseFileName', _file_name) # /IDLOUT
_Same(_link, 'MergeSections', _string) # /MERGE
_Same(_link, 'MidlCommandFile', _file_name) # /MIDL
_Same(_link, 'ModuleDefinitionFile', _file_name) # /DEF
_Same(_link, 'OutputFile', _file_name) # /OUT
_Same(_link, 'PerUserRedirection', _boolean)
_Same(_link, 'Profile', _boolean) # /PROFILE
_Same(_link, 'ProfileGuidedDatabase', _file_name) # /PGD
_Same(_link, 'ProgramDatabaseFile', _file_name) # /PDB
_Same(_link, 'RegisterOutput', _boolean)
_Same(_link, 'SetChecksum', _boolean) # /RELEASE
_Same(_link, 'StackCommitSize', _string)
_Same(_link, 'StackReserveSize', _string) # /STACK
_Same(_link, 'StripPrivateSymbols', _file_name) # /PDBSTRIPPED
_Same(_link, 'SupportUnloadOfDelayLoadedDLL', _boolean) # /DELAY:UNLOAD
_Same(_link, 'SuppressStartupBanner', _boolean) # /NOLOGO
_Same(_link, 'SwapRunFromCD', _boolean) # /SWAPRUN:CD
_Same(_link, 'TurnOffAssemblyGeneration', _boolean) # /NOASSEMBLY
_Same(_link, 'TypeLibraryFile', _file_name) # /TLBOUT
_Same(_link, 'TypeLibraryResourceID', _integer) # /TLBID
_Same(_link, 'UACUIAccess', _boolean) # /uiAccess='true'
_Same(_link, 'Version', _string) # /VERSION
_Same(_link, 'EnableCOMDATFolding', _newly_boolean) # /OPT:ICF
_Same(_link, 'FixedBaseAddress', _newly_boolean) # /FIXED
_Same(_link, 'LargeAddressAware', _newly_boolean) # /LARGEADDRESSAWARE
_Same(_link, 'OptimizeReferences', _newly_boolean) # /OPT:REF
_Same(_link, 'RandomizedBaseAddress', _newly_boolean) # /DYNAMICBASE
_Same(_link, 'TerminalServerAware', _newly_boolean) # /TSAWARE
_subsystem_enumeration = _Enumeration(
['NotSet',
'Console', # /SUBSYSTEM:CONSOLE
'Windows', # /SUBSYSTEM:WINDOWS
'Native', # /SUBSYSTEM:NATIVE
'EFI Application', # /SUBSYSTEM:EFI_APPLICATION
'EFI Boot Service Driver', # /SUBSYSTEM:EFI_BOOT_SERVICE_DRIVER
'EFI ROM', # /SUBSYSTEM:EFI_ROM
'EFI Runtime', # /SUBSYSTEM:EFI_RUNTIME_DRIVER
'WindowsCE'], # /SUBSYSTEM:WINDOWSCE
new=['POSIX']) # /SUBSYSTEM:POSIX
_target_machine_enumeration = _Enumeration(
['NotSet',
'MachineX86', # /MACHINE:X86
None,
'MachineARM', # /MACHINE:ARM
'MachineEBC', # /MACHINE:EBC
'MachineIA64', # /MACHINE:IA64
None,
'MachineMIPS', # /MACHINE:MIPS
'MachineMIPS16', # /MACHINE:MIPS16
'MachineMIPSFPU', # /MACHINE:MIPSFPU
'MachineMIPSFPU16', # /MACHINE:MIPSFPU16
None,
None,
None,
'MachineSH4', # /MACHINE:SH4
None,
'MachineTHUMB', # /MACHINE:THUMB
'MachineX64']) # /MACHINE:X64
# --- Linker (VCLinkerTool -> Link) option directives, continued. ---
_Same(_link, 'AssemblyDebug',
      _Enumeration(['',
                    'true',       # /ASSEMBLYDEBUG
                    'false']))    # /ASSEMBLYDEBUG:DISABLE
_Same(_link, 'CLRImageType',
      _Enumeration(['Default',
                    'ForceIJWImage',       # /CLRIMAGETYPE:IJW
                    'ForcePureILImage',    # /CLRIMAGETYPE:PURE
                    'ForceSafeILImage']))  # /CLRIMAGETYPE:SAFE
_Same(_link, 'CLRThreadAttribute',
      _Enumeration(['DefaultThreadingAttribute',  # /CLRTHREADATTRIBUTE:NONE
                    'MTAThreadingAttribute',      # /CLRTHREADATTRIBUTE:MTA
                    'STAThreadingAttribute']))    # /CLRTHREADATTRIBUTE:STA
_Same(_link, 'DataExecutionPrevention',
      _Enumeration(['',
                    'false',   # /NXCOMPAT:NO
                    'true']))  # /NXCOMPAT
_Same(_link, 'Driver',
      _Enumeration(['NotSet',
                    'Driver',  # /Driver
                    'UpOnly',  # /DRIVER:UPONLY
                    'WDM']))   # /DRIVER:WDM
_Same(_link, 'LinkTimeCodeGeneration',
      _Enumeration(['Default',
                    'UseLinkTimeCodeGeneration',  # /LTCG
                    'PGInstrument',               # /LTCG:PGInstrument
                    'PGOptimization',             # /LTCG:PGOptimize
                    'PGUpdate']))                 # /LTCG:PGUpdate
_Same(_link, 'ShowProgress',
      _Enumeration(['NotSet',
                    'LinkVerbose',             # /VERBOSE
                    'LinkVerboseLib'],         # /VERBOSE:Lib
                   new=['LinkVerboseICF',      # /VERBOSE:ICF
                        'LinkVerboseREF',      # /VERBOSE:REF
                        'LinkVerboseSAFESEH',  # /VERBOSE:SAFESEH
                        'LinkVerboseCLR']))    # /VERBOSE:CLR
_Same(_link, 'SubSystem', _subsystem_enumeration)
_Same(_link, 'TargetMachine', _target_machine_enumeration)
_Same(_link, 'UACExecutionLevel',
      _Enumeration(['AsInvoker',                # /level='asInvoker'
                    'HighestAvailable',         # /level='highestAvailable'
                    'RequireAdministrator']))   # /level='requireAdministrator'
_Same(_link, 'MinimumRequiredVersion', _string)
_Same(_link, 'TreatLinkerWarningAsErrors', _boolean)  # /WX

# Options found in MSVS that have been renamed in MSBuild.
_Renamed(_link, 'ErrorReporting', 'LinkErrorReporting',
         _Enumeration(['NoErrorReport',       # /ERRORREPORT:NONE
                       'PromptImmediately',   # /ERRORREPORT:PROMPT
                       'QueueForNextLogin'],  # /ERRORREPORT:QUEUE
                      new=['SendErrorReport']))  # /ERRORREPORT:SEND
_Renamed(_link, 'IgnoreDefaultLibraryNames', 'IgnoreSpecificDefaultLibraries',
         _file_list)  # /NODEFAULTLIB
_Renamed(_link, 'ResourceOnlyDLL', 'NoEntryPoint', _boolean)  # /NOENTRY
_Renamed(_link, 'SwapRunFromNet', 'SwapRunFromNET', _boolean)  # /SWAPRUN:NET

# Options whose value has moved to a different MSBuild element ('' means
# the value moved to the project-configuration level rather than a tool).
_Moved(_link, 'GenerateManifest', '', _boolean)
_Moved(_link, 'IgnoreImportLibrary', '', _boolean)
_Moved(_link, 'LinkIncremental', '', _newly_boolean)
_Moved(_link, 'LinkLibraryDependencies', 'ProjectReference', _boolean)
_Moved(_link, 'UseLibraryDependencyInputs', 'ProjectReference', _boolean)

# MSVS options not found in MSBuild.
_MSVSOnly(_link, 'OptimizeForWindows98', _newly_boolean)
_MSVSOnly(_link, 'UseUnicodeResponseFiles', _boolean)

# MSBuild options not found in MSVS.
_MSBuildOnly(_link, 'BuildingInIDE', _boolean)
_MSBuildOnly(_link, 'ImageHasSafeExceptionHandlers', _boolean)  # /SAFESEH
_MSBuildOnly(_link, 'LinkDLL', _boolean)  # /DLL Visible='false'
_MSBuildOnly(_link, 'LinkStatus', _boolean)  # /LTCG:STATUS
_MSBuildOnly(_link, 'PreventDllBinding', _boolean)  # /ALLOWBIND
_MSBuildOnly(_link, 'SupportNobindOfDelayLoadedDLL', _boolean)  # /DELAY:NOBIND
_MSBuildOnly(_link, 'TrackerLogDirectory', _folder_name)
_MSBuildOnly(_link, 'MSDOSStubFileName', _file_name)  # /STUB Visible='false'
_MSBuildOnly(_link, 'SectionAlignment', _integer)  # /ALIGN
_MSBuildOnly(_link, 'SpecifySectionAttributes', _string)  # /SECTION
_MSBuildOnly(_link, 'ForceFileOutput',
             _Enumeration([], new=['Enabled',                   # /FORCE
                                   'MultiplyDefinedSymbolOnly', # /FORCE:MULTIPLE
                                   'UndefinedSymbolOnly']))     # /FORCE:UNRESOLVED
_MSBuildOnly(_link, 'CreateHotPatchableImage',
             _Enumeration([], new=['Enabled',         # /FUNCTIONPADMIN
                                   'X86Image',        # /FUNCTIONPADMIN:5
                                   'X64Image',        # /FUNCTIONPADMIN:6
                                   'ItaniumImage']))  # /FUNCTIONPADMIN:16
_MSBuildOnly(_link, 'CLRSupportLastError',
             _Enumeration([], new=['Enabled',        # /CLRSupportLastError
                                   'Disabled',       # /CLRSupportLastError:NO
                                   'SystemDlls']))   # /CLRSupportLastError:SYSTEMDLL
# Directives for converting VCResourceCompilerTool to ResourceCompile.
# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\rc.xml" for
# the schema of the MSBuild ResourceCompile settings.
_Same(_rc, 'AdditionalOptions', _string_list)
_Same(_rc, 'AdditionalIncludeDirectories', _folder_list)  # /I
_Same(_rc, 'Culture', _Integer(msbuild_base=16))  # culture id; msbuild_base=16
                                                  # => rendered as hex in MSBuild
_Same(_rc, 'IgnoreStandardIncludePath', _boolean)  # /X
_Same(_rc, 'PreprocessorDefinitions', _string_list)  # /D
_Same(_rc, 'ResourceOutputFileName', _string)  # /fo
_Same(_rc, 'ShowProgress', _boolean)  # /v
# There is no UI in VisualStudio 2008 to set the following properties.
# However they are found in CL and other tools.  Include them here for
# completeness, as they are very likely to have the same usage pattern.
_Same(_rc, 'SuppressStartupBanner', _boolean)  # /nologo
_Same(_rc, 'UndefinePreprocessorDefinitions', _string_list)  # /u

# MSBuild options not found in MSVS.
_MSBuildOnly(_rc, 'NullTerminateStrings', _boolean)  # /n
_MSBuildOnly(_rc, 'TrackerLogDirectory', _folder_name)
# Directives for converting VCMIDLTool to Midl.
# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\midl.xml" for
# the schema of the MSBuild Midl settings.
_Same(_midl, 'AdditionalIncludeDirectories', _folder_list)  # /I
_Same(_midl, 'AdditionalOptions', _string_list)
_Same(_midl, 'CPreprocessOptions', _string)  # /cpp_opt
_Same(_midl, 'ErrorCheckAllocations', _boolean)  # /error allocation
_Same(_midl, 'ErrorCheckBounds', _boolean)  # /error bounds_check
_Same(_midl, 'ErrorCheckEnumRange', _boolean)  # /error enum
_Same(_midl, 'ErrorCheckRefPointers', _boolean)  # /error ref
_Same(_midl, 'ErrorCheckStubData', _boolean)  # /error stub_data
_Same(_midl, 'GenerateStublessProxies', _boolean)  # /Oicf
_Same(_midl, 'GenerateTypeLibrary', _boolean)
_Same(_midl, 'HeaderFileName', _file_name)  # /h
_Same(_midl, 'IgnoreStandardIncludePath', _boolean)  # /no_def_idir
_Same(_midl, 'InterfaceIdentifierFileName', _file_name)  # /iid
_Same(_midl, 'MkTypLibCompatible', _boolean)  # /mktyplib203
_Same(_midl, 'OutputDirectory', _string)  # /out
_Same(_midl, 'PreprocessorDefinitions', _string_list)  # /D
_Same(_midl, 'ProxyFileName', _file_name)  # /proxy
_Same(_midl, 'RedirectOutputAndErrors', _file_name)  # /o
_Same(_midl, 'SuppressStartupBanner', _boolean)  # /nologo
_Same(_midl, 'TypeLibraryName', _file_name)  # /tlb
_Same(_midl, 'UndefinePreprocessorDefinitions', _string_list)  # /U
_Same(_midl, 'WarnAsError', _boolean)  # /WX
_Same(_midl, 'DefaultCharType',
      _Enumeration(['Unsigned',  # /char unsigned
                    'Signed',    # /char signed
                    'Ascii']))   # /char ascii7
_Same(_midl, 'TargetEnvironment',
      _Enumeration(['NotSet',
                    'Win32',    # /env win32
                    'Itanium',  # /env ia64
                    'X64']))    # /env x64
_Same(_midl, 'EnableErrorChecks',
      _Enumeration(['EnableCustom',
                    'None',   # /error none
                    'All']))  # /error all
_Same(_midl, 'StructMemberAlignment',
      _Enumeration(['NotSet',
                    '1',    # Zp1
                    '2',    # Zp2
                    '4',    # Zp4
                    '8']))  # Zp8
_Same(_midl, 'WarningLevel',
      _Enumeration(['0',    # /W0
                    '1',    # /W1
                    '2',    # /W2
                    '3',    # /W3
                    '4']))  # /W4
_Renamed(_midl, 'DLLDataFileName', 'DllDataFileName', _file_name)  # /dlldata
_Renamed(_midl, 'ValidateParameters', 'ValidateAllParameters',
         _boolean)  # /robust

# MSBuild options not found in MSVS.
_MSBuildOnly(_midl, 'ApplicationConfigurationMode', _boolean)  # /app_config
_MSBuildOnly(_midl, 'ClientStubFile', _file_name)  # /cstub
_MSBuildOnly(_midl, 'GenerateClientFiles',
             _Enumeration([], new=['Stub',     # /client stub
                                   'None']))   # /client none
# NOTE(review): switch comments fixed; these map to /server, not /client.
_MSBuildOnly(_midl, 'GenerateServerFiles',
             _Enumeration([], new=['Stub',     # /server stub
                                   'None']))   # /server none
_MSBuildOnly(_midl, 'LocaleID', _integer)  # /lcid DECIMAL
_MSBuildOnly(_midl, 'ServerStubFile', _file_name)  # /sstub
_MSBuildOnly(_midl, 'SuppressCompilerWarnings', _boolean)  # /no_warn
_MSBuildOnly(_midl, 'TrackerLogDirectory', _folder_name)
_MSBuildOnly(_midl, 'TypeLibFormat',
             _Enumeration([], new=['NewFormat',    # /newtlb
                                   'OldFormat']))  # /oldtlb
# Directives for converting VCLibrarianTool to Lib.
# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\lib.xml" for
# the schema of the MSBuild Lib settings.
_Same(_lib, 'AdditionalDependencies', _file_list)
_Same(_lib, 'AdditionalLibraryDirectories', _folder_list)  # /LIBPATH
_Same(_lib, 'AdditionalOptions', _string_list)
_Same(_lib, 'ExportNamedFunctions', _string_list)  # /EXPORT
_Same(_lib, 'ForceSymbolReferences', _string)  # /INCLUDE
_Same(_lib, 'IgnoreAllDefaultLibraries', _boolean)  # /NODEFAULTLIB
_Same(_lib, 'IgnoreSpecificDefaultLibraries', _file_list)  # /NODEFAULTLIB
_Same(_lib, 'ModuleDefinitionFile', _file_name)  # /DEF
_Same(_lib, 'OutputFile', _file_name)  # /OUT
_Same(_lib, 'SuppressStartupBanner', _boolean)  # /NOLOGO
_Same(_lib, 'UseUnicodeResponseFiles', _boolean)
_Same(_lib, 'LinkTimeCodeGeneration', _boolean)  # /LTCG
_Same(_lib, 'TargetMachine', _target_machine_enumeration)

# TODO(jeanluc) _link defines the same value that gets moved to
# ProjectReference.  We may want to validate that they are consistent.
_Moved(_lib, 'LinkLibraryDependencies', 'ProjectReference', _boolean)

# MSBuild options not found in MSVS.
_MSBuildOnly(_lib, 'DisplayLibrary', _string)  # /LIST Visible='false'
_MSBuildOnly(_lib, 'ErrorReporting',
             _Enumeration([], new=['PromptImmediately',  # /ERRORREPORT:PROMPT
                                   'QueueForNextLogin',  # /ERRORREPORT:QUEUE
                                   'SendErrorReport',    # /ERRORREPORT:SEND
                                   'NoErrorReport']))    # /ERRORREPORT:NONE
_MSBuildOnly(_lib, 'MinimumRequiredVersion', _string)
_MSBuildOnly(_lib, 'Name', _file_name)  # /NAME
_MSBuildOnly(_lib, 'RemoveObjects', _file_list)  # /REMOVE
_MSBuildOnly(_lib, 'SubSystem', _subsystem_enumeration)
_MSBuildOnly(_lib, 'TrackerLogDirectory', _folder_name)
_MSBuildOnly(_lib, 'TreatLibWarningAsErrors', _boolean)  # /WX
_MSBuildOnly(_lib, 'Verbose', _boolean)
# Directives for converting VCManifestTool to Mt.
# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\mt.xml" for
# the schema of the MSBuild Mt (manifest tool) settings.

# Options that have the same name in MSVS and MSBuild.
_Same(_manifest, 'AdditionalManifestFiles', _file_list)  # /manifest
_Same(_manifest, 'AdditionalOptions', _string_list)
_Same(_manifest, 'AssemblyIdentity', _string)  # /identity:
_Same(_manifest, 'ComponentFileName', _file_name)  # /dll
_Same(_manifest, 'GenerateCatalogFiles', _boolean)  # /makecdfs
_Same(_manifest, 'InputResourceManifests', _string)  # /inputresource
_Same(_manifest, 'OutputManifestFile', _file_name)  # /out
_Same(_manifest, 'RegistrarScriptFile', _file_name)  # /rgs
_Same(_manifest, 'ReplacementsFile', _file_name)  # /replacements
_Same(_manifest, 'SuppressStartupBanner', _boolean)  # /nologo
_Same(_manifest, 'TypeLibraryFile', _file_name)  # /tlb:
_Same(_manifest, 'UpdateFileHashes', _boolean)  # /hashupdate
_Same(_manifest, 'UpdateFileHashesSearchPath', _file_name)
_Same(_manifest, 'VerboseOutput', _boolean)  # /verbose

# Options that have moved location.
_MovedAndRenamed(_manifest, 'ManifestResourceFile',
                 'ManifestResourceCompile',
                 'ResourceOutputFileName',
                 _file_name)
_Moved(_manifest, 'EmbedManifest', '', _boolean)

# MSVS options not found in MSBuild.
_MSVSOnly(_manifest, 'DependencyInformationFile', _file_name)
_MSVSOnly(_manifest, 'UseFAT32Workaround', _boolean)
_MSVSOnly(_manifest, 'UseUnicodeResponseFiles', _boolean)

# MSBuild options not found in MSVS.
_MSBuildOnly(_manifest, 'EnableDPIAwareness', _boolean)
_MSBuildOnly(_manifest, 'GenerateCategoryTags', _boolean)  # /category
_MSBuildOnly(_manifest, 'ManifestFromManagedAssembly',
             _file_name)  # /managedassemblyname
_MSBuildOnly(_manifest, 'OutputResourceManifests', _string)  # /outputresource
_MSBuildOnly(_manifest, 'SuppressDependencyElement', _boolean)  # /nodependency
_MSBuildOnly(_manifest, 'TrackerLogDirectory', _folder_name)
# Directives for MASM.
# See "$(VCTargetsPath)\BuildCustomizations\masm.xml" for the schema of the
# MSBuild MASM settings.

# Options that have the same name in MSVS and MSBuild.
_Same(_masm, 'UseSafeExceptionHandlers', _boolean)  # /safeseh
| apache-2.0 |
wuoliver/MissionPlanner | ExtLibs/Mavlink/pymavlink/generator/lib/genxmlif/xmlif4Dom.py | 79 | 5669 | #
# genxmlif, Release 0.9.0
# file: xmlif4Dom.py
#
# XML interface class to the 4DOM library
#
# history:
# 2005-04-25 rl created
# 2008-07-01 rl Limited support of XInclude added
#
# Copyright (c) 2005-2008 by Roland Leuthe. All rights reserved.
#
# --------------------------------------------------------------------
# The generix XML interface is
#
# Copyright (c) 2005-2008 by Roland Leuthe
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
import urllib
from xml.dom.ext.reader.Sax2 import Reader, XmlDomGenerator
from xml.sax._exceptions import SAXParseException
from ..genxmlif import XMLIF_4DOM, GenXmlIfError
from xmlifUtils import convertToAbsUrl
from xmlifDom import XmlInterfaceDom, XmlIfBuilderExtensionDom, InternalDomTreeWrapper, InternalDomElementWrapper
class XmlInterface4Dom (XmlInterfaceDom):
#####################################################
# for description of the interface methods see xmlifbase.py
#####################################################
def __init__ (self, verbose, useCaching, processXInclude):
XmlInterfaceDom.__init__ (self, verbose, useCaching, processXInclude)
self.xmlIfType = XMLIF_4DOM
if self.verbose:
print "Using 4Dom interface module..."
def parse (self, file, baseUrl="", internalOwnerDoc=None):
absUrl = convertToAbsUrl (file, baseUrl)
fp = urllib.urlopen (absUrl)
return self._parseStream (fp, file, absUrl, internalOwnerDoc)
def parseString (self, text, baseUrl="", internalOwnerDoc=None):
import cStringIO
fp = cStringIO.StringIO(text)
absUrl = convertToAbsUrl ("", baseUrl)
return self._parseStream (fp, "", absUrl, internalOwnerDoc)
def _parseStream (self, fp, file, absUrl, internalOwnerDoc):
reader = Reader(validate=0, keepAllWs=0, catName=None,
saxHandlerClass=ExtXmlDomGenerator, parser=None)
reader.handler.extinit(file, absUrl, reader.parser, self)
if internalOwnerDoc != None:
ownerDoc = internalOwnerDoc.document
else:
ownerDoc = None
try:
tree = reader.fromStream(fp, ownerDoc)
fp.close()
except SAXParseException, errInst:
fp.close()
raise GenXmlIfError, "%s: SAXParseException: %s" %(file, str(errInst))
treeWrapper = reader.handler.treeWrapper
# XInclude support
if self.processXInclude:
if internalOwnerDoc == None:
internalOwnerDoc = treeWrapper.getTree()
self.xInclude (treeWrapper.getRootNode(), absUrl, internalOwnerDoc)
return treeWrapper
###################################################
# Extended DOM generator class derived from XmlDomGenerator.
# Extended to store related line numbers, file/URL names and
# defined namespaces in the node object.

class ExtXmlDomGenerator(XmlDomGenerator, XmlIfBuilderExtensionDom):
    def __init__(self, keepAllWs=0):
        XmlDomGenerator.__init__(self, keepAllWs)
        self.treeWrapper = None

    def extinit (self, filePath, absUrl, parser, xmlIf):
        # Second-stage initialisation, called by XmlInterface4Dom after the
        # SAX reader has been constructed: the reader instantiates this
        # handler itself, so the extra context cannot be passed via __init__.
        self.filePath = filePath
        self.absUrl = absUrl
        self.parser = parser
        self.xmlIf = xmlIf

    def startElement(self, name, attribs):
        XmlDomGenerator.startElement(self, name, attribs)

        if not self.treeWrapper:
            # First element seen: the root node now exists, so the tree
            # wrapper can be created and the builder extension initialised.
            self.treeWrapper = self.xmlIf.treeWrapperClass(self, InternalDomTreeWrapper(self._rootNode), self.xmlIf.useCaching)
            XmlIfBuilderExtensionDom.__init__(self, self.filePath, self.absUrl, self.treeWrapper, self.xmlIf.elementWrapperClass)

        curNode = self._nodeStack[-1]
        internal4DomElementWrapper = InternalDomElementWrapper(curNode, self.treeWrapper.getTree())

        # Drop the (None, None) pseudo-namespace entry if present.  Catch
        # only ValueError (raised by list.remove when the entry is absent)
        # instead of the original bare except, which could hide real errors.
        curNs = self._namespaces.items()
        try:
            curNs.remove( (None,None) )
        except ValueError:
            pass

        XmlIfBuilderExtensionDom.startElementHandler (self, internal4DomElementWrapper, self.parser.getLineNumber(), curNs)

    def endElement(self, name):
        curNode = self._nodeStack[-1]
        XmlIfBuilderExtensionDom.endElementHandler (self, curNode.xmlIfExtInternalWrapper, self.parser.getLineNumber())
        XmlDomGenerator.endElement(self, name)
| gpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.