repo_name
stringlengths
5
100
path
stringlengths
4
375
copies
stringclasses
991 values
size
stringlengths
4
7
content
stringlengths
666
1M
license
stringclasses
15 values
RedHatQE/cfme_tests
cfme/fixtures/node_annotate.py
1
4124
import csv import os from operator import itemgetter import py import pytest import yaml from .pytest_store import store from cfme.utils.conf import cfme_data from cfme.utils.path import project_path class MarkFromMap(object): def __init__(self, mark_map): self.mark_map = mark_map def pytest_itemcollected(self, item): mark = self.mark_map.get(item.nodeid) if mark is not None: # todo: warn when the applied marker differs from the data if not item.get_marker(mark.name): item.add_marker(mark) @classmethod def from_parsed_list(cls, parsed, key, map_value): data = dict(map(itemgetter('id', key), parsed)) mark_map = dict((k, map_value(v)) for k, v in data.items()) return cls(mark_map) def pytest_configure(config): path = cfme_data.get('cfme_annotations_path') if path: to_parse = project_path.join(path) parsed = parse(to_parse) if not parsed: store.terminalreporter.line( 'no test annotation found in {}'.format(to_parse), yellow=True) else: store.terminalreporter.line('no test annotation found in {}'.format(path), yellow=True) parsed = [] config.pluginmanager.register(MarkFromMap.from_parsed_list( parsed, 'tier', pytest.mark.tier)) config.pluginmanager.register(MarkFromMap.from_parsed_list( parsed, 'requirement', pytest.mark.requirement)) config.pluginmanager.register(MarkFromMap.from_parsed_list(parsed, 'type', pytest.mark.__getattr__)) def pytest_addoption(parser): group = parser.getgroup('cfme') group.addoption('--tier', type=int, action='append', help='only run tests of the given tiers') group.addoption('--requirement', type=str, action='append', help='only run tests of the given requirements') def tier_matches(item, tiers): mark = item.get_marker('tier') if getattr(mark, 'args', None) is None: return False return mark.args[0] in tiers def requirement_matches(item, requirements): mark = item.get_marker('requirement') if getattr(mark, 'args', None) is None: return False return mark.args[0] in requirements def pytest_collection_modifyitems(config, items): tiers = 
config.getoption('tier') requirements = config.getoption('requirement') if not tiers and not requirements: return # TODO(rpfannsc) trim after pytest #1373 is done keep, discard = [], [] for item in items: if tiers and not tier_matches(item, tiers): discard.append(item) continue elif requirements and not requirement_matches(item, requirements): discard.append(item) continue else: keep.append(item) items[:] = keep # TODO(rpfannsc) add a reason after pytest #1372 is fixed config.hook.pytest_deselected(items=discard) def generate_nodeid(mapping): title = mapping['Title'] caseid = mapping['Test Case ID'] if not caseid: raise ValueError('incomplete entry') needle = title.find('[') attribute_part = title[:needle].replace('.', '::') parameter_part = title[needle:] if os.sep not in caseid: file_part = caseid[:-needle - 1].replace('.', os.sep) else: file_part = caseid return "{}.py::{}{}".format(file_part, attribute_part, parameter_part) def _clean(mapping): mapping.pop('', '') try: return { 'requirement': int(mapping['Requirement']), 'tier': int(mapping['TestTier']), 'id': generate_nodeid(mapping), 'type': mapping['TestType'].lower(), } except (TypeError, ValueError): return None def parse(path): if not path.check(): return [] with path.open() as fp: return filter(None, map(_clean, csv.DictReader(fp))) if __name__ == '__main__': mapping_file = project_path.join(py.std.sys.argv[1]) print(yaml.safe_dump(parse(mapping_file), default_flow_style=False))
gpl-2.0
tobias-lang/crawl
src/run_crawl_and_parse.py
1
2704
import sys import traceback import parse.link_parser import parse.impl.zeit_title_parser def is_interesting_url(url): if url.rfind("?")>0: return False if "#comments" in url: return False if "#" in url: return False if "/201" not in url: return False return True def filter_links(links, pagesVisited, domain): links = filter(lambda x: x not in pagesVisited, links) links = filter(lambda x: domain in x, links) links = filter(lambda x: is_interesting_url(x), links) return links def write_website(data, filename): import codecs try: f = codecs.open(filename, "w", "utf-8") f.write(data) f.close() except Exception as e: print(traceback.format_exc()) def spider(domain, article_parser, maxPages, outPath): pagesVisited = set() pagesToVisit = [domain] numVisited = 0 numUsed = 0 # different from numVisited (as parsing can fail) titles = [] f = open(outPath, "w") while numVisited < maxPages and pagesToVisit != []: url = pagesToVisit[0] pagesToVisit = pagesToVisit[1:] if url in pagesVisited: continue try: numVisited += 1 print numVisited-1, "Visiting:", url pagesVisited.add(url) parser = parse.link_parser.LinkParser() page_data, page_links = parser.getLinks(url) page_links = filter_links(page_links, pagesVisited, domain) pagesToVisit = pagesToVisit + page_links if "/201" in url: article_parser.reset() article_parser.feed(page_data) title = article_parser.getContents() titles.append(title.encode("utf-8")) numUsed += 1 except Exception as e: print(traceback.format_exc()) if len(titles) >= 100: for t in titles: # s = unicode(t).encode('utf8') f.write(t + "\n") f.flush() titles = [] f.close() print titles print "done" if __name__ == '__main__': domain = "http://www.zeit.de" maxPages = 1000 outPath = "../out/titles.txt" article_parser = parse.impl.zeit_title_parser.TitleParser() # if len(sys.argv) < 4: # print "usage: python run_download_websites <domain> <maxPages> <outDir>\n" \ # "<domain> = start-url (e.g. 
www.zeit.de)" # exit(1) # domain = sys.argv[1] # maxPages = int(sys.argv[2]) # outDir = sys.argv[3] print "domain,", domain print "maxPages,", maxPages print "outPath", outPath contents = spider(domain, article_parser, maxPages, outPath)
gpl-3.0
jn7163/django
django/contrib/gis/gdal/base.py
654
1179
from ctypes import c_void_p from django.contrib.gis.gdal.error import GDALException from django.utils import six class GDALBase(object): """ Base object for GDAL objects that has a pointer access property that controls access to the underlying C pointer. """ # Initially the pointer is NULL. _ptr = None # Default allowed pointer type. ptr_type = c_void_p # Pointer access property. def _get_ptr(self): # Raise an exception if the pointer isn't valid don't # want to be passing NULL pointers to routines -- # that's very bad. if self._ptr: return self._ptr else: raise GDALException('GDAL %s pointer no longer valid.' % self.__class__.__name__) def _set_ptr(self, ptr): # Only allow the pointer to be set with pointers of the # compatible type or None (NULL). if isinstance(ptr, six.integer_types): self._ptr = self.ptr_type(ptr) elif ptr is None or isinstance(ptr, self.ptr_type): self._ptr = ptr else: raise TypeError('Incompatible pointer type') ptr = property(_get_ptr, _set_ptr)
bsd-3-clause
pshen/ansible
lib/ansible/modules/web_infrastructure/ansible_tower/tower_inventory.py
36
5624
#!/usr/bin/python # coding: utf-8 -*- # (c) 2017, Wayne Witzel III <wayne@riotousliving.com> # # This module is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This software is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this software. If not, see <http://www.gnu.org/licenses/>. ANSIBLE_METADATA = {'metadata_version': '1.0', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: tower_inventory version_added: "2.3" author: "Wayne Witzel III (@wwitzel3)" short_description: create, update, or destroy Ansible Tower inventory. description: - Create, update, or destroy Ansible Tower inventories. See U(https://www.ansible.com/tower) for an overview. options: name: description: - The name to use for the inventory. required: True description: description: - The description to use for the inventory. required: False default: null organization: description: - Organization the inventory belongs to. required: True variables: description: - Inventory variables. Use '@' to get from file. required: False default: null state: description: - Desired state of the resource. required: False default: "present" choices: ["present", "absent"] tower_host: description: - URL to your Tower instance. required: False default: null tower_username: description: - Username for your Tower instance. required: False default: null tower_password: description: - Password for your Tower instance. required: False default: null tower_verify_ssl: description: - Dis/allow insecure connections to Tower. 
If C(no), SSL certificates will not be validated. This should only be used on personally controlled sites using self-signed certificates. required: False default: True tower_config_file: description: - Path to the Tower config file. See notes. required: False default: null requirements: - "python >= 2.6" - "ansible-tower-cli >= 3.0.3" notes: - If no I(config_file) is provided we will attempt to use the tower-cli library defaults to find your Tower host information. - I(config_file) should contain Tower configuration in the following format host=hostname username=username password=password ''' EXAMPLES = ''' - name: Add tower inventory tower_inventory: name: "Foo Inventory" description: "Our Foo Cloud Servers" organization: "Bar Org" state: present tower_config_file: "~/tower_cli.cfg" ''' try: import tower_cli import tower_cli.utils.exceptions as exc from tower_cli.conf import settings from ansible.module_utils.ansible_tower import tower_auth_config, tower_check_mode HAS_TOWER_CLI = True except ImportError: HAS_TOWER_CLI = False def main(): module = AnsibleModule( argument_spec=dict( name=dict(required=True), description=dict(), organization=dict(required=True), variables=dict(), tower_host=dict(), tower_username=dict(), tower_password=dict(no_log=True), tower_verify_ssl=dict(type='bool', default=True), tower_config_file=dict(type='path'), state=dict(choices=['present', 'absent'], default='present'), ), supports_check_mode=True ) if not HAS_TOWER_CLI: module.fail_json(msg='ansible-tower-cli required for this module') name = module.params.get('name') description = module.params.get('description') organization = module.params.get('organization') variables = module.params.get('variables') state = module.params.get('state') json_output = {'inventory': name, 'state': state} tower_auth = tower_auth_config(module) with settings.runtime_values(**tower_auth): tower_check_mode(module) inventory = tower_cli.get_resource('inventory') try: org_res = 
tower_cli.get_resource('organization') org = org_res.get(name=organization) if state == 'present': result = inventory.modify(name=name, organization=org['id'], variables=variables, description=description, create_on_missing=True) json_output['id'] = result['id'] elif state == 'absent': result = inventory.delete(name=name, organization=org['id']) except (exc.NotFound) as excinfo: module.fail_json(msg='Failed to update inventory, organization not found: {0}'.format(excinfo), changed=False) except (exc.ConnectionError, exc.BadRequest) as excinfo: module.fail_json(msg='Failed to update inventory: {0}'.format(excinfo), changed=False) json_output['changed'] = result['changed'] module.exit_json(**json_output) from ansible.module_utils.basic import AnsibleModule if __name__ == '__main__': main()
gpl-3.0
aattaran/Machine-Learning-with-Python
CTCI/Chapter 2/Question2_7.py
1
2591
from classes.LinkedList import * # Iterative approch def isPalindrome_iter(linkedlist): if linkedlist.head == None: return None fast = linkedlist.head slow = linkedlist.head firsthalf = [] while fast != None and fast.next != None: firsthalf.append(slow.value) slow = slow.next fast = fast.next.next if fast != None: slow = slow.next while slow != None: if firsthalf.pop() != slow.value: return False else: slow = slow.next return True # Recursive approch def isPalindrome_recu(linkedlist): length = lengthOfLinkedlist(linkedlist) current = linkedlist.head result = isPalindrome_recu_helper(current, length) return result[1] def isPalindrome_recu_helper(current, length): if current == None: return [None, True] elif length == 1: return [current.next, True] elif length == 2: return [current.next.next, current.value == current.next.value] # result is a python list stores two variables result = isPalindrome_recu_helper(current.next, length - 2) if (result[0] == None) or (not result[1]): return result else: result[1] = current.value == result[0].value result[0] = result[0].next return result def lengthOfLinkedlist(linkedlist): length = 0 current = linkedlist.head while current != None: length += 1 current = current.next return length # -------------------test------------------ L1 = randomLinkedList(3, 3, 4) print "L2:", L1 print "isPalindrome_iter: ", isPalindrome_iter(L1) print "isPalindrome_recu: ", isPalindrome_recu(L1) L2 = LinkedList() for i in range(1,4): L2.addNode(i) for i in range(3, 0, -1): L2.addNode(i) print "L3:", L2 print "isPalindrome_iter: ", isPalindrome_iter(L2) print "isPalindrome_recu: ", isPalindrome_recu(L2) # Another method: reverse the list and check if they are the same def isPalindrome(L1): reverseL1 = reverseList(L1) return isEqual(L1, reverseL1) def reverseList(L1): reverseL1 = LinkedList() current = L1.head while current != None: reverseL1.addNode(current.value) current = current.next return reverseL1 def isEqual(L1,L2): curr1 = L1.head curr2 = 
L2.head while curr1 != None and curr2 != None: if curr1.value != curr2.value: return False curr1 = curr1.next curr2 = curr2.next if curr1 != None or curr2 != None: return False else: return True for i in range(27): L1 = randomLinkedList(3, 3, 5) print L1 print isPalindrome(L1)
bsd-3-clause
datamicroscopes/kernels
bin/plot.py
1
2322
import sys import argparse import os import json import numpy as np import matplotlib.pylab as plt def draw(obj, outfile): groups, entities_per_group, features = ( obj['args']['groups'], obj['args']['entities_per_group'], obj['args']['features'], ) results = obj['results'] results = np.array(results).reshape( (len(groups), len(entities_per_group), len(features))) groups = np.array(groups, dtype=np.float) for i in xrange(len(features)): data = results[:, :, i] linear = groups * \ (data[0, 0] / (float(entities_per_group[0]) * groups[0]) / groups[0]) plt.plot(groups, linear, 'k--') for j in xrange(len(entities_per_group)): plt.plot( groups, data[:, j] / (float(entities_per_group[j]) * groups)) legend = ['linear'] legend.extend(['gsize {}'.format(gsize) for gsize in entities_per_group]) plt.legend(legend, loc='lower right') plt.xlabel('groups') plt.ylabel('time/iteration/entity (sec)') plt.ylim(ymin=0) plt.tight_layout() plt.savefig(outfile) plt.close() def main(args): parser = argparse.ArgumentParser() parser.add_argument("--results-dir", required=True) parser.add_argument("--sync", action='store_true') parser.add_argument("--volume") args = parser.parse_args(args) if args.sync and not args.volume: raise ValueError("--sync requires --volume") if args.sync: import multyvac vol = multyvac.volume.get(args.volume) vol.sync_down("", args.results_dir) for dirpath, _, filenames in os.walk(args.results_dir): for fname in filenames: toks = fname.split(".") if len(toks) != 2 or toks[1] != 'json': continue p = os.path.join(dirpath, fname) outp = os.path.join(dirpath, '{}.pdf'.format(toks[0])) with open(p, 'r') as fp: try: obj = json.load(fp) except ValueError: print "skipping file {}".format(p) continue draw(obj, outp) return 0 if __name__ == '__main__': sys.exit(main(sys.argv[1:]))
bsd-3-clause
YxomNPO/YxomCoin
qa/rpc-tests/test_framework/blocktools.py
39
2214
# blocktools.py - utilities for manipulating blocks and transactions # # Distributed under the MIT/X11 software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. # from .mininode import * from .script import CScript, OP_TRUE, OP_CHECKSIG # Create a block (with regtest difficulty) def create_block(hashprev, coinbase, nTime=None): block = CBlock() if nTime is None: import time block.nTime = int(time.time()+600) else: block.nTime = nTime block.hashPrevBlock = hashprev block.nBits = 0x207fffff # Will break after a difficulty adjustment... block.vtx.append(coinbase) block.hashMerkleRoot = block.calc_merkle_root() block.calc_sha256() return block def serialize_script_num(value): r = bytearray(0) if value == 0: return r neg = value < 0 absvalue = -value if neg else value while (absvalue): r.append(int(absvalue & 0xff)) absvalue >>= 8 if r[-1] & 0x80: r.append(0x80 if neg else 0) elif neg: r[-1] |= 0x80 return r # Create a coinbase transaction, assuming no miner fees. # If pubkey is passed in, the coinbase output will be a P2PK output; # otherwise an anyone-can-spend output. def create_coinbase(height, pubkey = None): coinbase = CTransaction() coinbase.vin.append(CTxIn(COutPoint(0, 0xffffffff), ser_string(serialize_script_num(height)), 0xffffffff)) coinbaseoutput = CTxOut() coinbaseoutput.nValue = 500 * COIN halvings = int(height/150) # regtest coinbaseoutput.nValue >>= halvings if (pubkey != None): coinbaseoutput.scriptPubKey = CScript([pubkey, OP_CHECKSIG]) else: coinbaseoutput.scriptPubKey = CScript([OP_TRUE]) coinbase.vout = [ coinbaseoutput ] coinbase.calc_sha256() return coinbase # Create a transaction with an anyone-can-spend output, that spends the # nth output of prevtx. def create_transaction(prevtx, n, sig, value): tx = CTransaction() assert(n < len(prevtx.vout)) tx.vin.append(CTxIn(COutPoint(prevtx.sha256, n), sig, 0xffffffff)) tx.vout.append(CTxOut(value, b"")) tx.calc_sha256() return tx
mit
BehavioralInsightsTeam/edx-platform
common/lib/xmodule/xmodule/randomize_module.py
17
3882
import logging import random from lxml import etree from web_fragments.fragment import Fragment from xblock.fields import Integer, Scope from xmodule.seq_module import SequenceDescriptor from xmodule.x_module import STUDENT_VIEW, XModule log = logging.getLogger('edx.' + __name__) class RandomizeFields(object): choice = Integer(help="Which random child was chosen", scope=Scope.user_state) class RandomizeModule(RandomizeFields, XModule): """ Chooses a random child module. Chooses the same one every time for each student. Example: <randomize> <problem url_name="problem1" /> <problem url_name="problem2" /> <problem url_name="problem3" /> </randomize> User notes: - If you're randomizing amongst graded modules, each of them MUST be worth the same number of points. Otherwise, the earth will be overrun by monsters from the deeps. You have been warned. Technical notes: - There is more dark magic in this code than I'd like. The whole varying-children + grading interaction is a tangle between super and subclasses of descriptors and modules. """ def __init__(self, *args, **kwargs): super(RandomizeModule, self).__init__(*args, **kwargs) # NOTE: calling self.get_children() doesn't work until we've picked a choice num_choices = len(self.descriptor.get_children()) if self.choice > num_choices: # Oops. Children changed. Reset. 
self.choice = None if self.choice is None: # choose one based on the system seed, or randomly if that's not available if num_choices > 0: if self.system.seed is not None: self.choice = self.system.seed % num_choices else: self.choice = random.randrange(0, num_choices) if self.choice is not None: # Now get_children() should return a list with one element log.debug("children of randomize module (should be only 1): %s", self.child) @property def child_descriptor(self): """ Return descriptor of selected choice """ if self.choice is None: return None return self.descriptor.get_children()[self.choice] @property def child(self): """ Return module instance of selected choice """ child_descriptor = self.child_descriptor if child_descriptor is None: return None return self.system.get_module(child_descriptor) def get_child_descriptors(self): """ For grading--return just the chosen child. """ if self.child_descriptor is None: return [] return [self.child_descriptor] def student_view(self, context): if self.child is None: # raise error instead? In fact, could complain on descriptor load... return Fragment(content=u"<div>Nothing to randomize between</div>") return self.child.render(STUDENT_VIEW, context) def get_icon_class(self): return self.child.get_icon_class() if self.child else 'other' class RandomizeDescriptor(RandomizeFields, SequenceDescriptor): # the editing interface can be the same as for sequences -- just a container module_class = RandomizeModule resources_dir = None filename_extension = "xml" show_in_read_only_mode = True def definition_to_xml(self, resource_fs): xml_object = etree.Element('randomize') for child in self.get_children(): self.runtime.add_block_as_child_node(child, xml_object) return xml_object def has_dynamic_children(self): """ Grading needs to know that only one of the children is actually "real". This makes it use module.get_child_descriptors(). """ return True
agpl-3.0
EdwardJKim/enhance
sres.py
1
9492
""" Modified from https://github.com/tensorflow/models/blob/master/tutorials/image/cifar10/cifar10.py """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import re import sys import glob import random import tensorflow as tf import sres_input from ops import periodic_shuffle FLAGS = tf.app.flags.FLAGS # Basic model parameters. tf.app.flags.DEFINE_integer( 'batch_size', 64, """Number of images to process in a batch.""") tf.app.flags.DEFINE_string( 'data_dir', '/notebooks/shared/videos/webcam', """Path to the data directory.""") tf.app.flags.DEFINE_boolean( 'use_fp16', False, """Train the model using fp16.""") tf.app.flags.DEFINE_integer( 'upscale_factor', 4, """The magnify factor.""") tf.app.flags.DEFINE_float( 'initial_learning_rate', 0.0001, """The initial learning rate.""") tf.app.flags.DEFINE_float( 'adam_momentum', 0.5, """The beta1 momentum in Adam optimizer.""") tf.app.flags.DEFINE_integer( 'num_filters', 64, """Number of filters in the convolutional layers.""") tf.app.flags.DEFINE_float( 'initial_weights_stddev', 0.02, """The standard deviation of the truncated Gaussian for conv kernels.""") tf.app.flags.DEFINE_integer( 'first_filter_size', 5, """Size of filters in the first convolutional layer.""") tf.app.flags.DEFINE_integer( 'second_filter_size', 3, """Size of filters in the second convolutional layer.""") tf.app.flags.DEFINE_integer( 'third_filter_size', 3, """Size of filters in the third convolutional layer.""") IMAGE_HEIGHT = sres_input.IMAGE_HEIGHT IMAGE_WIDTH = sres_input.IMAGE_WIDTH NUM_CHANNELS = sres_input.NUM_CHANNELS NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = sres_input.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = sres_input.NUM_EXAMPLES_PER_EPOCH_FOR_EVAL # Constants describing the training process. 
INITIAL_LEARNING_RATE = FLAGS.initial_learning_rate ADAM_MOMENTUM = FLAGS.adam_momentum NUM_FILTERS = FLAGS.num_filters INITIAL_WEIGHTS_STDDEV = FLAGS.initial_weights_stddev FIRST_FILTER_SIZE = FLAGS.first_filter_size SECOND_FILTER_SIZE = FLAGS.second_filter_size THIRD_FILTER_SIZE = FLAGS.third_filter_size def _variable_on_cpu(name, shape, initializer): """Helper to create a Variable stored on CPU memory. Args: name: name of the variable shape: list of ints initializer: initializer for Variable Returns: Variable Tensor """ with tf.device('/gpu:0'): dtype = tf.float16 if FLAGS.use_fp16 else tf.float32 # https://github.com/tensorflow/tensorflow/issues/1317 try: var = tf.get_variable( name, shape, initializer=initializer, dtype=dtype) except: with tf.variable_scope(tf.get_variable_scope(), reuse=True): var = tf.get_variable( name, shape, initializer=initializer, dtype=dtype) return var def _initialized_variable(name, shape, stddev): """ Helper to create a Variable initialized with a truncated normal distribution. Args: name: name of the variable shape: list of ints stddev: standard deviation of a truncated Gaussian Returns: Variable Tensor """ dtype = tf.float16 if FLAGS.use_fp16 else tf.float32 var = _variable_on_cpu( name, shape, tf.truncated_normal_initializer(stddev=stddev, dtype=dtype)) return var def inputs(eval_data=False): """Construct input for training using the Reader ops. Args: eval_data: bool, indicating if one should use the train or eval data set. Returns ------- images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size. labels: Labels. 1D tensor of [batch_size] size. 
Raises ------ ValueError: If no data_dir """ if not FLAGS.data_dir: raise ValueError('Please supply a data_dir') if eval_data: filenames = glob.glob(os.path.join(FLAGS.data_dir, 'valid', '*')) else: filenames = glob.glob(os.path.join(FLAGS.data_dir, 'train', '*')) random.shuffle(filenames) images = sres_input.inputs( filenames=filenames, batch_size=FLAGS.batch_size, shuffle=eval_data) if FLAGS.use_fp16: images = tf.cast(images, tf.float16) return images def generator(input_image): batch_size = tf.shape(input_image)[0] with tf.variable_scope('gen'): with tf.variable_scope('deconv1'): kernel_shape=[ 1, FIRST_FILTER_SIZE, FIRST_FILTER_SIZE, NUM_FILTERS, NUM_CHANNELS] kernel = _initialized_variable( 'weights', shape=kernel_shape, stddev=INITIAL_WEIGHTS_STDDEV) deconv_shape = [ batch_size, 2, int(IMAGE_HEIGHT / FLAGS.upscale_factor), int(IMAGE_WIDTH / FLAGS.upscale_factor), NUM_FILTERS] conv_t = tf.nn.conv3d_transpose( input_image, kernel, output_shape=deconv_shape, strides=[1, 1, 1, 1, 1]) # https://github.com/tensorflow/tensorflow/issues/833 conv_t = tf.reshape(conv_t, deconv_shape) biases = _variable_on_cpu( 'biases', [NUM_FILTERS], tf.constant_initializer(0.0)) bias = tf.nn.bias_add(conv_t, biases) # prelu alphas = _variable_on_cpu( 'alpha', [NUM_FILTERS], tf.constant_initializer(0.2)) deconv1 = tf.nn.relu(bias) + alphas * (bias - abs(bias)) * 0.5 with tf.variable_scope('deconv2'): kernel_shape=[ 1, SECOND_FILTER_SIZE, SECOND_FILTER_SIZE, NUM_FILTERS, NUM_FILTERS] kernel = _initialized_variable( 'weights', shape=kernel_shape, stddev=INITIAL_WEIGHTS_STDDEV) deconv_shape = [ batch_size, 2, int(IMAGE_HEIGHT / FLAGS.upscale_factor), int(IMAGE_WIDTH / FLAGS.upscale_factor), NUM_FILTERS] conv_t = tf.nn.conv3d_transpose( deconv1, kernel, output_shape=deconv_shape, strides=[1, 1, 1, 1, 1]) conv_t = tf.reshape(conv_t, deconv_shape) biases = _variable_on_cpu( 'biases', [NUM_FILTERS], tf.constant_initializer(0.0)) bias = tf.nn.bias_add(conv_t, biases) # prelu alphas = 
_variable_on_cpu( 'alpha', [NUM_FILTERS], tf.constant_initializer(0.2)) deconv2 = tf.nn.relu(bias) + alphas * (bias - abs(bias)) * 0.5 with tf.variable_scope('deconv3'): kernel_shape=[ 1, THIRD_FILTER_SIZE, THIRD_FILTER_SIZE, 3 * FLAGS.upscale_factor ** 2, NUM_FILTERS] kernel = _initialized_variable( 'weights', shape=kernel_shape, stddev=INITIAL_WEIGHTS_STDDEV) deconv_shape = [ batch_size, 2, int(IMAGE_HEIGHT / FLAGS.upscale_factor), int(IMAGE_WIDTH / FLAGS.upscale_factor), 3 * FLAGS.upscale_factor ** 2] conv_t = tf.nn.conv3d_transpose( deconv2, kernel, output_shape=deconv_shape, strides=[1, 1, 1, 1, 1]) conv_t = tf.reshape(conv_t, deconv_shape) biases = _variable_on_cpu( 'biases', [3 * FLAGS.upscale_factor ** 2], tf.constant_initializer(0.0)) bias = tf.nn.bias_add(conv_t, biases) with tf.variable_scope('ps'): output = periodic_shuffle( bias, FLAGS.upscale_factor, color=True) return tf.nn.tanh(output) def loss(real, fake): """ Mean squared error loss. """ mse = tf.reduce_mean(tf.square(tf.subtract(real, fake))) tf.add_to_collection('losses', mse) return tf.add_n(tf.get_collection('losses'), name='total_loss') def valid_loss(real, fake): """ Also mean squared loss for validation, but we use a separate function for different namespace. """ mse = tf.reduce_mean(tf.square(tf.subtract(real, fake))) tf.add_to_collection('valid_losses', mse) return tf.add_n(tf.get_collection('valid_losses'), name='total_valid_loss') def train(total_loss): """Train model. Create an optimizer and apply to all trainable variables. Add moving average for all trainable variables. Args: total_loss: Total loss from loss(). global_step: Integer Variable counting the number of training steps processed. Returns: train_op: op for training. """ # Compute gradients. 
with tf.control_dependencies([total_loss]): opt = tf.train.AdamOptimizer(INITIAL_LEARNING_RATE, beta1=ADAM_MOMENTUM) var_list = tf.get_collection( tf.GraphKeys.TRAINABLE_VARIABLES, scope='gen') apply_op = opt.minimize(total_loss, var_list=var_list) with tf.control_dependencies([apply_op]): train_op = tf.no_op(name='train') return train_op
apache-2.0
viblo/pymunk
dump/exlib.py
3
2851
from ctypes import * #ctypes.cdll.LoadLibrary("libex.dylib") chipmunk_lib = cdll.LoadLibrary("ex.dll") #function_pointer = CFUNCTYPE function_pointer = WINFUNCTYPE float_type = c_double cpFloat = c_double class Vec2d(Structure): __slots__ = ['x', 'y'] @classmethod def from_param(cls, arg): """Used by ctypes to automatically create Vec2ds""" return cls(arg) # def __init__(self, x_or_pair=None, y = None): # if x_or_pair != None: # if y == None: # self.x = x_or_pair[0] # self.y = x_or_pair[1] # else: # self.x = x_or_pair # self.y = y # String representaion (for debugging) def __repr__(self): return 'Vec2d(%s, %s)' % (self.x, self.y) Vec2d._fields_ = [ ('x', float_type), ('y', float_type), ] cpVect = Vec2d STRING = c_char_p class cpBody(Structure): pass cpBodyVelocityFunc = function_pointer(None, POINTER(cpBody), cpVect, cpFloat, cpFloat) cpBodyPositionFunc = function_pointer(None, POINTER(cpBody), cpFloat) #cpBody._pack_ = 4 cpBody._fields_ = [ ('velocity_func', cpBodyVelocityFunc), ('position_func', cpBodyPositionFunc), ('m', cpFloat), ('m_inv', cpFloat), ('i', cpFloat), ('i_inv', cpFloat), ('p', cpVect), ('v', cpVect), ('f', cpVect), ('a', cpFloat), ('w', cpFloat), ('t', cpFloat), ('rot', cpVect), #('data', cpDataPointer), ('v_limit', cpFloat), ('w_limit', cpFloat), ('v_bias_private', cpVect), ('w_bias_private', cpFloat), #('space_private', POINTER(cpSpace)), #('shapeList_private', POINTER(cpShape)), #('arbiterList_private', POINTER(cpArbiter)), #('constraintList_private', POINTER(cpConstraint)), #('node_private', cpComponentNode), ] cpBodyAlloc = chipmunk_lib.cpBodyAlloc cpBodyAlloc.restype = POINTER(cpBody) cpBodyAlloc.argtypes = [] cpBodyInit = chipmunk_lib.cpBodyInit cpBodyInit.restype = POINTER(cpBody) cpBodyInit.argtypes = [POINTER(cpBody), cpFloat, cpFloat] cpBodyNew = chipmunk_lib.cpBodyNew cpBodyNew.restype = POINTER(cpBody) cpBodyNew.argtypes = [cpFloat, cpFloat] class Body(object): def __init__(self, mass, moment): self._body = cpBodyNew(mass, moment) 
self._bodycontents = self._body.contents def _set_mass(self, mass): cpBodySetMass(self._body, mass) def _get_mass(self): return self._bodycontents.m mass = property(_get_mass, _set_mass) def _set_moment(self, moment): cpBodySetMoment(self._body, moment) def _get_moment(self): return self._bodycontents.i moment = property(_get_moment, _set_moment)
mit
asvetlov/steward
steward/__init__.py
1
9627
"""Declarative data components backed by plain (JSON-serializable) values.

A ``Component`` subclass declares ``Field`` descriptors in its class body.
Every instance keeps a single plain ``dict`` (``_plain_``) mirroring its
attribute values, so a whole object tree can be converted to and from plain
data via ``as_plain()`` / ``from_plain()`` without copying.
"""

__version__ = '0.0.2'

from collections import OrderedDict
# The container ABCs moved to collections.abc in Python 3.3 and importing
# them from `collections` itself was removed in Python 3.10.
from collections.abc import MutableMapping, MutableSequence, Sequence
from copy import deepcopy

# Unique marker meaning "no value supplied" (distinct from an explicit None).
sentinel = object()


class Error(Exception):
    """Base error"""


class Field:
    """Descriptor for a scalar attribute of a Component.

    Two synchronized representations are kept per instance: the rich value
    cached in ``instance.__dict__`` and the plain value in
    ``instance._plain_``.
    """

    name = None  # filled in by Namespace.__setitem__ at class-body time

    def __init__(self, *, default=sentinel, const=False, maker=sentinel):
        """
        :param default: value used when the attribute was never set
        :param const: when True the attribute cannot be reassigned
        :param maker: zero-argument callable producing a default lazily;
            mutually exclusive with ``default``
        """
        self.default = default
        self.maker = maker
        self.const = const
        if self.maker is not sentinel:
            if self.default is not sentinel:
                raise TypeError("Cannot specify both `default` and `maker`")
            if not callable(self.maker):
                raise TypeError("`maker` should be callable "
                                "accepting no parameters.")

    def set_name(self, name):
        # Called exactly once per field while the class body is collected.
        assert self.name is None, self.name
        self.name = name

    def __get__(self, instance, owner):
        if instance is None:
            return self
        name = self.name
        assert name
        ret = instance.__dict__.get(name, sentinel)
        if ret is sentinel:
            # Not cached yet: derive the value from the plain data (or the
            # default/maker) and cache both representations.
            ret, plainval = self.getter(instance._plain_.get(name, sentinel))
            instance.__dict__[name] = ret
            instance._plain_[name] = plainval
        return ret

    def getter(self, plainval):
        """Return ``(value, plain_value)`` for the stored plain value."""
        ret = plainval
        if ret is sentinel and self.default is not sentinel:
            ret = self.default
        elif ret is sentinel and self.maker is not sentinel:
            ret = self.maker()
        elif ret is sentinel:
            raise AttributeError("'{.name}' is not initialized".format(self))
        return ret, ret

    def __set__(self, instance, value, check_const=True):
        # check_const=False is used internally (Component.__init__/clone) to
        # allow the initial assignment of const fields.
        name = self.name
        assert name
        if check_const and self.const:
            raise AttributeError(
                "Constant '{.name}' cannot be set".format(self))
        ret, plainval = self.setter(value)
        instance.__dict__[name] = ret
        instance._plain_[name] = plainval

    def setter(self, value):
        """Return ``(value, plain_value)`` to store for ``value``."""
        return value, value


class FieldComp(Field):
    """Field holding a nested Component of a fixed type (or None)."""

    def __init__(self, type, *, default=sentinel, const=False,
                 maker=sentinel):
        if not issubclass(type, Component):
            raise TypeError("`type` should be subclass of steward.Component")
        if default is not None and default is not sentinel:
            if not isinstance(default, type):
                # BUGFIX: the original read `self.type` before it was
                # assigned (raising AttributeError instead of TypeError) and
                # called the shadowing parameter `type` on the default value
                # instead of the builtin type().
                raise TypeError("an {} is required, got {}".format(
                    type.__name__, default.__class__.__name__))
        super().__init__(default=default, const=const, maker=maker)
        self.type = type

    def setter(self, value):
        if value is None:
            return None, None
        if not isinstance(value, self.type):
            raise TypeError("an {} is required, got {}".format(
                self.type.__name__, type(value).__name__))
        return value, value._plain_

    def getter(self, plainval):
        ret = plainval
        if ret is sentinel and self.default is not sentinel:
            if self.default is None:
                return None, None
            else:
                return self.default, self.default._plain_
        if ret is sentinel and self.maker is not sentinel:
            val = self.maker()
            if val is None:
                return None, None
            if not isinstance(val, self.type):
                raise TypeError("`maker` should return object of type {!r}".
                                format(self.type))
            return val, val._plain_
        elif ret is sentinel:
            raise AttributeError("'{.name}' is not initialized".format(self))
        else:
            # Re-hydrate a Component from the stored plain dict (sharing it).
            return self.type.from_plain(ret) if ret is not None else None, ret


class DictProxy(MutableMapping):
    """Mapping of key -> Component backed by a plain dict of dicts."""

    @classmethod
    def _from_plain(cls, type, plainval):
        # Wrap an existing plain dict without copying it.
        ret = cls(type)
        ret._plain_ = plainval
        return ret

    def __init__(self, type):
        self.type = type
        self._plain_ = {}
        # Cache of already re-hydrated Component values, keyed like _plain_.
        self.__objects = {}

    def as_plain(self):
        return self._plain_

    def __len__(self):
        return len(self._plain_)

    def __iter__(self):
        return iter(self._plain_)

    def __getitem__(self, key):
        ret = self.__objects.get(key, sentinel)
        if ret is not sentinel:
            return ret
        plainitem = self._plain_[key]
        assert isinstance(plainitem, dict)
        ret = self.type.from_plain(plainitem)
        self.__objects[key] = ret
        return ret

    def __setitem__(self, key, val):
        assert isinstance(val, self.type)
        self.__objects[key] = val
        self._plain_[key] = val._plain_

    def __delitem__(self, key):
        del self._plain_[key]
        self.__objects.pop(key, None)


class FieldDict(Field):
    """Field exposing a DictProxy of nested Components; not assignable."""

    def __init__(self, type):
        super().__init__()
        self.type = type

    def setter(self, value):
        raise AttributeError("FieldDict cannot be set")

    def getter(self, plainval):
        if plainval is sentinel:
            plainval = {}
        assert isinstance(plainval, dict)
        return DictProxy._from_plain(self.type, plainval), plainval


class ListProxy(MutableSequence):
    """Sequence of Components backed by a plain list of dicts."""

    @classmethod
    def _from_plain(cls, type, plainval):
        # Wrap an existing plain list without copying it.
        ret = cls(type)
        ret._plain_ = plainval
        return ret

    def __init__(self, type):
        self.type = type
        self._plain_ = []
        # Lazily built list of re-hydrated Components, parallel to _plain_.
        self.__shadow = None

    def as_plain(self):
        return self._plain_

    def __len__(self):
        return len(self._plain_)

    def __iter__(self):
        if self.__shadow is None:
            t = self.type
            self.__shadow = [t.from_plain(i) for i in self._plain_]
        return iter(self.__shadow)

    def __getitem__(self, index):
        if self.__shadow is None:
            t = self.type
            self.__shadow = [t.from_plain(i) for i in self._plain_]
        return self.__shadow[index]

    def __setitem__(self, index, val):
        if self.__shadow is None:
            t = self.type
            self.__shadow = [t.from_plain(i) for i in self._plain_]
        self.__shadow[index] = val
        if isinstance(val, Sequence):
            # Slice assignment: val is a sequence of components.
            nv = []
            for i in val:
                # BUGFIX: the original misspelled this as `isinstace`,
                # raising NameError on every slice assignment.
                assert isinstance(i, self.type)
                nv.append(i._plain_)
        else:
            nv = val._plain_
        self._plain_[index] = nv

    def __delitem__(self, index):
        if self.__shadow is None:
            t = self.type
            self.__shadow = [t.from_plain(i) for i in self._plain_]
        del self._plain_[index]
        del self.__shadow[index]

    def insert(self, index, value):
        assert isinstance(value, self.type)
        if self.__shadow is None:
            t = self.type
            self.__shadow = [t.from_plain(i) for i in self._plain_]
        self.__shadow.insert(index, value)
        self._plain_.insert(index, value._plain_)


class FieldList(Field):
    """Field exposing a ListProxy of nested Components; not assignable."""

    def __init__(self, type):
        super().__init__()
        self.type = type

    def setter(self, value):
        raise AttributeError("FieldList cannot be set")

    def getter(self, plainval):
        if plainval is sentinel:
            plainval = []
        if isinstance(plainval, tuple):
            plainval = list(plainval)
        assert isinstance(plainval, list), plainval
        return ListProxy._from_plain(self.type, plainval), plainval


class Namespace(OrderedDict):
    """Class-body namespace that collects Field declarations.

    Inherited fields are seeded from the bases so subclasses see them too.
    """

    def __init__(self, bases):
        super().__init__()
        self.fields = {}
        for b in bases:
            if issubclass(b, Component):
                self.fields.update(b._fields_)

    def __setitem__(self, key, val):
        super().__setitem__(key, val)
        if isinstance(val, Field):
            val.set_name(key)
            self.fields[key] = val

    def __delitem__(self, key):
        raise RuntimeError("Not allowed")


class ComponentMeta(type):
    """Metaclass wiring the collected fields onto the new class."""

    @classmethod
    def __prepare__(cls, name, bases):
        return Namespace(bases)

    def __init__(cls, name, bases, dct):
        type.__init__(cls, name, bases, dct)
        cls._fields_ = dct.fields
        cls._names_ = frozenset(dct.fields.keys())


class Component(metaclass=ComponentMeta):
    """Base class for declarative components; see the module docstring."""

    def __init__(self, **kwargs):
        """
        :param kwargs: initial values for declared fields
        :raises Error: on unknown parameters or on required fields
            (no default/maker) left unset
        """
        self._plain_ = {}
        names = self._names_
        delta = frozenset(kwargs) - names
        if delta:
            extra = ', '.join(sorted(delta))
            raise Error("Extra params: '{}'".format(extra))
        missing = []
        fields = self._fields_
        for k, v in kwargs.items():
            if k in names:
                fields[k].__set__(self, v, check_const=False)
        missing = names - set(kwargs.keys())
        if not missing:
            return
        # Probe the remaining fields: those without a usable default/maker
        # raise AttributeError and are reported as missing parameters.
        to_report = []
        klass = self.__class__
        for name in missing:
            field = fields[name]
            try:
                field.__get__(self, klass)
            except AttributeError:
                to_report.append(name)
        if to_report:
            missing = ', '.join(sorted(to_report))
            raise Error("Missing params: '{}'".format(missing))

    @classmethod
    def from_plain(cls, plainval):
        """Wrap an existing plain dict without copying or validating it."""
        self = object.__new__(cls)
        self._plain_ = plainval
        return self

    def as_plain(self):
        """Return the underlying plain dict (shared with self, not a copy)."""
        return self._plain_

    def clone(self, **kwargs):
        """Deep-copy this component, optionally overriding fields."""
        ret = self.__class__(**deepcopy(self.as_plain()))
        fields = self._fields_
        for k, v in kwargs.items():
            field = fields.get(k)
            if field is None:
                raise Error("Unknown field '{}' cannot be set".format(k))
            field.__set__(ret, v, check_const=False)
        return ret
mit
Reilithion/xmms2-reilithion
wafadmin/Tools/glib2.py
1
4843
#! /usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2006-2008 (ita)

# NOTE: this is a waf 1.5-era build tool; it targets Python 2
# (see the dict.iteritems() call in process_enums below).

"GLib2 support"

import Task, Utils
from TaskGen import taskgen, before, after, feature

#
# glib-genmarshal
#

@taskgen
def add_marshal_file(self, filename, prefix):
    # Queue a marshaller description file; the actual tasks are created
    # later by process_marshal() (registered here via self.meths).
    if not hasattr(self, 'marshal_list'):
        self.marshal_list = []
    self.meths.append('process_marshal')
    self.marshal_list.append((filename, prefix))

@taskgen
@before('apply_core')
def process_marshal(self):
    # For each queued file, create two glib-genmarshal tasks: one emitting
    # the header and one emitting the C body.
    for filename, prefix in getattr(self, 'marshal_list', []):
        node = self.path.find_resource(filename)
        if not node:
            raise Utils.WafError('file not found ' + filename)

        # Generate the header
        header_env = self.env.copy()
        header_env['GLIB_GENMARSHAL_PREFIX'] = prefix
        header_env['GLIB_GENMARSHAL_MODE'] = '--header'
        task = self.create_task('glib_genmarshal', header_env)
        task.set_inputs(node)
        task.set_outputs(node.change_ext('.h'))

        # Generate the body
        body_env = self.env.copy()
        body_env['GLIB_GENMARSHAL_PREFIX'] = prefix
        body_env['GLIB_GENMARSHAL_MODE'] = '--body'
        task = self.create_task('glib_genmarshal', body_env)
        task.set_inputs(node)
        task.set_outputs(node.change_ext('.c'))

        # the c file generated will be processed too
        outnode = node.change_ext('.c')
        self.allnodes.append(outnode)

#
# glib-mkenums
#

@taskgen
def add_enums_from_template(self, source='', target='', template='',
                            comments=''):
    # Queue an enums generation driven by a template file; section
    # snippets (file-head, value-prod, ...) stay empty in this variant.
    if not hasattr(self, 'enums_list'):
        self.enums_list = []
    self.meths.append('process_enums')
    self.enums_list.append({'source': source,
                            'target': target,
                            'template': template,
                            'file-head': '',
                            'file-prod': '',
                            'file-tail': '',
                            'enum-prod': '',
                            'value-head': '',
                            'value-prod': '',
                            'value-tail': '',
                            'comments': comments})

@taskgen
def add_enums(self, source='', target='',
              file_head='', file_prod='', file_tail='', enum_prod='',
              value_head='', value_prod='', value_tail='', comments=''):
    # Queue an enums generation driven by inline section snippets
    # (no template file).
    if not hasattr(self, 'enums_list'):
        self.enums_list = []
    self.meths.append('process_enums')
    self.enums_list.append({'source': source,
                            'template': '',
                            'target': target,
                            'file-head': file_head,
                            'file-prod': file_prod,
                            'file-tail': file_tail,
                            'enum-prod': enum_prod,
                            'value-head': value_head,
                            'value-prod': value_prod,
                            'value-tail': value_tail,
                            'comments': comments})

@taskgen
@before('apply_core')
def process_enums(self):
    # Turn each queued enums description into one glib-mkenums task with
    # the command line assembled into GLIB_MKENUMS_* env variables.
    for enum in getattr(self, 'enums_list', []):
        # temporary
        env = self.env.copy()
        task = self.create_task('glib_mkenums', env)

        inputs = []

        # process the source
        source_list = self.to_list(enum['source'])
        if not source_list:
            raise Utils.WafError('missing source ' + str(enum))
        source_list = [self.path.find_resource(k) for k in source_list]
        inputs += source_list
        env['GLIB_MKENUMS_SOURCE'] = [k.abspath(env) for k in source_list]

        # find the target
        if not enum['target']:
            raise Utils.WafError('missing target ' + str(enum))
        tgt_node = self.path.find_or_declare(enum['target'])
        if tgt_node.name.endswith('.c'):
            # generated C files feed back into the build
            self.allnodes.append(tgt_node)
        env['GLIB_MKENUMS_TARGET'] = tgt_node.abspath(env)

        options = []

        if enum['template']: # template, if provided
            template_node = self.path.find_resource(enum['template'])
            options.append('--template %s' % (template_node.abspath(env)))
            inputs.append(template_node)
        params = {'file-head' : '--fhead',
                  'file-prod' : '--fprod',
                  'file-tail' : '--ftail',
                  'enum-prod' : '--eprod',
                  'value-head' : '--vhead',
                  'value-prod' : '--vprod',
                  'value-tail' : '--vtail',
                  'comments': '--comments'}
        # NOTE: .iteritems() is Python-2-only, consistent with the waf
        # version this tool belongs to.
        for param, option in params.iteritems():
            if enum[param]:
                options.append('%s %r' % (option, enum[param]))
        env['GLIB_MKENUMS_OPTIONS'] = ' '.join(options)

        # update the task instance
        task.set_inputs(inputs)
        task.set_outputs(tgt_node)

# Shell command templates; both run before C compilation so the generated
# sources exist when they are needed.
Task.simple_task_type('glib_genmarshal',
    '${GLIB_GENMARSHAL} ${SRC} --prefix=${GLIB_GENMARSHAL_PREFIX} ${GLIB_GENMARSHAL_MODE} > ${TGT}',
    color='BLUE', before='cc')
Task.simple_task_type('glib_mkenums',
    '${GLIB_MKENUMS} ${GLIB_MKENUMS_OPTIONS} ${GLIB_MKENUMS_SOURCE} > ${GLIB_MKENUMS_TARGET}',
    color='PINK', before='cc')

def detect(conf):
    # Locate the two GLib code generators and export them to the build env.
    glib_genmarshal = conf.find_program('glib-genmarshal', var='GLIB_GENMARSHAL')
    mk_enums_tool = conf.find_program('glib-mkenums', var='GLIB_MKENUMS')
lgpl-2.1
redhat-openstack/infrared
tests/test_plugins.py
3
32365
from six.moves import configparser
import os
import git
import yaml
import shutil
import sys
import tarfile
import tempfile
import filecmp
import pytest

from infrared.core.utils.exceptions import IRPluginExistsException, \
    IRUnsupportedPluginType
from infrared.core.utils.exceptions import IRFailedToAddPlugin
from infrared.core.utils.exceptions import IRValidatorException
from infrared.core.utils.exceptions import IRFailedToRemovePlugin
from infrared.core.utils.exceptions import IRFailedToUpdatePlugin
from infrared.core.utils.exceptions import IRUnsupportedSpecOptionType
from infrared.core.utils.dict_utils import dict_insert
import infrared.core.services.plugins
from infrared.core.services.plugins import InfraredPluginManager
from infrared.core.services.plugins import InfraredPlugin
from infrared.core.utils.validators import SpecValidator, RegistryValidator
from infrared.core.services import CoreServices, ServiceName

PLUGIN_SPEC = 'plugin.spec'
SAMPLE_PLUGINS_DIR = 'tests/example/plugins'

# Plugin types every temporary conf file is seeded with (see
# plugin_manager_fixture below).
SUPPORTED_TYPES_DICT = dict(
    supported_types=dict(
        supported_type1='Tools of supported_type1',
        supported_type2='Tools of supported_type2',
        provision='Provisioning plugins',
        install='Installing plugins',
        test='Testing plugins'
    )
)


@pytest.fixture()
def plugins_conf_fixture(tmpdir):
    """Creates a temporary IR plugins conf file and yields its path

    :param tmpdir: builtin pytest fixtures to create temporary files & dirs
    :return: plugins conf file as a LocalPath object (py.path)
    """
    # Creates temporary plugins conf file
    lp_dir = tmpdir.mkdir('test_tmp_dir')
    lp_file = lp_dir.join('.plugins.ini')
    try:
        yield lp_file
    finally:
        lp_dir.remove()


@pytest.fixture()
def plugin_manager_fixture(plugins_conf_fixture):
    """Creates a PluginManager fixture

    Creates a fixture which returns a PluginManager object based on
    temporary plugins conf with default values(sections - provision,
    install & test)
    :param plugins_conf_fixture: fixture that returns a path of a temporary
    plugins conf
    """
    lp_file = plugins_conf_fixture

    def plugin_manager_helper(plugins_conf_dict=None):
        # Write the requested plugins conf (plus the default supported
        # types) to the temporary ini file, then register and return a
        # fresh plugin manager bound to it.
        if plugins_conf_dict is None:
            plugins_conf_dict = {}

        plugins_conf_dict.update(SUPPORTED_TYPES_DICT)

        with lp_file.open(mode='w') as fp:
            config = configparser.ConfigParser()
            for section, section_data in plugins_conf_dict.items():
                config.add_section(section)
                for option, value in section_data.items():
                    config.set(section, option, value)
            config.write(fp)

        CoreServices.register_service(
            ServiceName.PLUGINS_MANAGER, InfraredPluginManager(
                lp_file.strpath,
                os.path.join(lp_file.dirname, "plugins")))
        return CoreServices.plugins_manager()

    yield plugin_manager_helper


@pytest.fixture()
def git_plugin_manager_fixture(tmpdir, plugin_manager_fixture):
    """Yields an IRPluginManager obj configured with git plugin

    Just like plugin_manager_fixture but also create two temporary
    directories that will be used to mimic local and remote git repos of an
    InfraRed's plugin. The IRPluginManager that will be returned, will be
    configured with this InfraRed git plugin.
    :param tmpdir: builtin pytest fixtures to create temporary files & dirs
    :param plugin_manager_fixture: Fixture object which yields
    InfraredPluginManger object
    """
    plugin_tar_gz = os.path.join(
        os.path.dirname(os.path.realpath(__file__)),
        'example/plugins/git_plugin/git_plugin_repo.tar.gz')

    plugin_repo_dir = tmpdir.mkdir('plugin_repo_dir')
    plugin_install_dir = tmpdir.mkdir('plugin_install_dir')

    # Unpack the canned repo, then clone it to mimic a remote->local clone.
    t_file = tarfile.open(plugin_tar_gz)
    t_file.extractall(path=str(plugin_repo_dir))

    repo = git.Repo.clone_from(
        url=str(plugin_repo_dir),
        to_path=str(plugin_install_dir))
    # git identity is required for any commit the tests may perform
    repo.git.config('user.name', 'dummy-user')
    repo.git.config('user.email', 'dummy@email.com')

    plugin_spec_dict = get_plugin_spec_flatten_dict(str(plugin_install_dir))

    try:
        plugin_manager = plugin_manager_fixture({
            plugin_spec_dict['type']: {
                plugin_spec_dict['name']: str(plugin_install_dir)}
        })
        yield plugin_manager
    finally:
        plugin_repo_dir.remove()
        plugin_install_dir.remove()


def get_plugin_spec_flatten_dict(plugin_dir):
    """Creates a flat dict from the plugin spec

    :param plugin_dir: A path to the plugin's dir
    :return: A flatten dictionary contains the plugin's properties
    """
    with open(os.path.join(plugin_dir, PLUGIN_SPEC)) as fp:
        spec_yaml = yaml.safe_load(fp)

    plugin_name = list(spec_yaml['subparsers'].keys())[0]
    # 'description' and 'plugin_type' may live either at the top level or
    # nested (under the subparser / under 'config' respectively).
    plugin_description = spec_yaml['description'] \
        if "description" in spec_yaml \
        else spec_yaml['subparsers'][plugin_name]['description']
    plugin_type = spec_yaml["config"]["plugin_type"] \
        if "config" in spec_yaml \
        else spec_yaml["plugin_type"]
    plugin_spec_dict = dict(
        name=plugin_name,
        dir=plugin_dir,
        description=plugin_description,
        type=plugin_type
    )

    return plugin_spec_dict


def plugin_in_conf(plugins_conf, plugin_type, plugin_name):
    """Checks if a plugin exists in a conf file

    :param plugins_conf: A path to the plugins conf file
    :param plugin_type: The plugin's type
    :param plugin_name: The Plugin's name
    :return: True if plugin is in the conf file, otherwise False
    """
    config = configparser.ConfigParser()
    with open(plugins_conf) as fp:
        # read_file replaced readfp in Python 3.2
        if (sys.version_info > (3, 2)):
            config.read_file(fp)
        else:
            config.readfp(fp)

    return config.has_option(plugin_type, plugin_name)


def test_add_plugin(plugin_manager_fixture):
    """Tests the ability to add plugins

    :param plugin_manager_fixture: Fixture object which yields
    InfraredPluginManger object
    """
    plugin_manager = plugin_manager_fixture()

    for plugin_dir, plugins_cnt in (
            ('type1_plugin1', 1),    # Add a plugin
            ('type1_plugin2', 2),    # Add a plugin - same type
            ('type2_plugin1', 3)):   # Add a plugin - different type

        plugin_dict = get_plugin_spec_flatten_dict(
            os.path.join(SAMPLE_PLUGINS_DIR, plugin_dir))

        plugin_manager.add_plugin(plugin_dict['dir'])

        assert plugin_dict['name'] in plugin_manager.PLUGINS_DICT,\
            "Plugin wasn't added to the plugins manager."
        assert plugin_in_conf(
            plugins_conf=plugin_manager.config_file,
            plugin_type=plugin_dict['type'],
            plugin_name=plugin_dict['name']), \
            "Plugin wasn't added to conf file."
        assert len(plugin_manager.PLUGINS_DICT) == plugins_cnt


def test_load_plugin(plugin_manager_fixture):
    """Test that an existing plugin can be loaded and it's properties

    :param plugin_manager_fixture: Fixture object which yields
    InfraredPluginManger object
    """
    plugin_dir = 'type1_plugin1'
    plugin_dict = get_plugin_spec_flatten_dict(
        os.path.join(os.path.abspath(SAMPLE_PLUGINS_DIR), plugin_dir))

    plugin_manager = plugin_manager_fixture({
        plugin_dict['type']: {
            plugin_dict['name']: plugin_dict['dir']}
    })

    plugin = plugin_manager.get_plugin(plugin_name=plugin_dict['name'])

    assert type(plugin) is InfraredPlugin, "Failed to add a plugin"
    assert plugin.name == plugin_dict['name'], "Wrong plugin name"
    assert plugin.description == plugin_dict['description'], \
        'Wrong plugin description'


def test_entry_point(plugin_manager_fixture):
    """Test that spec file has a valid entry point

    :param plugin_manager_fixture: Fixture object which yields
    InfraredPluginManger object
    """
    plugin_dir = 'plugin_with_entry_point'
    plugin_dict = get_plugin_spec_flatten_dict(
        os.path.join(os.path.abspath(SAMPLE_PLUGINS_DIR), plugin_dir))

    plugin_manager = plugin_manager_fixture({
        plugin_dict['type']: {
            plugin_dict['name']: plugin_dict['dir']}
    })

    plugin = plugin_manager.get_plugin(plugin_name=plugin_dict['name'])

    assert plugin.playbook == os.path.join(plugin_dict['dir'],
                                           "example.yml")


def test_add_plugin_with_same_name(plugin_manager_fixture):
    """Tests that it not possible to add a plugin with a name that already
    exists

    :param plugin_manager_fixture: Fixture object which yields
    InfraredPluginManger object
    """
    plugin_dir = 'type1_plugin1'
    plugin_dict = get_plugin_spec_flatten_dict(
        os.path.join(SAMPLE_PLUGINS_DIR, plugin_dir))

    plugin_manager = plugin_manager_fixture({
        plugin_dict['type']: {
            plugin_dict['name']: plugin_dict['dir']}
    })

    # snapshot the conf state so we can prove nothing was modified
    plugins_cfg_mtime_before_add = os.path.getmtime(plugin_manager.config_file)
    plugins_cnt_before_try = len(plugin_manager.PLUGINS_DICT)

    with pytest.raises(IRPluginExistsException):
        plugin_manager.add_plugin(plugin_dict['dir'])

    assert plugins_cnt_before_try == len(plugin_manager.PLUGINS_DICT)
    assert os.path.getmtime(
        plugin_manager.config_file) == plugins_cfg_mtime_before_add, \
        "Plugins configuration file has been modified."


def test_add_plugin_unsupported_type(plugin_manager_fixture):
    """Test that it's not possible to add a plugin from unsupported type

    :param plugin_manager_fixture: Fixture object which yields
    InfraredPluginManger object
    """
    plugin_manager = plugin_manager_fixture()

    plugin_dict = get_plugin_spec_flatten_dict(
        os.path.join(SAMPLE_PLUGINS_DIR, 'unsupported_plugin'))

    # snapshot the conf state so we can prove nothing was modified
    plugins_cfg_mtime_before_add = os.path.getmtime(plugin_manager.config_file)
    plugins_cnt_before_try = len(plugin_manager.PLUGINS_DICT)

    with pytest.raises(IRUnsupportedPluginType):
        plugin_manager.add_plugin(plugin_dict['dir'])

    assert not plugin_in_conf(
        plugins_conf=plugin_manager.config_file,
        plugin_type=plugin_dict['type'],
        plugin_name=plugin_dict['name']), \
        "Plugin was added to conf file."
    assert plugins_cnt_before_try == len(plugin_manager.PLUGINS_DICT)
    assert os.path.getmtime(
        plugin_manager.config_file) == plugins_cfg_mtime_before_add, \
        "Plugins configuration file has been modified."
def test_remove_plugin(plugin_manager_fixture):
    """ Tests the ability to remove a plugin

    :param plugin_manager_fixture: Fixture object which yields
    InfraredPluginManger object
    """
    # Seed the manager with three plugins of two types.
    plugins_conf = {}
    for plugin_dir in ('type1_plugin1', 'type1_plugin2', 'type2_plugin1'):
        plugin_dict = get_plugin_spec_flatten_dict(
            os.path.join(os.path.abspath(SAMPLE_PLUGINS_DIR), plugin_dir))
        dict_insert(plugins_conf,
                    plugin_dict['dir'],
                    plugin_dict['type'],
                    plugin_dict['name'],)

    plugin_manager = plugin_manager_fixture(plugins_conf)

    # Remove them one by one, checking the remaining count each time.
    for plugin_dir, plugins_cnt in (
            ('type1_plugin1', 2),
            ('type2_plugin1', 1),
            ('type1_plugin2', 0)):

        plugin_dict = get_plugin_spec_flatten_dict(
            os.path.join(SAMPLE_PLUGINS_DIR, plugin_dir))

        assert plugin_dict['name'] in plugin_manager.PLUGINS_DICT, \
            "Can't remove unexisting plugin"

        plugin_manager.remove_plugin(plugin_dict['name'])

        with pytest.raises(KeyError):
            plugin_manager.get_plugin(plugin_name=plugin_dict['name'])

        assert not plugin_in_conf(
            plugins_conf=plugin_manager.config_file,
            plugin_type=plugin_dict['type'],
            plugin_name=plugin_dict['name']), \
            "Plugin wasn't removed from conf file."
        assert len(plugin_manager.PLUGINS_DICT) == plugins_cnt


def test_remove_unexisting_plugin(plugin_manager_fixture):
    """Tests the behavior of removing unexisting plugin

    Checks that no exception is being raised and no changes in
    InfraredPluginManager dict and configuration file
    :param plugin_manager_fixture: Fixture object which yields
    InfraredPluginManger object
    """
    plugin_manager = plugin_manager_fixture()

    # snapshot the conf state so we can prove nothing was modified
    plugins_cfg_mtime_before_add = os.path.getmtime(plugin_manager.config_file)
    plugins_cnt_before_try = len(plugin_manager.PLUGINS_DICT)

    with pytest.raises(IRFailedToRemovePlugin):
        plugin_manager.remove_plugin('unexisting_plugin')

    assert plugins_cnt_before_try == len(plugin_manager.PLUGINS_DICT)
    assert os.path.getmtime(
        plugin_manager.config_file) == plugins_cfg_mtime_before_add, \
        "Plugins configuration file has been modified."
# Each case is an infrared CLI invocation plus the plugins conf it needs
# to be prepared with beforehand (None -> default empty conf).
@pytest.mark.parametrize("input_args, plugins_conf", [
    ("plugin list", None),
    ("plugin add tests/example/plugins/type1_plugin1", None),
    ("plugin remove type1_plugin1", dict(
        supported_type1=dict(
            type1_plugin1='tests/example/plugins/type1_plugin1'))),
    ("plugin add "
     "tests/example/plugins/type1_plugin1 "
     "tests/example/plugins/type1_plugin2", None),
    ("plugin remove type1_plugin1 type1_plugin2", dict(
        supported_type1=dict(
            type1_plugin1='tests/example/plugins/type1_plugin1',
            type1_plugin2='tests/example/plugins/type1_plugin2'))),
])
def test_plugin_cli(plugin_manager_fixture, input_args, plugins_conf):
    """Tests that plugin CLI works

    :param plugin_manager_fixture: Fixture object which yields
    InfraredPluginManger object
    :param input_args: infrared's testing arguments
    :param plugins_conf: Plugins conf data as a dictionary
    """
    plugin_manager_fixture(plugins_conf)

    from infrared.main import main as ir_main
    rc = ir_main(input_args.split())

    assert rc == 0, \
        "Return code ({}) != 0, cmd='infrared {}'".format(rc, input_args)


def test_add_plugin_no_spec(plugin_manager_fixture):
    """Tests that it's not possible to add plugin without a spec file

    :param plugin_manager_fixture: Fixture object which yields
    InfraredPluginManger object
    """
    plugin_dir = os.path.join(SAMPLE_PLUGINS_DIR, 'plugin_without_spec')

    plugin_manager = plugin_manager_fixture({})

    # snapshot the conf state so we can prove nothing was modified
    plugins_cfg_mtime_before_add = os.path.getmtime(plugin_manager.config_file)
    plugins_cnt_before_try = len(plugin_manager.PLUGINS_DICT)

    with pytest.raises(IRValidatorException):
        plugin_manager.add_plugin(plugin_dir)

    assert plugins_cnt_before_try == len(plugin_manager.PLUGINS_DICT)
    assert os.path.getmtime(
        plugin_manager.config_file) == plugins_cfg_mtime_before_add, \
        "Plugins configuration file has been modified."
@pytest.mark.parametrize("description, plugin_spec", [ ('no_description', { 'plugin_type': 'supported_type', 'subparsers': { 'sample_plugin1:': {}}}), ('no_type', { 'description': 'some plugin description', 'subparsers': { 'sample_plugin1:': {}}}), ('no_value', { 'plugin_type': '', 'subparsers': { 'sample_plugin1:': {}}}), ('no_subparsers_key', { 'plugin_type': 'supported_type', 'description': 'some plugin description'}), ('no_subparsers_value', { 'plugin_type': 'supported_type', 'description': 'some plugin description', 'subparsers': ''}), ('no_entry_point_value',{ 'plugin_type': 'supported_type', 'entry_point': '', 'subparsers': { 'sample_plugin1:': {}}}), ('no_entry_point_value_in_config',{ 'config': { "plugin_type": 'supported_type', "entry_point": '', }, 'subparsers': { 'sample_plugin1:': {}}}), ('no_type_in_config', { 'config': { }, 'description': 'some plugin description', 'subparsers': { 'sample_plugin1:': {}}}), ]) def test_add_plugin_corrupted_spec(tmpdir_factory, description, plugin_spec): """Tests that it's not possible to add a plugin with invalid spec file :param tmpdir_factory: pytest builtin fixture for creating temp dirs :param description: test description (adds a description in pytest run) :param plugin_spec: dictionary with data for spec file :return: """ lp_dir = tmpdir_factory.mktemp('test_tmp_dir') lp_file = lp_dir.join('plugin.spec') with open(lp_file.strpath, 'w') as fp: yaml.dump(plugin_spec, fp, default_flow_style=True) try: with pytest.raises(IRValidatorException): SpecValidator.validate_from_file(lp_file.strpath) finally: lp_dir.remove() def test_plugin_with_unsupporetd_option_type_in_spec(plugin_manager_fixture): """Tests that the user get a proper error :param plugin_manager_fixture: Fixture object which yields InfraredPluginManger object """ plugin_dir = os.path.join(SAMPLE_PLUGINS_DIR, 'plugin_with_unsupported_option_type_in_spec') plugin_dict = get_plugin_spec_flatten_dict(plugin_dir) plugin_manager = plugin_manager_fixture() 
plugin_manager.add_plugin(plugin_dir) from infrared.main import main as ir_main with pytest.raises(IRUnsupportedSpecOptionType): ir_main([plugin_dict['name'], '--help']) def test_add_plugin_from_git(plugin_manager_fixture, mocker): plugin_manager = plugin_manager_fixture() mock_git = mocker.patch("infrared.core.services.plugins.git.Repo") mock_os = mocker.patch("infrared.core.services.plugins.os") mock_os.path.exists.return_value = False mock_os.listdir.return_value = ["sample_plugin"] mock_tempfile = mocker.patch("infrared.core.services.plugins.tempfile") mock_shutil = mocker.patch("infrared.core.services.plugins.shutil") plugin_dict = get_plugin_spec_flatten_dict( os.path.join(SAMPLE_PLUGINS_DIR, 'type1_plugin1')) mock_os.path.join.return_value = os.path.join(plugin_dict["dir"], PLUGIN_SPEC) # add_plugin call plugin_manager.add_plugin( "https://sample_github.null/plugin_repo.git", rev="test", skip_roles=True) mock_tempfile.mkdtemp.assert_called_once() mock_git.clone_from.assert_called_with( url='https://sample_github.null/plugin_repo.git', to_path=mock_os.path.join.return_value, kill_after_timeout=300) mock_os.join.has_call(SAMPLE_PLUGINS_DIR, mock_os.listdir.return_value[0]) mock_os.join.has_call(mock_tempfile.mkdtemp.return_value, mock_os.listdir.return_value[0]) mock_shutil.rmtree.assert_called_with(mock_os.path.join.return_value) def test_add_plugin_from_git_dirname_from_spec(plugin_manager_fixture, mocker): """ Validate that we take the folder name from the spec plugin name instead of the git repo name :param plugin_manager_fixture: Fixture object which yields InfraredPluginManger object :param mocker: mocker fixture """ def clone_from_side_effect(url, to_path, **kwargs): """ Define a side effect function to override the original behaviour of clone_from """ shutil.copytree(src=plugin_dict["dir"], dst=to_path) plugin_manager = plugin_manager_fixture() mock_git = mocker.patch("infrared.core.services.plugins.git.Repo") # use side effect to use copytree instead 
of original clone mock_git.clone_from.side_effect = clone_from_side_effect mock_os_path_exists = mocker.patch( "infrared.core.services.plugins.os.path.exists") # set to false in order to enter the git section # in if/else inside add_plugin func mock_os_path_exists.return_value = False mock_tempfile = mocker.patch("infrared.core.services.plugins.tempfile") mock_tempfile.mkdtemp.return_value = tempfile.mkdtemp(prefix="ir-") mock_shutil = mocker.patch("infrared.core.services.plugins.shutil") plugin_dict = get_plugin_spec_flatten_dict( os.path.abspath(os.path.join(SAMPLE_PLUGINS_DIR, 'type1_plugin1'))) # add_plugin call with pytest.raises(IRFailedToAddPlugin): plugin_manager.add_plugin( "https://sample_github.null/plugin_repo.git") mock_shutil.rmtree.assert_called_with(os.path.join( mock_tempfile.mkdtemp.return_value, "plugin_repo")) # clean tmp folder shutil.rmtree(mock_tempfile.mkdtemp.return_value) # check it was cloned with the temp name mock_git.clone_from.assert_called_with( url='https://sample_github.null/plugin_repo.git', to_path=os.path.join( mock_tempfile.mkdtemp.return_value, "plugin_repo"), kill_after_timeout=300) # check that it was copied with the plugin name and not repo name mock_shutil.copytree.assert_called_with( os.path.join(mock_tempfile.mkdtemp.return_value, "plugin_repo"), os.path.join(plugin_manager.plugins_dir, plugin_dict["name"])) def test_add_plugin_from_git_exception(plugin_manager_fixture, mocker): plugin_manager = plugin_manager_fixture() mock_git = mocker.patch("infrared.core.services.plugins.git") mock_git.Repo.clone_from.side_effect = git.exc.GitCommandError( "some_git_cmd", 1) mock_git.exc.GitCommandError = git.exc.GitCommandError mock_tempfile = mocker.patch("infrared.core.services.plugins.tempfile") mock_shutil = mocker.patch("infrared.core.services.plugins.shutil") mock_os = mocker.patch("infrared.core.services.plugins.os") mock_os.path.exists.return_value = False # add_plugin call with pytest.raises(IRFailedToAddPlugin): 
plugin_manager.add_plugin( "https://sample_github.null/plugin_repo.git") mock_shutil.rmtree.assert_called_with(mock_tempfile.mkdtemp.return_value) def validate_plugins_presence_in_conf( plugin_manager, plugins_dict, present=True): """Validate presence of plugins in the configuration file :param plugin_manager: InfraredPluginManager object :param plugins_dict: Dict of plugins {plugin_name: plugin_dir_path, ...} :param present: Whether all plugins in the dict should be present in the plugins configuration file or not. """ assert present in (True, False), \ "'absent' accept only Boolean values, got: '{}'".format(str(present)) with open(plugin_manager.config_file) as config_file: plugins_cfg = configparser.ConfigParser() if (sys.version_info > (3, 2)): plugins_cfg.read_file(config_file) else: plugins_cfg.readfp(config_file) for plugin_path in plugins_dict.values(): plugin = InfraredPlugin(plugin_path['src']) if present: assert plugins_cfg.has_option(plugin.type, plugin.name), \ "Plugin '{}' was suppose to be in the plugins " \ "configuration file".format(plugin.name) else: assert not plugins_cfg.has_option(plugin.type, plugin.name), \ "Plugin '{}' wasn't suppose to be in the plugins " \ "configuration file".format(plugin.name) def test_plugin_add_all(plugin_manager_fixture): """Tests the add and remove all plugins functioning :param plugin_manager_fixture: Fixture object which yields InfraredPluginManger object """ tests_plugins = ( 'provision_plugin1', 'provision_plugin2', 'install_plugin1', 'install_plugin2', 'test_plugin1', 'test_plugin2' ) tests_plugins_dir = 'tests/example/plugins/add_remove_all_plugins/' plugins_registry = \ dict((pname, {'src': os.path.join(tests_plugins_dir, pname)}) for pname in tests_plugins) plugin_manager = plugin_manager_fixture() # Validates that plugins aren't in configuration file from the beginning validate_plugins_presence_in_conf( plugin_manager, plugins_registry, present=False) # Validates all plugins are in the configuration file 
plugin_manager.add_all_available(plugins_registry=plugins_registry) validate_plugins_presence_in_conf( plugin_manager, plugins_registry, present=True) # Validates all plugins are no longer in the configuration file plugin_manager.remove_all() validate_plugins_presence_in_conf( plugin_manager, plugins_registry, present=False) def test_git_plugin_update(git_plugin_manager_fixture): """Tests the git plugin update functionality Tests the following: 1. Plugin update without new changes 2. Plugin update to an older commit 3. No update when there are local changes 4. Switch back to master after checking out old revision 5. Switch to revision that does not exists :param git_plugin_manager_fixture: Fixture object which yields InfraredPluginManger object with git plugin installed """ gpm = git_plugin_manager_fixture repo = git.Repo(gpm.get_plugin('git_plugin').path) commits_list = repo.git.rev_list('HEAD').splitlines() assert len(commits_list) > 1, \ "Can perform the test without at least two commits" # Plugin update without new changes assert gpm.update_plugin('git_plugin') is None, \ "Failed to pull changes from remote with up-to-date local branch" # Plugin update to an older commit gpm.update_plugin(plugin_name='git_plugin', revision=commits_list[-1]) assert commits_list[-1] == repo.git.rev_parse('HEAD'), \ "Failed to Update plugin to: {}".format(commits_list[-1]) # No update when there are local changes file_name = os.path.join(repo.working_dir, 'test.txt') # create new file and add it to git to create local changes with open(file_name, 'w') as f: f.write('test') repo.git.add([file_name]) with pytest.raises(IRFailedToUpdatePlugin): gpm.update_plugin(plugin_name='git_plugin') assert commits_list[-1] == repo.git.rev_parse('HEAD'), \ "Plugin wasn't supposed to be changed when update failed..." 
# Switch back to master after checking out old revision gpm.update_plugin(plugin_name='git_plugin', revision='master', hard_reset=True) assert commits_list[0] == repo.git.rev_parse('HEAD'), \ "Plugin haven't been updated from '{}' to '{}'".format( commits_list[-1], commits_list[0]) # Switch to revision that does not exists branch_before = repo.active_branch with pytest.raises(IRFailedToUpdatePlugin): gpm.update_plugin(plugin_name='git_plugin', revision='not_exists_rev') assert branch_before == repo.active_branch, \ "Plugin's revision wasn't supposed to change" @pytest.mark.parametrize("description, registry_yaml", [ ('no_type', { 'some_plugin_name': { 'src': '/path/to/plugin', 'desc': 'some plugin description' } }), ('no_desc', { 'some_plugin_name': { 'src': '/path/to/plugin', 'type': 'supported_type' } }), ('no_src', { 'some_plugin_name': { 'desc': 'some plugin description', 'type': 'supported_type' } }), ('empty_revision', { 'some_plugin_name': { 'src': '/path/to/plugin', 'type': 'supported_type', 'desc': 'some plugin description', 'rev': '' } }), ('empty_src_path', { 'some_plugin_name': { 'src': '/path/to/plugin', 'type': 'supported_type', 'desc': 'some plugin description', 'src_path': '' } }), ('empty_plugin_key', { '': { 'src': '/path/to/plugin', 'type': 'supported_type', 'desc': 'some plugin description', 'src_path': '' } }), ('additional_not_allowed_param', { '': { 'src': '/path/to/plugin', 'type': 'supported_type', 'desc': 'some plugin description', 'src_path': '/relative/path', 'rev': 'some_rev', 'not_allowed_additional_key': 'some_value' } }), ]) def test_import_plugins_corrupted_registry(tmpdir_factory, description, registry_yaml): """ Tests that it's not possible to import plugins with invalid registry file :param tmpdir_factory: pytest builtin fixture for creating temp dirs :param description: test description (adds a description in pytest run) :param registry_yaml: dictionary with data for registry file :return: """ lp_dir = 
tmpdir_factory.mktemp('test_tmp_dir') lp_file = lp_dir.join('registry.yaml') with open(lp_file.strpath, 'w') as fp: yaml.dump(registry_yaml, fp, default_flow_style=True) try: with pytest.raises(IRValidatorException): RegistryValidator.validate_from_file(lp_file.strpath) finally: lp_dir.remove() def test_import_plugins_from_registry(tmpdir, plugin_manager_fixture): """ Test that plugins import actually imports the plugins specified in the registry file supplied :param tmpdir: pytest builtin fixture for creating temp dirs :param plugin_manager_fixture: Fixture object which yields """ plugin_manager = plugin_manager_fixture() plugins_registry = os.path.join(SAMPLE_PLUGINS_DIR, "registry_example.yml") with open(plugins_registry) as fp: registry_yaml = yaml.safe_load(fp) # prepare tmp library folder to hold the dependencies tmp_pluginss_dir = str(tmpdir.mkdir("tmp_pluginss_dir")) plugin_manager.plugins_dir = tmp_pluginss_dir # Validates that plugins aren't in configuration file from the beginning validate_plugins_presence_in_conf( plugin_manager, registry_yaml, present=False) # import all plugins from registry plugin_manager.import_plugins(plugins_registry) # check that plugins were copied to the plugins directory assert os.path.isdir(os.path.join( tmp_pluginss_dir, 'type1_plugin1')) assert os.path.isdir(os.path.join( tmp_pluginss_dir, 'type2_plugin1')) assert os.path.isdir(os.path.join( tmp_pluginss_dir, 'type1_plugin2')) # Validates all plugins are in the configuration file validate_plugins_presence_in_conf( plugin_manager, registry_yaml, present=True) def test_add_plugin_with_src_path(plugin_manager_fixture, mocker): """ Validates that add plugin copies the whole directory and only reference to the plugin inside the directory :param plugin_manager_fixture: Fixture object which yields InfraredPluginManger object :param mocker: mocker fixture """ def clone_from_side_effect(url, to_path, **kwargs): """ Define a side effect function to override the original behaviour of 
clone_from """ shutil.copytree(src=plugin_src, dst=to_path) return to_path plugin_manager = plugin_manager_fixture() mock_git = mocker.patch("infrared.core.services.plugins.git.Repo") # use side effect to use copytree instead of original clone mock_git.clone_from.side_effect = clone_from_side_effect plugin_src = os.path.abspath(os.path.join(SAMPLE_PLUGINS_DIR, "plugin_with_src_path")) # add_plugin call plugin_manager.add_plugin( plugin_source="https://sample_github.null/plugin_repo.git", plugin_src_path="infrared_plugin") plugin = plugin_manager.get_plugin("plugin_with_src_path") expected_plugin_path = os.path.join(plugin_manager.plugins_dir, "plugin_with_src_path") expected_plugin_src_path = \ os.path.join(expected_plugin_path, "infrared_plugin") assert expected_plugin_src_path == plugin.path, \ "Plugin path is not as expected" # compare the dirs before and after to make sure we copied it entirely dirs_cmp = filecmp.dircmp(plugin_src, expected_plugin_path) assert dirs_cmp.right_list == dirs_cmp.left_list, \ "Plugin directory is does not contain the original files from " \ "the original plugin source."
apache-2.0
jesusmariocalleja/projecte-gdsa-2014
read.py
1
1880
# -*- coding: utf-8 -*-
"""Filter tag and ground-truth CSV files down to the images present on disk.

Collects the image file names found by a glob pattern and writes new CSV
files keeping only the rows whose leading token (the image id) matches one
of those image names.
"""
import csv
import glob
import os
import sys

# Some rows in the source CSVs are very long; lift the csv module's default
# per-field size limit so they can be parsed.
csv.field_size_limit(sys.maxsize)

# Image ids used by default when writeTags/writeGroundTruth are called
# without an explicit `images` argument; populated by the main program.
evalImages = []


def readImagesFileNames(_path):
    """Return the extension-less base names of every file matching *_path*.

    :param _path: glob pattern, e.g. ``"images/*"``.
    :return: list of file names without directory or extension.
    """
    # os.path.basename/splitext replace the previous manual split on "/",
    # which only worked for paths exactly one directory deep.
    return [os.path.splitext(os.path.basename(item))[0]
            for item in glob.glob(_path)]


def _extract_image_id(row):
    """Return the image id (first whitespace-separated token) of a CSV row.

    Preserves the original parsing scheme: the row list is stringified and
    the leading ``['`` / trailing ``']`` decoration is stripped before
    splitting on spaces.
    """
    return str(row).strip("[ ' ]").split(' ')[0]


def _filter_rows(_iFileName, _oFileName, images):
    """Copy rows from *_iFileName* to *_oFileName* whose image id is in *images*.

    Shared implementation for writeTags/writeGroundTruth (they previously
    duplicated this logic verbatim, and leaked the output file handle).
    """
    wanted = set(images)  # O(1) membership instead of an inner loop
    # `with` guarantees both handles are closed; newline='' is the csv
    # module's requirement for files it reads/writes on Python 3.
    with open(_oFileName, 'w', newline='') as oFile, \
            open(_iFileName, newline='') as iFile:
        wr = csv.writer(oFile)
        for row in csv.reader(iFile):
            if _extract_image_id(row) in wanted:
                wr.writerow(row)


def writeTags(_iFileName, _oFileName, images=None):
    """Write the tag rows whose image id is in *images*.

    :param images: iterable of image ids; defaults to the module-level
        ``evalImages`` for backward compatibility with the original script.
    """
    _filter_rows(_iFileName, _oFileName,
                 evalImages if images is None else images)


def writeGroundTruth(_iFileName, _oFileName, images=None):
    """Write the ground-truth rows whose image id is in *images*."""
    _filter_rows(_iFileName, _oFileName,
                 evalImages if images is None else images)


######################
#### MAIN PROGRAM ####
######################

if __name__ == "__main__":
    # Guarded so importing this module no longer triggers file I/O.
    # FILES VARS
    images_path = "images/*"
    main_id_tag_filename = "id_tag/document_id_tag.csv"
    eval_id_tag_filename = "id_tag/evaluable_document_id_tag.csv"
    main_groundtruth_filename = "groundtruth/groundtruth.csv"
    eval_groundtruth_filename = "groundtruth/evaluable_groundtruth.csv"

    print("Reading image names...")
    evalImages = readImagesFileNames(images_path)

    print("Writing image-tags file...")
    writeTags(main_id_tag_filename, eval_id_tag_filename)

    print("Writing groundtruth file...")
    writeGroundTruth(main_groundtruth_filename, eval_groundtruth_filename)
gpl-3.0
cheng--zhang/xen-api
scripts/poweron/power-on.py
34
2200
#!/usr/bin/env python
# Example script which shows how to use the XenAPI to find a particular Host's
# management interface and send it a wake-on-LAN packet.
import subprocess, sys, socket, struct, time, syslog
import XenAPI, inventory
import XenAPIPlugin


class HOST_POWER_ON_NOT_CONFIGURED(Exception):
    """Raised when the remote host has no power-on method configured."""
    def __init__(self, *args):
        Exception.__init__(self, *args)


def waitForXapi(session, host):
    """Poll until the host's xapi reports itself live, or ~10 minutes pass.

    :param session: authenticated XenAPI session
    :param host: opaque ref of the host being powered on
    :return: "True" or "False" as a string (the plugin protocol passes
        strings back to the caller)
    """
    attempts = 0
    finished = False
    metrics = None
    # 120 attempts x 5 second sleeps == at most ~10 minutes of waiting.
    while not finished and (attempts < 120):
        attempts = attempts + 1
        time.sleep(5)
        metrics = session.xenapi.host.get_metrics(host)
        try:
            finished = session.xenapi.host_metrics.get_live(metrics)
        except Exception:
            # The metrics record may not be readable while the host is still
            # booting; keep polling.  Narrowed from a bare "except:" so that
            # KeyboardInterrupt/SystemExit are no longer swallowed during
            # this long polling loop.
            pass
    return str(finished)


def main(session, args):
    """XenAPI plugin entry point: power on the host named by remote_host_uuid.

    Dispatches on the host's configured power_on_mode: "iLO"/"DRAC",
    "wake-on-lan", any other non-empty value (treated as the name of a
    custom script module), or raises HOST_POWER_ON_NOT_CONFIGURED when the
    mode is empty (power-on disabled).
    """
    remote_host_uuid = args['remote_host_uuid']
    # Find the remote Host
    remote_host = session.xenapi.host.get_by_uuid(remote_host_uuid)
    # Find the power_on_mode
    mode = session.xenapi.host.get_power_on_mode(remote_host)
    power_on_config = session.xenapi.host.get_power_on_config(remote_host)
    if mode == "iLO" or mode == "DRAC":
        ip = power_on_config['power_on_ip']
        user = power_on_config['power_on_user']
        # The password is stored as a XenAPI secret; resolve its value.
        secret = power_on_config['power_on_password_secret']
        secretref = session.xenapi.secret.get_by_uuid(secret)
        password = session.xenapi.secret.get_value(secretref)
        if mode == "iLO":
            modu = __import__('iLO')
            modu.iLO(ip, user, password)
        else:
            modu = __import__('DRAC')
            modu.DRAC(ip, user, password)
        return waitForXapi(session, remote_host)
    elif mode == "wake-on-lan":
        modu = __import__('wlan')
        return modu.wake_on_lan(session, remote_host, remote_host_uuid)
    # Custom script: the mode string names the module to import and run.
    elif mode != "":
        modu = __import__(mode)
        modu.custom(session, remote_host, power_on_config)
        return waitForXapi(session, remote_host)
    # Disabled
    else:
        raise HOST_POWER_ON_NOT_CONFIGURED()


if __name__ == "__main__":
    XenAPIPlugin.dispatch({"main": main})
lgpl-2.1
nathanaevitas/odoo
openerp/addons/resource/faces/plocale.py
433
1910
############################################################################ # Copyright (C) 2005 by Reithinger GmbH # mreithinger@web.de # # This file is part of faces. # # faces is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # faces is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the # Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA ############################################################################ import gettext import os.path import locale import sys def _get_translation(): try: return gettext.translation("faces") except: try: if sys.frozen: path = os.path.dirname(sys.argv[0]) path = os.path.join(path, "resources", "faces", "locale") else: path = os.path.split(__file__)[0] path = os.path.join(path, "locale") return gettext.translation("faces", path) except Exception, e: return None def get_gettext(): trans = _get_translation() if trans: return trans.ugettext return lambda msg: msg def get_encoding(): trans = _get_translation() if trans: return trans.charset() return locale.getpreferredencoding() # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
Kalimaha/pact-test
tests/runners/service_providers/test_suite.py
1
1344
"""Tests for the service-provider test-suite runner (collection & errors)."""
import os
from pact_test.either import *
from pact_test.config.config_builder import Config
from pact_test.runners.service_providers.test_suite import ServiceProviderTestSuiteRunner  # nopep8


def test_empty_tests_list(monkeypatch):
    # With an existing tests path but an empty directory listing, the
    # runner should report that there is nothing to verify.
    config = Config()
    config.provider_tests_path = os.path.join(os.getcwd(), 'tests',
                                              'resources',
                                              'service_providers')

    def empty_list(_):
        return []
    # Force os.listdir to pretend the directory is empty.
    monkeypatch.setattr(os, 'listdir', empty_list)

    t = ServiceProviderTestSuiteRunner(config)
    msg = 'There are no provider tests to verify.'
    assert t.collect_tests().value == msg


def test_collect_tests():
    # The fixture directory contains exactly two provider tests.
    config = Config()
    config.provider_tests_path = os.path.join(os.getcwd(), 'tests',
                                              'resources',
                                              'service_providers')

    t = ServiceProviderTestSuiteRunner(config)
    tests = t.collect_tests().value
    assert len(tests) == 2


def test_missing_test_directory():
    # A non-existent tests path should surface as a Left (failure) value
    # carrying the underlying OS error message.
    config = Config()
    config.provider_tests_path = os.path.join(os.getcwd(), 'spam')
    t = ServiceProviderTestSuiteRunner(config)
    result = t.verify()
    assert type(result) is Left
    assert result.value.startswith("[Errno 2] No such file or directory:")
mit
flgiordano/netcash
+/google-cloud-sdk/lib/surface/billing/accounts/projects/unlink.py
7
1638
# Copyright 2016 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Command to disable billing.""" import textwrap from googlecloudsdk.api_lib.billing import utils from googlecloudsdk.calliope import base class Unlink(base.Command): """Unlink the account (if any) linked with a project.""" detailed_help = { 'DESCRIPTION': textwrap.dedent( """ This command unlinks a project from it's linked billing account. This disables billing on the project. """ ) } @staticmethod def Args(parser): parser.add_argument('project_id', **utils.PROJECT_ID_ARG_PARAMS) def Run(self, args): billing = self.context['billing_client'] messages = self.context['billing_messages'] result = billing.projects.UpdateBillingInfo( messages.CloudbillingProjectsUpdateBillingInfoRequest( name='projects/{project_id}'.format( project_id=args.project_id, ), projectBillingInfo=messages.ProjectBillingInfo( billingAccountName='', ), ) ) return result
bsd-3-clause
nuobit/website
website_menu_by_user_status/__init__.py
37
1046
# -*- encoding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # This module copyright (C) 2010 - 2014 Savoir-faire Linux # (<http://www.savoirfairelinux.com>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from . import models
agpl-3.0
mohitsethi/packstack
packstack/modules/common.py
6
1277
# -*- coding: utf-8 -*-

from ..installer import utils


def filtered_hosts(config, exclude=True, dbhost=True):
    """Return the set of hosts which need installation.

    Takes CONFIG_MYSQL_INSTALL into account when *dbhost* is True and
    EXCLUDE_SERVERS into account when *exclude* is True.
    """
    excluded = {srv.strip()
                for srv in config.get('EXCLUDE_SERVERS', '').split(',')
                if srv.strip()}
    installing_db = config.get('CONFIG_MYSQL_INSTALL') == 'y'

    hosts = set()
    for key, host in utils.host_iter(config):
        # When the DB host matters and MySQL is not being installed,
        # the MySQL host must not be counted for installation.
        if dbhost and not installing_db and key == 'CONFIG_MYSQL_HOST':
            continue
        hosts.add(host)

    return hosts - excluded if exclude else hosts


def is_all_in_one(config):
    """Return True if packstack is running an all-in-one setup,
    otherwise return False.
    """
    # Even hosts excluded from installation count towards all-in-one
    # detection (exclude=False); the MySQL host is still skipped when
    # MySQL is not being installed (dbhost=True).
    return len(filtered_hosts(config, exclude=False, dbhost=True)) == 1
apache-2.0
Technorip/Myntra
Django Backend/myntra/env/lib/python2.7/site-packages/django/contrib/gis/sitemaps/kml.py
55
2543
from django.apps import apps
from django.contrib.gis.db.models.fields import GeometryField
from django.contrib.sitemaps import Sitemap
from django.core import urlresolvers
from django.db import models


class KMLSitemap(Sitemap):
    """
    A minimal hook to produce KML sitemaps.
    """
    geo_format = 'kml'

    def __init__(self, locations=None):
        # If no locations specified, then we try to build for
        # every model in installed applications.
        self.locations = self._build_kml_sources(locations)

    def _build_kml_sources(self, sources):
        """
        Goes through the given sources and returns a 3-tuple of
        the application label, module name, and field name of every
        GeometryField encountered in the sources.

        If no sources are provided, then all models.
        """
        kml_sources = []
        if sources is None:
            sources = apps.get_models()
        for source in sources:
            if isinstance(source, models.base.ModelBase):
                # A model class: collect every geometry field it declares.
                for field in source._meta.fields:
                    if isinstance(field, GeometryField):
                        kml_sources.append((source._meta.app_label,
                                            source._meta.model_name,
                                            field.name))
            elif isinstance(source, (list, tuple)):
                # Already an (app_label, model_name, field_name) triple;
                # validate its arity before accepting it.
                if len(source) != 3:
                    raise ValueError('Must specify a 3-tuple of (app_label, module_name, field_name).')
                kml_sources.append(source)
            else:
                raise TypeError('KML Sources must be a model or a 3-tuple.')
        return kml_sources

    def get_urls(self, page=1, site=None, protocol=None):
        """
        This method is overridden so the appropriate `geo_format` attribute
        is placed on each URL element.
        """
        urls = Sitemap.get_urls(self, page=page, site=site, protocol=protocol)
        for url in urls:
            url['geo_format'] = self.geo_format
        return urls

    def items(self):
        # The "items" of this sitemap are the (app, model, field) triples.
        return self.locations

    def location(self, obj):
        # Reverse the KML/KMZ serving view for the given source triple.
        return urlresolvers.reverse('django.contrib.gis.sitemaps.views.%s' % self.geo_format,
                                    kwargs={'label': obj[0],
                                            'model': obj[1],
                                            'field_name': obj[2],
                                            }
                                    )


class KMZSitemap(KMLSitemap):
    # Same as KMLSitemap but served in the compressed KMZ format.
    geo_format = 'kmz'
gpl-2.0
jevonearth/frappe
frappe/client.py
12
4791
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt

# Whitelisted RPC endpoints exposing generic document CRUD to API clients.
# NOTE(review): Python 2 module (`raise Exception, ...`, basestring,
# iteritems); keep py2-compatible.

from __future__ import unicode_literals
import frappe
from frappe import _
import frappe.model
import frappe.utils
import json, os

@frappe.whitelist()
def get_list(doctype, fields=None, filters=None, order_by=None,
    limit_start=None, limit_page_length=20):
    # Permission-checked listing (ignore_permissions explicitly False).
    return frappe.get_list(doctype, fields=fields, filters=filters,
        order_by=order_by, limit_start=limit_start,
        limit_page_length=limit_page_length, ignore_permissions=False)

@frappe.whitelist()
def get(doctype, name=None, filters=None):
    # Resolve the name from JSON filters when it was not given directly.
    if filters and not name:
        name = frappe.db.get_value(doctype, json.loads(filters))
    if not name:
        raise Exception, "No document found for given filters"

    doc = frappe.get_doc(doctype, name)
    if not doc.has_permission("read"):
        raise frappe.PermissionError

    return frappe.get_doc(doctype, name).as_dict()

@frappe.whitelist()
def get_value(doctype, fieldname, filters=None, as_dict=True, debug=False):
    if not frappe.has_permission(doctype):
        frappe.throw(_("Not permitted"), frappe.PermissionError)

    # filters/fieldname may arrive either as JSON or as plain strings;
    # fall through silently when they are not JSON.
    try:
        filters = json.loads(filters)
    except ValueError:
        # name passed, not json
        pass

    try:
        fieldname = json.loads(fieldname)
    except ValueError:
        # name passed, not json
        pass

    return frappe.db.get_value(doctype, filters, fieldname, as_dict=as_dict, debug=debug)

@frappe.whitelist()
def set_value(doctype, name, fieldname, value):
    # Standard fields (except idx) must not be edited over RPC.
    if fieldname!="idx" and fieldname in frappe.model.default_fields:
        frappe.throw(_("Cannot edit standard fields"))

    doc = frappe.db.get_value(doctype, name, ["parenttype", "parent"], as_dict=True)
    if doc and doc.parent and doc.parenttype:
        # Child table row: update it via its parent document.
        doc = frappe.get_doc(doc.parenttype, doc.parent)
        child = doc.getone({"doctype": doctype, "name": name})
        child.set(fieldname, value)
    else:
        doc = frappe.get_doc(doctype, name)
        df = doc.meta.get_field(fieldname)
        if df.fieldtype == "Read Only" or df.read_only:
            frappe.throw(_("Can not edit Read Only fields"))
        else:
            doc.set(fieldname, value)

    doc.save()

    return doc.as_dict()

@frappe.whitelist()
def insert(doc=None):
    if isinstance(doc, basestring):
        doc = json.loads(doc)

    if doc.get("parent") and doc.get("parenttype"):
        # inserting a child record
        parent = frappe.get_doc(doc.parenttype, doc.parent)
        parent.append(doc)
        parent.save()
        return parent.as_dict()
    else:
        doc = frappe.get_doc(doc).insert()
        return doc.as_dict()

@frappe.whitelist()
def save(doc):
    if isinstance(doc, basestring):
        doc = json.loads(doc)

    doc = frappe.get_doc(doc).save()
    return doc.as_dict()

@frappe.whitelist()
def rename_doc(doctype, old_name, new_name, merge=False):
    # Returns the (possibly adjusted) new name chosen by frappe.rename_doc.
    new_name = frappe.rename_doc(doctype, old_name, new_name, merge=merge)
    return new_name

@frappe.whitelist()
def submit(doc):
    if isinstance(doc, basestring):
        doc = json.loads(doc)

    doc = frappe.get_doc(doc)
    doc.submit()

    return doc.as_dict()

@frappe.whitelist()
def cancel(doctype, name):
    wrapper = frappe.get_doc(doctype, name)
    wrapper.cancel()

    return wrapper.as_dict()

@frappe.whitelist()
def delete(doctype, name):
    frappe.delete_doc(doctype, name)

@frappe.whitelist()
def set_default(key, value, parent=None):
    """set a user default value"""
    frappe.db.set_default(key, value, parent or frappe.session.user)
    # Defaults are cached per user; drop the cache so the change is visible.
    frappe.clear_cache(user=frappe.session.user)

@frappe.whitelist()
def make_width_property_setter(doc):
    # Only "width" property setters may be created through this endpoint;
    # anything else is silently ignored.
    if isinstance(doc, basestring):
        doc = json.loads(doc)
    if doc["doctype"]=="Property Setter" and doc["property"]=="width":
        frappe.get_doc(doc).insert(ignore_permissions = True)

@frappe.whitelist()
def bulk_update(docs):
    # Best-effort batch update: failures are collected (with tracebacks)
    # rather than aborting the whole batch.
    docs = json.loads(docs)
    failed_docs = []
    for doc in docs:
        try:
            ddoc = {key: val for key, val in doc.iteritems() if key not in ['doctype', 'docname']}
            doctype = doc['doctype']
            docname = doc['docname']
            doc = frappe.get_doc(doctype, docname)
            doc.update(ddoc)
            doc.save()
        except:
            failed_docs.append({
                'doc': doc,
                'exc': frappe.utils.get_traceback()
            })
    return {'failed_docs': failed_docs}

@frappe.whitelist()
def has_permission(doctype, docname, perm_type="read"):
    # perm_type can be one of read, write, create, submit, cancel, report
    return {"has_permission": frappe.has_permission(doctype, perm_type.lower(), docname)}

@frappe.whitelist()
def get_js(src):
    # Serve a JS file from the sites directory, refusing ".." path
    # traversal, and append translated messages for non-English users.
    src = src.strip("/").split("/")

    if ".." in src:
        frappe.throw(_("Invalid file path: {0}").format("/".join(src)))

    contentpath = os.path.join(frappe.local.sites_path, *src)
    with open(contentpath, "r") as srcfile:
        code = frappe.utils.cstr(srcfile.read())

    if frappe.local.lang != "en":
        messages = frappe.get_lang_dict("jsfile", contentpath)
        messages = json.dumps(messages)
        code += "\n\n$.extend(frappe._messages, {})".format(messages)

    return code
mit
jostep/tensorflow
tensorflow/contrib/kernel_methods/python/kernel_estimators_test.py
126
12248
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for kernel_estimators.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.contrib import layers from tensorflow.contrib.kernel_methods.python import kernel_estimators from tensorflow.contrib.kernel_methods.python.mappers.random_fourier_features import RandomFourierFeatureMapper from tensorflow.contrib.learn.python.learn.estimators import test_data from tensorflow.python.framework import constant_op from tensorflow.python.framework import sparse_tensor from tensorflow.python.framework.test_util import TensorFlowTestCase from tensorflow.python.platform import googletest def _linearly_separable_binary_input_fn(): """Returns linearly-separable data points (binary classification).""" return { 'feature1': constant_op.constant([[0.0], [1.0], [3.0]]), 'feature2': constant_op.constant([[1.0], [-1.2], [1.0]]), }, constant_op.constant([[1], [0], [1]]) def _linearly_inseparable_binary_input_fn(): """Returns non-linearly-separable data points (binary classification).""" return { 'multi_dim_feature': constant_op.constant([[1.0, 1.0], [1.0, -1.0], [-1.0, -1.0], [-1.0, 1.0]]), }, constant_op.constant([[1], [0], [1], [0]]) class KernelLinearClassifierTest(TensorFlowTestCase): def 
testNoFeatureColumnsOrKernelMappers(self): """Tests that at least one of feature columns or kernels is provided.""" with self.assertRaises(ValueError): _ = kernel_estimators.KernelLinearClassifier() def testInvalidKernelMapper(self): """ValueError raised when the kernel mappers provided have invalid type.""" class DummyKernelMapper(object): def __init__(self): pass feature = layers.real_valued_column('feature') kernel_mappers = {feature: [DummyKernelMapper()]} with self.assertRaises(ValueError): _ = kernel_estimators.KernelLinearClassifier( feature_columns=[feature], kernel_mappers=kernel_mappers) def testInvalidNumberOfClasses(self): """ValueError raised when the kernel mappers provided have invalid type.""" feature = layers.real_valued_column('feature') with self.assertRaises(ValueError): _ = kernel_estimators.KernelLinearClassifier( feature_columns=[feature], n_classes=1) def testLinearlySeparableBinaryDataNoKernels(self): """Tests classifier w/o kernels (log. regression) for lin-separable data.""" feature1 = layers.real_valued_column('feature1') feature2 = layers.real_valued_column('feature2') logreg_classifier = kernel_estimators.KernelLinearClassifier( feature_columns=[feature1, feature2]) logreg_classifier.fit( input_fn=_linearly_separable_binary_input_fn, steps=100) metrics = logreg_classifier.evaluate( input_fn=_linearly_separable_binary_input_fn, steps=1) # Since the data is linearly separable, the classifier should have small # loss and perfect accuracy. self.assertLess(metrics['loss'], 0.1) self.assertEqual(metrics['accuracy'], 1.0) # As a result, it should assign higher probability to class 1 for the 1st # and 3rd example and higher probability to class 0 for the second example. 
logreg_prob_predictions = list( logreg_classifier.predict_proba(input_fn= _linearly_separable_binary_input_fn)) self.assertGreater(logreg_prob_predictions[0][1], 0.5) self.assertGreater(logreg_prob_predictions[1][0], 0.5) self.assertGreater(logreg_prob_predictions[2][1], 0.5) def testLinearlyInseparableBinaryDataWithAndWithoutKernels(self): """Tests classifier w/ and w/o kernels on non-linearly-separable data.""" multi_dim_feature = layers.real_valued_column( 'multi_dim_feature', dimension=2) # Data points are non-linearly separable so there will be at least one # mis-classified sample (accuracy < 0.8). In fact, the loss is minimized for # w1=w2=0.0, in which case each example incurs a loss of ln(2). The overall # (average) loss should then be ln(2) and the logits should be approximately # 0.0 for each sample. logreg_classifier = kernel_estimators.KernelLinearClassifier( feature_columns=[multi_dim_feature]) logreg_classifier.fit( input_fn=_linearly_inseparable_binary_input_fn, steps=50) logreg_metrics = logreg_classifier.evaluate( input_fn=_linearly_inseparable_binary_input_fn, steps=1) logreg_loss = logreg_metrics['loss'] logreg_accuracy = logreg_metrics['accuracy'] logreg_predictions = logreg_classifier.predict( input_fn=_linearly_inseparable_binary_input_fn, as_iterable=False) self.assertAlmostEqual(logreg_loss, np.log(2), places=3) self.assertLess(logreg_accuracy, 0.8) self.assertAllClose(logreg_predictions['logits'], [[0.0], [0.0], [0.0], [0.0]]) # Using kernel mappers allows to discover non-linearities in data. Mapping # the data to a higher dimensional feature space using approx RBF kernels, # substantially reduces the loss and leads to perfect classification # accuracy. 
kernel_mappers = { multi_dim_feature: [RandomFourierFeatureMapper(2, 30, 0.6, 1, 'rffm')] } kernelized_logreg_classifier = kernel_estimators.KernelLinearClassifier( feature_columns=[], kernel_mappers=kernel_mappers) kernelized_logreg_classifier.fit( input_fn=_linearly_inseparable_binary_input_fn, steps=50) kernelized_logreg_metrics = kernelized_logreg_classifier.evaluate( input_fn=_linearly_inseparable_binary_input_fn, steps=1) kernelized_logreg_loss = kernelized_logreg_metrics['loss'] kernelized_logreg_accuracy = kernelized_logreg_metrics['accuracy'] self.assertLess(kernelized_logreg_loss, 0.2) self.assertEqual(kernelized_logreg_accuracy, 1.0) def testVariablesWithAndWithoutKernels(self): """Tests variables w/ and w/o kernel.""" multi_dim_feature = layers.real_valued_column( 'multi_dim_feature', dimension=2) linear_classifier = kernel_estimators.KernelLinearClassifier( feature_columns=[multi_dim_feature]) linear_classifier.fit( input_fn=_linearly_inseparable_binary_input_fn, steps=50) linear_variables = linear_classifier.get_variable_names() self.assertIn('linear/multi_dim_feature/weight', linear_variables) self.assertIn('linear/bias_weight', linear_variables) linear_weights = linear_classifier.get_variable_value( 'linear/multi_dim_feature/weight') linear_bias = linear_classifier.get_variable_value('linear/bias_weight') kernel_mappers = { multi_dim_feature: [RandomFourierFeatureMapper(2, 30, 0.6, 1, 'rffm')] } kernel_linear_classifier = kernel_estimators.KernelLinearClassifier( feature_columns=[], kernel_mappers=kernel_mappers) kernel_linear_classifier.fit( input_fn=_linearly_inseparable_binary_input_fn, steps=50) kernel_linear_variables = kernel_linear_classifier.get_variable_names() self.assertIn('linear/multi_dim_feature_MAPPED/weight', kernel_linear_variables) self.assertIn('linear/bias_weight', kernel_linear_variables) kernel_linear_weights = kernel_linear_classifier.get_variable_value( 'linear/multi_dim_feature_MAPPED/weight') kernel_linear_bias = 
kernel_linear_classifier.get_variable_value( 'linear/bias_weight') # The feature column used for linear classification (no kernels) has # dimension 2 so the model will learn a 2-dimension weights vector (and a # scalar for the bias). In the kernelized model, the features are mapped to # a 30-dimensional feature space and so the weights variable will also have # dimension 30. self.assertEqual(2, len(linear_weights)) self.assertEqual(1, len(linear_bias)) self.assertEqual(30, len(kernel_linear_weights)) self.assertEqual(1, len(kernel_linear_bias)) def testClassifierWithAndWithoutKernelsNoRealValuedColumns(self): """Tests kernels have no effect for non-real valued columns .""" def input_fn(): return { 'price': constant_op.constant([[0.4], [0.6], [0.3]]), 'country': sparse_tensor.SparseTensor( values=['IT', 'US', 'GB'], indices=[[0, 0], [1, 3], [2, 1]], dense_shape=[3, 5]), }, constant_op.constant([[1], [0], [1]]) price = layers.real_valued_column('price') country = layers.sparse_column_with_hash_bucket( 'country', hash_bucket_size=5) linear_classifier = kernel_estimators.KernelLinearClassifier( feature_columns=[price, country]) linear_classifier.fit(input_fn=input_fn, steps=100) linear_metrics = linear_classifier.evaluate(input_fn=input_fn, steps=1) linear_loss = linear_metrics['loss'] linear_accuracy = linear_metrics['accuracy'] kernel_mappers = { country: [RandomFourierFeatureMapper(2, 30, 0.6, 1, 'rffm')] } kernel_linear_classifier = kernel_estimators.KernelLinearClassifier( feature_columns=[price, country], kernel_mappers=kernel_mappers) kernel_linear_classifier.fit(input_fn=input_fn, steps=100) kernel_linear_metrics = kernel_linear_classifier.evaluate( input_fn=input_fn, steps=1) kernel_linear_loss = kernel_linear_metrics['loss'] kernel_linear_accuracy = kernel_linear_metrics['accuracy'] # The kernel mapping is applied to a non-real-valued feature column and so # it should have no effect on the model. 
The loss and accuracy of the # "kernelized" model should match the loss and accuracy of the initial model # (without kernels). self.assertAlmostEqual(linear_loss, kernel_linear_loss, delta=0.01) self.assertAlmostEqual(linear_accuracy, kernel_linear_accuracy, delta=0.01) def testMulticlassDataWithAndWithoutKernels(self): """Tests classifier w/ and w/o kernels on multiclass data.""" feature_column = layers.real_valued_column('feature', dimension=4) # Metrics for linear classifier (no kernels). linear_classifier = kernel_estimators.KernelLinearClassifier( feature_columns=[feature_column], n_classes=3) linear_classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=50) linear_metrics = linear_classifier.evaluate( input_fn=test_data.iris_input_multiclass_fn, steps=1) linear_loss = linear_metrics['loss'] linear_accuracy = linear_metrics['accuracy'] # Using kernel mappers allows to discover non-linearities in data (via RBF # kernel approximation), reduces loss and increases accuracy. kernel_mappers = { feature_column: [ RandomFourierFeatureMapper( input_dim=4, output_dim=50, stddev=1.0, name='rffm') ] } kernel_linear_classifier = kernel_estimators.KernelLinearClassifier( feature_columns=[], n_classes=3, kernel_mappers=kernel_mappers) kernel_linear_classifier.fit( input_fn=test_data.iris_input_multiclass_fn, steps=50) kernel_linear_metrics = kernel_linear_classifier.evaluate( input_fn=test_data.iris_input_multiclass_fn, steps=1) kernel_linear_loss = kernel_linear_metrics['loss'] kernel_linear_accuracy = kernel_linear_metrics['accuracy'] self.assertLess(kernel_linear_loss, linear_loss) self.assertGreater(kernel_linear_accuracy, linear_accuracy) if __name__ == '__main__': googletest.main()
apache-2.0
patmcb/odoo
openerp/addons/base/tests/test_ir_filters.py
285
11000
# -*- coding: utf-8 -*- import functools from openerp import exceptions from openerp.tests import common def noid(d): """ Removes values that are not relevant for the test comparisons """ d.pop('id', None) d.pop('action_id', None) return d class FiltersCase(common.TransactionCase): def build(self, model, *args): Model = self.registry(model) for vars in args: Model.create(self.cr, common.ADMIN_USER_ID, vars, {}) class TestGetFilters(FiltersCase): def setUp(self): super(TestGetFilters, self).setUp() self.USER = self.registry('res.users').name_search(self.cr, self.uid, 'demo')[0] self.USER_ID = self.USER[0] def test_own_filters(self): self.build( 'ir.filters', dict(name='a', user_id=self.USER_ID, model_id='ir.filters'), dict(name='b', user_id=self.USER_ID, model_id='ir.filters'), dict(name='c', user_id=self.USER_ID, model_id='ir.filters'), dict(name='d', user_id=self.USER_ID, model_id='ir.filters')) filters = self.registry('ir.filters').get_filters( self.cr, self.USER_ID, 'ir.filters') self.assertItemsEqual(map(noid, filters), [ dict(name='a', is_default=False, user_id=self.USER, domain='[]', context='{}'), dict(name='b', is_default=False, user_id=self.USER, domain='[]', context='{}'), dict(name='c', is_default=False, user_id=self.USER, domain='[]', context='{}'), dict(name='d', is_default=False, user_id=self.USER, domain='[]', context='{}'), ]) def test_global_filters(self): self.build( 'ir.filters', dict(name='a', user_id=False, model_id='ir.filters'), dict(name='b', user_id=False, model_id='ir.filters'), dict(name='c', user_id=False, model_id='ir.filters'), dict(name='d', user_id=False, model_id='ir.filters'), ) filters = self.registry('ir.filters').get_filters( self.cr, self.USER_ID, 'ir.filters') self.assertItemsEqual(map(noid, filters), [ dict(name='a', is_default=False, user_id=False, domain='[]', context='{}'), dict(name='b', is_default=False, user_id=False, domain='[]', context='{}'), dict(name='c', is_default=False, user_id=False, domain='[]', context='{}'), 
dict(name='d', is_default=False, user_id=False, domain='[]', context='{}'), ]) def test_no_third_party_filters(self): self.build( 'ir.filters', dict(name='a', user_id=False, model_id='ir.filters'), dict(name='b', user_id=common.ADMIN_USER_ID, model_id='ir.filters'), dict(name='c', user_id=self.USER_ID, model_id='ir.filters'), dict(name='d', user_id=common.ADMIN_USER_ID, model_id='ir.filters') ) filters = self.registry('ir.filters').get_filters( self.cr, self.USER_ID, 'ir.filters') self.assertItemsEqual(map(noid, filters), [ dict(name='a', is_default=False, user_id=False, domain='[]', context='{}'), dict(name='c', is_default=False, user_id=self.USER, domain='[]', context='{}'), ]) class TestOwnDefaults(FiltersCase): def setUp(self): super(TestOwnDefaults, self).setUp() self.USER = self.registry('res.users').name_search(self.cr, self.uid, 'demo')[0] self.USER_ID = self.USER[0] def test_new_no_filter(self): """ When creating a @is_default filter with no existing filter, that new filter gets the default flag """ Filters = self.registry('ir.filters') Filters.create_or_replace(self.cr, self.USER_ID, { 'name': 'a', 'model_id': 'ir.filters', 'user_id': self.USER_ID, 'is_default': True, }) filters = Filters.get_filters(self.cr, self.USER_ID, 'ir.filters') self.assertItemsEqual(map(noid, filters), [ dict(name='a', user_id=self.USER, is_default=True, domain='[]', context='{}') ]) def test_new_filter_not_default(self): """ When creating a @is_default filter with existing non-default filters, the new filter gets the flag """ self.build( 'ir.filters', dict(name='a', user_id=self.USER_ID, model_id='ir.filters'), dict(name='b', user_id=self.USER_ID, model_id='ir.filters'), ) Filters = self.registry('ir.filters') Filters.create_or_replace(self.cr, self.USER_ID, { 'name': 'c', 'model_id': 'ir.filters', 'user_id': self.USER_ID, 'is_default': True, }) filters = Filters.get_filters(self.cr, self.USER_ID, 'ir.filters') self.assertItemsEqual(map(noid, filters), [ dict(name='a', 
user_id=self.USER, is_default=False, domain='[]', context='{}'), dict(name='b', user_id=self.USER, is_default=False, domain='[]', context='{}'), dict(name='c', user_id=self.USER, is_default=True, domain='[]', context='{}'), ]) def test_new_filter_existing_default(self): """ When creating a @is_default filter where an existing filter is already @is_default, the flag should be *moved* from the old to the new filter """ self.build( 'ir.filters', dict(name='a', user_id=self.USER_ID, model_id='ir.filters'), dict(name='b', is_default=True, user_id=self.USER_ID, model_id='ir.filters'), ) Filters = self.registry('ir.filters') Filters.create_or_replace(self.cr, self.USER_ID, { 'name': 'c', 'model_id': 'ir.filters', 'user_id': self.USER_ID, 'is_default': True, }) filters = Filters.get_filters(self.cr, self.USER_ID, 'ir.filters') self.assertItemsEqual(map(noid, filters), [ dict(name='a', user_id=self.USER, is_default=False, domain='[]', context='{}'), dict(name='b', user_id=self.USER, is_default=False, domain='[]', context='{}'), dict(name='c', user_id=self.USER, is_default=True, domain='[]', context='{}'), ]) def test_update_filter_set_default(self): """ When updating an existing filter to @is_default, if an other filter already has the flag the flag should be moved """ self.build( 'ir.filters', dict(name='a', user_id=self.USER_ID, model_id='ir.filters'), dict(name='b', is_default=True, user_id=self.USER_ID, model_id='ir.filters'), ) Filters = self.registry('ir.filters') Filters.create_or_replace(self.cr, self.USER_ID, { 'name': 'a', 'model_id': 'ir.filters', 'user_id': self.USER_ID, 'is_default': True, }) filters = Filters.get_filters(self.cr, self.USER_ID, 'ir.filters') self.assertItemsEqual(map(noid, filters), [ dict(name='a', user_id=self.USER, is_default=True, domain='[]', context='{}'), dict(name='b', user_id=self.USER, is_default=False, domain='[]', context='{}'), ]) class TestGlobalDefaults(FiltersCase): def setUp(self): super(TestGlobalDefaults, self).setUp() 
self.USER = self.registry('res.users').name_search(self.cr, self.uid, 'demo')[0] self.USER_ID = self.USER[0] def test_new_filter_not_default(self): """ When creating a @is_default filter with existing non-default filters, the new filter gets the flag """ self.build( 'ir.filters', dict(name='a', user_id=False, model_id='ir.filters'), dict(name='b', user_id=False, model_id='ir.filters'), ) Filters = self.registry('ir.filters') Filters.create_or_replace(self.cr, self.USER_ID, { 'name': 'c', 'model_id': 'ir.filters', 'user_id': False, 'is_default': True, }) filters = Filters.get_filters(self.cr, self.USER_ID, 'ir.filters') self.assertItemsEqual(map(noid, filters), [ dict(name='a', user_id=False, is_default=False, domain='[]', context='{}'), dict(name='b', user_id=False, is_default=False, domain='[]', context='{}'), dict(name='c', user_id=False, is_default=True, domain='[]', context='{}'), ]) def test_new_filter_existing_default(self): """ When creating a @is_default filter where an existing filter is already @is_default, an error should be generated """ self.build( 'ir.filters', dict(name='a', user_id=False, model_id='ir.filters'), dict(name='b', is_default=True, user_id=False, model_id='ir.filters'), ) Filters = self.registry('ir.filters') with self.assertRaises(exceptions.Warning): Filters.create_or_replace(self.cr, self.USER_ID, { 'name': 'c', 'model_id': 'ir.filters', 'user_id': False, 'is_default': True, }) def test_update_filter_set_default(self): """ When updating an existing filter to @is_default, if an other filter already has the flag an error should be generated """ self.build( 'ir.filters', dict(name='a', user_id=False, model_id='ir.filters'), dict(name='b', is_default=True, user_id=False, model_id='ir.filters'), ) Filters = self.registry('ir.filters') with self.assertRaises(exceptions.Warning): Filters.create_or_replace(self.cr, self.USER_ID, { 'name': 'a', 'model_id': 'ir.filters', 'user_id': False, 'is_default': True, }) def 
test_update_default_filter(self): """ Replacing the current default global filter should not generate any error """ self.build( 'ir.filters', dict(name='a', user_id=False, model_id='ir.filters'), dict(name='b', is_default=True, user_id=False, model_id='ir.filters'), ) Filters = self.registry('ir.filters') context_value = "{'some_key': True}" Filters.create_or_replace(self.cr, self.USER_ID, { 'name': 'b', 'model_id': 'ir.filters', 'user_id': False, 'context': context_value, 'is_default': True, }) filters = Filters.get_filters(self.cr, self.USER_ID, 'ir.filters') self.assertItemsEqual(map(noid, filters), [ dict(name='a', user_id=False, is_default=False, domain='[]', context='{}'), dict(name='b', user_id=False, is_default=True, domain='[]', context=context_value), ])
agpl-3.0
dnozay/lettuce
tests/integration/lib/Django-1.2.5/django/forms/widgets.py
45
29994
""" HTML Widget classes """ import django.utils.copycompat as copy from itertools import chain from django.conf import settings from django.utils.datastructures import MultiValueDict, MergeDict from django.utils.html import escape, conditional_escape from django.utils.translation import ugettext from django.utils.encoding import StrAndUnicode, force_unicode from django.utils.safestring import mark_safe from django.utils import datetime_safe, formats import time import datetime from util import flatatt from urlparse import urljoin __all__ = ( 'Media', 'MediaDefiningClass', 'Widget', 'TextInput', 'PasswordInput', 'HiddenInput', 'MultipleHiddenInput', 'FileInput', 'DateInput', 'DateTimeInput', 'TimeInput', 'Textarea', 'CheckboxInput', 'Select', 'NullBooleanSelect', 'SelectMultiple', 'RadioSelect', 'CheckboxSelectMultiple', 'MultiWidget', 'SplitDateTimeWidget', ) MEDIA_TYPES = ('css','js') class Media(StrAndUnicode): def __init__(self, media=None, **kwargs): if media: media_attrs = media.__dict__ else: media_attrs = kwargs self._css = {} self._js = [] for name in MEDIA_TYPES: getattr(self, 'add_' + name)(media_attrs.get(name, None)) # Any leftover attributes must be invalid. # if media_attrs != {}: # raise TypeError("'class Media' has invalid attribute(s): %s" % ','.join(media_attrs.keys())) def __unicode__(self): return self.render() def render(self): return mark_safe(u'\n'.join(chain(*[getattr(self, 'render_' + name)() for name in MEDIA_TYPES]))) def render_js(self): return [u'<script type="text/javascript" src="%s"></script>' % self.absolute_path(path) for path in self._js] def render_css(self): # To keep rendering order consistent, we can't just iterate over items(). # We need to sort the keys, and iterate over the sorted list. 
media = self._css.keys() media.sort() return chain(*[ [u'<link href="%s" type="text/css" media="%s" rel="stylesheet" />' % (self.absolute_path(path), medium) for path in self._css[medium]] for medium in media]) def absolute_path(self, path): if path.startswith(u'http://') or path.startswith(u'https://') or path.startswith(u'/'): return path return urljoin(settings.MEDIA_URL,path) def __getitem__(self, name): "Returns a Media object that only contains media of the given type" if name in MEDIA_TYPES: return Media(**{str(name): getattr(self, '_' + name)}) raise KeyError('Unknown media type "%s"' % name) def add_js(self, data): if data: for path in data: if path not in self._js: self._js.append(path) def add_css(self, data): if data: for medium, paths in data.items(): for path in paths: if not self._css.get(medium) or path not in self._css[medium]: self._css.setdefault(medium, []).append(path) def __add__(self, other): combined = Media() for name in MEDIA_TYPES: getattr(combined, 'add_' + name)(getattr(self, '_' + name, None)) getattr(combined, 'add_' + name)(getattr(other, '_' + name, None)) return combined def media_property(cls): def _media(self): # Get the media property of the superclass, if it exists if hasattr(super(cls, self), 'media'): base = super(cls, self).media else: base = Media() # Get the media definition for this class definition = getattr(cls, 'Media', None) if definition: extend = getattr(definition, 'extend', True) if extend: if extend == True: m = base else: m = Media() for medium in extend: m = m + base[medium] return m + Media(definition) else: return Media(definition) else: return base return property(_media) class MediaDefiningClass(type): "Metaclass for classes that can have media definitions" def __new__(cls, name, bases, attrs): new_class = super(MediaDefiningClass, cls).__new__(cls, name, bases, attrs) if 'media' not in attrs: new_class.media = media_property(new_class) return new_class class Widget(object): __metaclass__ = 
MediaDefiningClass is_hidden = False # Determines whether this corresponds to an <input type="hidden">. needs_multipart_form = False # Determines does this widget need multipart-encrypted form is_localized = False def __init__(self, attrs=None): if attrs is not None: self.attrs = attrs.copy() else: self.attrs = {} def __deepcopy__(self, memo): obj = copy.copy(self) obj.attrs = self.attrs.copy() memo[id(self)] = obj return obj def render(self, name, value, attrs=None): """ Returns this Widget rendered as HTML, as a Unicode string. The 'value' given is not guaranteed to be valid input, so subclass implementations should program defensively. """ raise NotImplementedError def build_attrs(self, extra_attrs=None, **kwargs): "Helper function for building an attribute dictionary." attrs = dict(self.attrs, **kwargs) if extra_attrs: attrs.update(extra_attrs) return attrs def value_from_datadict(self, data, files, name): """ Given a dictionary of data and this widget's name, returns the value of this widget. Returns None if it's not provided. """ return data.get(name, None) def _has_changed(self, initial, data): """ Return True if data differs from initial. """ # For purposes of seeing whether something has changed, None is # the same as an empty string, if the data or inital value we get # is None, replace it w/ u''. if data is None: data_value = u'' else: data_value = data if initial is None: initial_value = u'' else: initial_value = initial if force_unicode(initial_value) != force_unicode(data_value): return True return False def id_for_label(self, id_): """ Returns the HTML ID attribute of this Widget for use by a <label>, given the ID of the field. Returns None if no ID is available. This hook is necessary because some widgets have multiple HTML elements and, thus, multiple IDs. In that case, this method should return an ID value that corresponds to the first ID in the widget's tags. 
""" return id_ id_for_label = classmethod(id_for_label) class Input(Widget): """ Base class for all <input> widgets (except type='checkbox' and type='radio', which are special). """ input_type = None # Subclasses must define this. def _format_value(self, value): if self.is_localized: return formats.localize_input(value) return value def render(self, name, value, attrs=None): if value is None: value = '' final_attrs = self.build_attrs(attrs, type=self.input_type, name=name) if value != '': # Only add the 'value' attribute if a value is non-empty. final_attrs['value'] = force_unicode(self._format_value(value)) return mark_safe(u'<input%s />' % flatatt(final_attrs)) class TextInput(Input): input_type = 'text' class PasswordInput(Input): input_type = 'password' def __init__(self, attrs=None, render_value=True): super(PasswordInput, self).__init__(attrs) self.render_value = render_value def render(self, name, value, attrs=None): if not self.render_value: value=None return super(PasswordInput, self).render(name, value, attrs) class HiddenInput(Input): input_type = 'hidden' is_hidden = True class MultipleHiddenInput(HiddenInput): """ A widget that handles <input type="hidden"> for fields that have a list of values. """ def __init__(self, attrs=None, choices=()): super(MultipleHiddenInput, self).__init__(attrs) # choices can be any iterable self.choices = choices def render(self, name, value, attrs=None, choices=()): if value is None: value = [] final_attrs = self.build_attrs(attrs, type=self.input_type, name=name) id_ = final_attrs.get('id', None) inputs = [] for i, v in enumerate(value): input_attrs = dict(value=force_unicode(v), **final_attrs) if id_: # An ID attribute was given. Add a numeric index as a suffix # so that the inputs don't all have the same ID attribute. 
input_attrs['id'] = '%s_%s' % (id_, i) inputs.append(u'<input%s />' % flatatt(input_attrs)) return mark_safe(u'\n'.join(inputs)) def value_from_datadict(self, data, files, name): if isinstance(data, (MultiValueDict, MergeDict)): return data.getlist(name) return data.get(name, None) class FileInput(Input): input_type = 'file' needs_multipart_form = True def render(self, name, value, attrs=None): return super(FileInput, self).render(name, None, attrs=attrs) def value_from_datadict(self, data, files, name): "File widgets take data from FILES, not POST" return files.get(name, None) def _has_changed(self, initial, data): if data is None: return False return True class Textarea(Widget): def __init__(self, attrs=None): # The 'rows' and 'cols' attributes are required for HTML correctness. default_attrs = {'cols': '40', 'rows': '10'} if attrs: default_attrs.update(attrs) super(Textarea, self).__init__(default_attrs) def render(self, name, value, attrs=None): if value is None: value = '' final_attrs = self.build_attrs(attrs, name=name) return mark_safe(u'<textarea%s>%s</textarea>' % (flatatt(final_attrs), conditional_escape(force_unicode(value)))) class DateInput(Input): input_type = 'text' format = '%Y-%m-%d' # '2006-10-25' def __init__(self, attrs=None, format=None): super(DateInput, self).__init__(attrs) if format: self.format = format self.manual_format = True else: self.format = formats.get_format('DATE_INPUT_FORMATS')[0] self.manual_format = False def _format_value(self, value): if self.is_localized and not self.manual_format: return formats.localize_input(value) elif hasattr(value, 'strftime'): value = datetime_safe.new_date(value) return value.strftime(self.format) return value def _has_changed(self, initial, data): # If our field has show_hidden_initial=True, initial will be a string # formatted by HiddenInput using formats.localize_input, which is not # necessarily the format used for this widget. Attempt to convert it. 
try: input_format = formats.get_format('DATE_INPUT_FORMATS')[0] initial = datetime.date(*time.strptime(initial, input_format)[:3]) except (TypeError, ValueError): pass return super(DateInput, self)._has_changed(self._format_value(initial), data) class DateTimeInput(Input): input_type = 'text' format = '%Y-%m-%d %H:%M:%S' # '2006-10-25 14:30:59' def __init__(self, attrs=None, format=None): super(DateTimeInput, self).__init__(attrs) if format: self.format = format self.manual_format = True else: self.format = formats.get_format('DATETIME_INPUT_FORMATS')[0] self.manual_format = False def _format_value(self, value): if self.is_localized and not self.manual_format: return formats.localize_input(value) elif hasattr(value, 'strftime'): value = datetime_safe.new_datetime(value) return value.strftime(self.format) return value def _has_changed(self, initial, data): # If our field has show_hidden_initial=True, initial will be a string # formatted by HiddenInput using formats.localize_input, which is not # necessarily the format used for this widget. Attempt to convert it. 
try: input_format = formats.get_format('DATETIME_INPUT_FORMATS')[0] initial = datetime.datetime(*time.strptime(initial, input_format)[:6]) except (TypeError, ValueError): pass return super(DateTimeInput, self)._has_changed(self._format_value(initial), data) class TimeInput(Input): input_type = 'text' format = '%H:%M:%S' # '14:30:59' def __init__(self, attrs=None, format=None): super(TimeInput, self).__init__(attrs) if format: self.format = format self.manual_format = True else: self.format = formats.get_format('TIME_INPUT_FORMATS')[0] self.manual_format = False def _format_value(self, value): if self.is_localized and not self.manual_format: return formats.localize_input(value) elif hasattr(value, 'strftime'): return value.strftime(self.format) return value def _has_changed(self, initial, data): # If our field has show_hidden_initial=True, initial will be a string # formatted by HiddenInput using formats.localize_input, which is not # necessarily the format used for this widget. Attempt to convert it. try: input_format = formats.get_format('TIME_INPUT_FORMATS')[0] initial = datetime.time(*time.strptime(initial, input_format)[3:6]) except (TypeError, ValueError): pass return super(TimeInput, self)._has_changed(self._format_value(initial), data) class CheckboxInput(Widget): def __init__(self, attrs=None, check_test=bool): super(CheckboxInput, self).__init__(attrs) # check_test is a callable that takes a value and returns True # if the checkbox should be checked for that value. self.check_test = check_test def render(self, name, value, attrs=None): final_attrs = self.build_attrs(attrs, type='checkbox', name=name) try: result = self.check_test(value) except: # Silently catch exceptions result = False if result: final_attrs['checked'] = 'checked' if value not in ('', True, False, None): # Only add the 'value' attribute if a value is non-empty. 
final_attrs['value'] = force_unicode(value) return mark_safe(u'<input%s />' % flatatt(final_attrs)) def value_from_datadict(self, data, files, name): if name not in data: # A missing value means False because HTML form submission does not # send results for unselected checkboxes. return False value = data.get(name) # Translate true and false strings to boolean values. values = {'true': True, 'false': False} if isinstance(value, basestring): value = values.get(value.lower(), value) return value def _has_changed(self, initial, data): # Sometimes data or initial could be None or u'' which should be the # same thing as False. return bool(initial) != bool(data) class Select(Widget): def __init__(self, attrs=None, choices=()): super(Select, self).__init__(attrs) # choices can be any iterable, but we may need to render this widget # multiple times. Thus, collapse it into a list so it can be consumed # more than once. self.choices = list(choices) def render(self, name, value, attrs=None, choices=()): if value is None: value = '' final_attrs = self.build_attrs(attrs, name=name) output = [u'<select%s>' % flatatt(final_attrs)] options = self.render_options(choices, [value]) if options: output.append(options) output.append(u'</select>') return mark_safe(u'\n'.join(output)) def render_option(self, selected_choices, option_value, option_label): option_value = force_unicode(option_value) selected_html = (option_value in selected_choices) and u' selected="selected"' or '' return u'<option value="%s"%s>%s</option>' % ( escape(option_value), selected_html, conditional_escape(force_unicode(option_label))) def render_options(self, choices, selected_choices): # Normalize to strings. 
selected_choices = set([force_unicode(v) for v in selected_choices]) output = [] for option_value, option_label in chain(self.choices, choices): if isinstance(option_label, (list, tuple)): output.append(u'<optgroup label="%s">' % escape(force_unicode(option_value))) for option in option_label: output.append(self.render_option(selected_choices, *option)) output.append(u'</optgroup>') else: output.append(self.render_option(selected_choices, option_value, option_label)) return u'\n'.join(output) class NullBooleanSelect(Select): """ A Select Widget intended to be used with NullBooleanField. """ def __init__(self, attrs=None): choices = ((u'1', ugettext('Unknown')), (u'2', ugettext('Yes')), (u'3', ugettext('No'))) super(NullBooleanSelect, self).__init__(attrs, choices) def render(self, name, value, attrs=None, choices=()): try: value = {True: u'2', False: u'3', u'2': u'2', u'3': u'3'}[value] except KeyError: value = u'1' return super(NullBooleanSelect, self).render(name, value, attrs, choices) def value_from_datadict(self, data, files, name): value = data.get(name, None) return {u'2': True, True: True, 'True': True, u'3': False, 'False': False, False: False}.get(value, None) def _has_changed(self, initial, data): # For a NullBooleanSelect, None (unknown) and False (No) # are not the same if initial is not None: initial = bool(initial) if data is not None: data = bool(data) return initial != data class SelectMultiple(Select): def render(self, name, value, attrs=None, choices=()): if value is None: value = [] final_attrs = self.build_attrs(attrs, name=name) output = [u'<select multiple="multiple"%s>' % flatatt(final_attrs)] options = self.render_options(choices, value) if options: output.append(options) output.append('</select>') return mark_safe(u'\n'.join(output)) def value_from_datadict(self, data, files, name): if isinstance(data, (MultiValueDict, MergeDict)): return data.getlist(name) return data.get(name, None) def _has_changed(self, initial, data): if initial is 
None: initial = [] if data is None: data = [] if len(initial) != len(data): return True initial_set = set([force_unicode(value) for value in initial]) data_set = set([force_unicode(value) for value in data]) return data_set != initial_set class RadioInput(StrAndUnicode): """ An object used by RadioFieldRenderer that represents a single <input type='radio'>. """ def __init__(self, name, value, attrs, choice, index): self.name, self.value = name, value self.attrs = attrs self.choice_value = force_unicode(choice[0]) self.choice_label = force_unicode(choice[1]) self.index = index def __unicode__(self): if 'id' in self.attrs: label_for = ' for="%s_%s"' % (self.attrs['id'], self.index) else: label_for = '' choice_label = conditional_escape(force_unicode(self.choice_label)) return mark_safe(u'<label%s>%s %s</label>' % (label_for, self.tag(), choice_label)) def is_checked(self): return self.value == self.choice_value def tag(self): if 'id' in self.attrs: self.attrs['id'] = '%s_%s' % (self.attrs['id'], self.index) final_attrs = dict(self.attrs, type='radio', name=self.name, value=self.choice_value) if self.is_checked(): final_attrs['checked'] = 'checked' return mark_safe(u'<input%s />' % flatatt(final_attrs)) class RadioFieldRenderer(StrAndUnicode): """ An object used by RadioSelect to enable customization of radio widgets. 
""" def __init__(self, name, value, attrs, choices): self.name, self.value, self.attrs = name, value, attrs self.choices = choices def __iter__(self): for i, choice in enumerate(self.choices): yield RadioInput(self.name, self.value, self.attrs.copy(), choice, i) def __getitem__(self, idx): choice = self.choices[idx] # Let the IndexError propogate return RadioInput(self.name, self.value, self.attrs.copy(), choice, idx) def __unicode__(self): return self.render() def render(self): """Outputs a <ul> for this set of radio fields.""" return mark_safe(u'<ul>\n%s\n</ul>' % u'\n'.join([u'<li>%s</li>' % force_unicode(w) for w in self])) class RadioSelect(Select): renderer = RadioFieldRenderer def __init__(self, *args, **kwargs): # Override the default renderer if we were passed one. renderer = kwargs.pop('renderer', None) if renderer: self.renderer = renderer super(RadioSelect, self).__init__(*args, **kwargs) def get_renderer(self, name, value, attrs=None, choices=()): """Returns an instance of the renderer.""" if value is None: value = '' str_value = force_unicode(value) # Normalize to string. final_attrs = self.build_attrs(attrs) choices = list(chain(self.choices, choices)) return self.renderer(name, str_value, final_attrs, choices) def render(self, name, value, attrs=None, choices=()): return self.get_renderer(name, value, attrs, choices).render() def id_for_label(self, id_): # RadioSelect is represented by multiple <input type="radio"> fields, # each of which has a distinct ID. The IDs are made distinct by a "_X" # suffix, where X is the zero-based index of the radio field. Thus, # the label for a RadioSelect should reference the first one ('_0'). 
if id_: id_ += '_0' return id_ id_for_label = classmethod(id_for_label) class CheckboxSelectMultiple(SelectMultiple): def render(self, name, value, attrs=None, choices=()): if value is None: value = [] has_id = attrs and 'id' in attrs final_attrs = self.build_attrs(attrs, name=name) output = [u'<ul>'] # Normalize to strings str_values = set([force_unicode(v) for v in value]) for i, (option_value, option_label) in enumerate(chain(self.choices, choices)): # If an ID attribute was given, add a numeric index as a suffix, # so that the checkboxes don't all have the same ID attribute. if has_id: final_attrs = dict(final_attrs, id='%s_%s' % (attrs['id'], i)) label_for = u' for="%s"' % final_attrs['id'] else: label_for = '' cb = CheckboxInput(final_attrs, check_test=lambda value: value in str_values) option_value = force_unicode(option_value) rendered_cb = cb.render(name, option_value) option_label = conditional_escape(force_unicode(option_label)) output.append(u'<li><label%s>%s %s</label></li>' % (label_for, rendered_cb, option_label)) output.append(u'</ul>') return mark_safe(u'\n'.join(output)) def id_for_label(self, id_): # See the comment for RadioSelect.id_for_label() if id_: id_ += '_0' return id_ id_for_label = classmethod(id_for_label) class MultiWidget(Widget): """ A widget that is composed of multiple widgets. Its render() method is different than other widgets', because it has to figure out how to split a single value for display in multiple widgets. The ``value`` argument can be one of two things: * A list. * A normal value (e.g., a string) that has been "compressed" from a list of values. In the second case -- i.e., if the value is NOT a list -- render() will first "decompress" the value into a list before rendering it. It does so by calling the decompress() method, which MultiWidget subclasses must implement. This method takes a single "compressed" value and returns a list. 
When render() does its HTML rendering, each value in the list is rendered with the corresponding widget -- the first value is rendered in the first widget, the second value is rendered in the second widget, etc. Subclasses may implement format_output(), which takes the list of rendered widgets and returns a string of HTML that formats them any way you'd like. You'll probably want to use this class with MultiValueField. """ def __init__(self, widgets, attrs=None): self.widgets = [isinstance(w, type) and w() or w for w in widgets] super(MultiWidget, self).__init__(attrs) def render(self, name, value, attrs=None): if self.is_localized: for widget in self.widgets: widget.is_localized = self.is_localized # value is a list of values, each corresponding to a widget # in self.widgets. if not isinstance(value, list): value = self.decompress(value) output = [] final_attrs = self.build_attrs(attrs) id_ = final_attrs.get('id', None) for i, widget in enumerate(self.widgets): try: widget_value = value[i] except IndexError: widget_value = None if id_: final_attrs = dict(final_attrs, id='%s_%s' % (id_, i)) output.append(widget.render(name + '_%s' % i, widget_value, final_attrs)) return mark_safe(self.format_output(output)) def id_for_label(self, id_): # See the comment for RadioSelect.id_for_label() if id_: id_ += '_0' return id_ id_for_label = classmethod(id_for_label) def value_from_datadict(self, data, files, name): return [widget.value_from_datadict(data, files, name + '_%s' % i) for i, widget in enumerate(self.widgets)] def _has_changed(self, initial, data): if initial is None: initial = [u'' for x in range(0, len(data))] else: if not isinstance(initial, list): initial = self.decompress(initial) for widget, initial, data in zip(self.widgets, initial, data): if widget._has_changed(initial, data): return True return False def format_output(self, rendered_widgets): """ Given a list of rendered widgets (as strings), returns a Unicode string representing the HTML for the whole 
lot. This hook allows you to format the HTML design of the widgets, if needed. """ return u''.join(rendered_widgets) def decompress(self, value): """ Returns a list of decompressed values for the given compressed value. The given value can be assumed to be valid, but not necessarily non-empty. """ raise NotImplementedError('Subclasses must implement this method.') def _get_media(self): "Media for a multiwidget is the combination of all media of the subwidgets" media = Media() for w in self.widgets: media = media + w.media return media media = property(_get_media) def __deepcopy__(self, memo): obj = super(MultiWidget, self).__deepcopy__(memo) obj.widgets = copy.deepcopy(self.widgets) return obj class SplitDateTimeWidget(MultiWidget): """ A Widget that splits datetime input into two <input type="text"> boxes. """ date_format = DateInput.format time_format = TimeInput.format def __init__(self, attrs=None, date_format=None, time_format=None): widgets = (DateInput(attrs=attrs, format=date_format), TimeInput(attrs=attrs, format=time_format)) super(SplitDateTimeWidget, self).__init__(widgets, attrs) def decompress(self, value): if value: return [value.date(), value.time().replace(microsecond=0)] return [None, None] class SplitHiddenDateTimeWidget(SplitDateTimeWidget): """ A Widget that splits datetime input into two <input type="hidden"> inputs. """ is_hidden = True def __init__(self, attrs=None, date_format=None, time_format=None): super(SplitHiddenDateTimeWidget, self).__init__(attrs, date_format, time_format) for widget in self.widgets: widget.input_type = 'hidden' widget.is_hidden = True
gpl-3.0
mou4e/zirconium
ui/ozone/generate_ozone_platform_list.py
102
4432
#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Code generator for Ozone platform list.

This script takes as arguments a list of platform names and generates a C++
source file containing a list of those platforms.

Each platform gets an integer identifier that is used to find objects for that
platform (particularly constructors for platform-specific objects).

Example Output: ./generate_ozone_platform_list.py --default wayland dri wayland

  // platform_list.txt

  wayland
  dri

  // platform_list.h

  #ifndef UI_OZONE_PLATFORM_LIST_H_
  #define UI_OZONE_PLATFORM_LIST_H_

  namespace ui {

  const int kPlatformWayland = 0;
  const int kPlatformDri = 1;

  extern const char *kPlatformNames[kPlatformCount];

  }  // namespace ui

  // platform_list.cc

  #include "ui/ozone/platform_list.h"

  namespace ui {

  const char *kPlatformNames[] = {
    "wayland",  // kPlatformWayland
    "dri",      // kPlatformDri
  };

  }  // namespace ui

  #endif  // UI_OZONE_PLATFORM_LIST_H_
"""

import optparse
import sys


def GetConstantName(name):
  """Return the platform-id constant name for a platform name.

  Capitalizes the platform name and prepends 'kPlatform', e.g.
  'wayland' -> 'kPlatformWayland'.

  (Fixes two defects: the old docstring wrongly claimed the prefix was
  'CreateOzonePlatform', and the old code used string.capitalize(name),
  which was removed from the string module in Python 3.)
  """
  return 'kPlatform' + name.capitalize()


def GeneratePlatformListText(out, platforms):
  """Generate text file with list of platform names, in platform id order."""
  for platform in platforms:
    out.write(platform)
    out.write('\n')
  # Trailing blank line, kept for compatibility with existing consumers.
  out.write('\n')


def GeneratePlatformListHeader(out, platforms):
  """Generate ids of ozone platforms & declaration of static names array."""
  out.write('// DO NOT MODIFY. GENERATED BY generate_ozone_platform_list.py\n')
  out.write('\n')

  out.write('#ifndef UI_OZONE_PLATFORM_LIST_H_\n')
  out.write('#define UI_OZONE_PLATFORM_LIST_H_\n')
  out.write('\n')

  out.write('namespace ui {\n')
  out.write('\n')

  # One integer constant per platform; the list index is the platform id.
  for plat_id, plat_name in enumerate(platforms):
    out.write('const int %s = %d;\n' % (GetConstantName(plat_name), plat_id))
  out.write('\n')

  # Platform count.
  out.write('const int kPlatformCount = %d;\n' % len(platforms))
  out.write('\n')

  # Declaration for names list (defined in the generated .cc file).
  out.write('extern const char* kPlatformNames[kPlatformCount];\n')
  out.write('\n')

  out.write('}  // namespace ui\n')
  out.write('\n')

  out.write('#endif  // UI_OZONE_PLATFORM_LIST_H_\n')
  out.write('\n')


def GeneratePlatformListSource(out, platforms):
  """Generate static array containing a list of ozone platforms."""
  out.write('// DO NOT MODIFY. GENERATED BY generate_ozone_platform_list.py\n')
  out.write('\n')

  out.write('#include "ui/ozone/platform_list.h"\n')
  out.write('\n')

  out.write('namespace ui {\n')
  out.write('\n')

  # Definition of names list; entries are in platform id order so that
  # kPlatformNames[kPlatformFoo] == "foo".
  out.write('const char* kPlatformNames[] = {\n')
  for plat_name in platforms:
    out.write('  "%s",  // %s\n' % (plat_name, GetConstantName(plat_name)))
  out.write('};\n')
  out.write('\n')

  out.write('}  // namespace ui\n')
  out.write('\n')


def main(argv):
  """Parse options, reorder platforms, and emit the three outputs.

  Returns 0 on success (outputs default to stdout when no --output_*
  option is given).
  """
  parser = optparse.OptionParser()
  parser.add_option('--output_cc')
  parser.add_option('--output_h')
  parser.add_option('--output_txt')
  parser.add_option('--default')
  options, platforms = parser.parse_args(argv)

  # Reorder the platforms when --default is specified.
  # The default platform must appear first in the platform list.
  if options.default and options.default in platforms:
    platforms.remove(options.default)
    platforms.insert(0, options.default)

  # Write to standard output or file specified by --output_{cc,h,txt}.
  # Text mode ('w', not 'wb'): all output is text, and writing str to a
  # binary-mode file is an error under Python 3.
  out_cc = sys.stdout
  out_h = sys.stdout
  out_txt = sys.stdout
  if options.output_cc:
    out_cc = open(options.output_cc, 'w')
  if options.output_h:
    out_h = open(options.output_h, 'w')
  if options.output_txt:
    out_txt = open(options.output_txt, 'w')

  GeneratePlatformListText(out_txt, platforms)
  GeneratePlatformListHeader(out_h, platforms)
  GeneratePlatformListSource(out_cc, platforms)

  if options.output_cc:
    out_cc.close()
  if options.output_h:
    out_h.close()
  if options.output_txt:
    out_txt.close()

  return 0


if __name__ == '__main__':
  sys.exit(main(sys.argv[1:]))
bsd-3-clause
wasade/qiime
qiime/make_bootstrapped_tree.py
1
1352
#!/usr/bin/env python
from __future__ import division

__author__ = "Justin Kuczynski"
__copyright__ = "Copyright 2011, The QIIME Project"
__credits__ = ["Justin Kuczynski"]
__license__ = "GPL"
__version__ = "1.8.0-dev"
__maintainer__ = "Justin Kuczynski"
__email__ = "justinak@gmail.com"

"""takes a tree and bootstrap support file and writes a pdf, colored by
bootstrap support
"""
from matplotlib import use
use('Agg', warn=False)
from cogent.draw.dendrogram import SquareDendrogram
import os.path
import sys


def write_pdf_bootstrap_tree(tree, output_f, hits_dict):
    """Render *tree* to a PDF, coloring edges by bootstrap support.

    Parameters
    ----------
    tree : a PyCogent tree object accepted by SquareDendrogram
    output_f : output file path (or handle) passed to drawToPDF
    hits_dict : mapping of tip id -> bootstrap support value; tips are
        looked up by the part of node.Name before the first '/'.

    Support is bucketed as: < .25 blue, < .5 green, < .75 yellow,
    <= 1.1 red, anything else (or a tip with no entry) black.
    """
    def f(node):
        # Unnamed (internal) nodes get the default color.
        if not node.Name:
            return 'black'
        tip_id = node.Name.split('/')[0]
        try:
            support = hits_dict[tip_id]
        except KeyError:
            # Fix: was a bare `except:` swallowing every error (including
            # programming mistakes); only a missing tip id is expected here.
            return 'black'
        # NOTE(review): assumes support values are numeric — confirm with
        # the caller that hits_dict never holds strings.
        if support < .25:
            return 'blue'
        elif support < .5:
            return 'green'
        elif support < .75:
            return 'yellow'
        elif support <= 1.1:
            return 'red'
        return 'black'

    t = SquareDendrogram(tree)

    # Make output size proportional to the tree size, with a 700pt floor.
    num_tips = len(tree.tips())
    width = max(700, 8 * num_tips)
    height = max(700, 8 * num_tips)

    t.drawToPDF(output_f, width, height, edge_color_callback=f)
gpl-2.0
hsaputra/tensorflow
tensorflow/tools/dist_test/scripts/k8s_tensorflow_test.py
130
4446
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.tools.dist_test.scripts.k8s_tensorflow_lib."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.python.platform import googletest
from tensorflow.tools.dist_test.scripts import k8s_tensorflow_lib


class K8sTensorflowTest(googletest.TestCase):
  """Checks the Kubernetes YAML config emitted by GenerateConfig.

  All assertions are substring checks against the rendered config text,
  so they only pin the presence/absence of markers, not full documents.
  """

  def testGenerateConfig_LoadBalancer(self):
    """request_load_balancer toggles the 'LoadBalancer' service type."""
    # Use loadbalancer
    config = k8s_tensorflow_lib.GenerateConfig(
        num_workers=1,
        num_param_servers=1,
        port=5000,
        request_load_balancer=True,
        docker_image='test_image',
        name_prefix='abc',
        use_shared_volume=False)
    self.assertTrue('LoadBalancer' in config)
    # Don't use loadbalancer
    config = k8s_tensorflow_lib.GenerateConfig(
        num_workers=1,
        num_param_servers=1,
        port=5000,
        request_load_balancer=False,
        docker_image='test_image',
        name_prefix='abc',
        use_shared_volume=False)
    self.assertFalse('LoadBalancer' in config)

  def testGenerateConfig_SharedVolume(self):
    """use_shared_volume toggles the '/shared' mount in the config."""
    # Use shared directory
    config = k8s_tensorflow_lib.GenerateConfig(
        num_workers=1,
        num_param_servers=1,
        port=5000,
        request_load_balancer=False,
        docker_image='test_image',
        name_prefix='abc',
        use_shared_volume=True)
    self.assertTrue('/shared' in config)
    # Don't use shared directory
    config = k8s_tensorflow_lib.GenerateConfig(
        num_workers=1,
        num_param_servers=1,
        port=5000,
        request_load_balancer=False,
        docker_image='test_image',
        name_prefix='abc',
        use_shared_volume=False)
    self.assertFalse('/shared' in config)

  def testEnvVar(self):
    """env_vars entries are rendered as {name: ..., value: ...} pairs."""
    # Use loadbalancer
    config = k8s_tensorflow_lib.GenerateConfig(
        num_workers=1,
        num_param_servers=1,
        port=5000,
        request_load_balancer=True,
        docker_image='test_image',
        name_prefix='abc',
        use_shared_volume=False,
        env_vars={'test1': 'test1_value',
                  'test2': 'test2_value'})
    self.assertTrue('{name: "test1", value: "test1_value"}' in config)
    self.assertTrue('{name: "test2", value: "test2_value"}' in config)

  def testClusterSpec(self):
    """use_cluster_spec selects --cluster_spec vs --worker_hosts/--ps_hosts."""
    # Use cluster_spec
    config = k8s_tensorflow_lib.GenerateConfig(
        num_workers=1,
        num_param_servers=1,
        port=5000,
        request_load_balancer=True,
        docker_image='test_image',
        name_prefix='abc',
        use_shared_volume=False,
        use_cluster_spec=True)
    self.assertFalse('worker_hosts' in config)
    self.assertFalse('ps_hosts' in config)
    self.assertTrue(
        '"--cluster_spec=worker|abc-worker0:5000,ps|abc-ps0:5000"' in config)
    # Don't use cluster_spec
    config = k8s_tensorflow_lib.GenerateConfig(
        num_workers=1,
        num_param_servers=1,
        port=5000,
        request_load_balancer=True,
        docker_image='test_image',
        name_prefix='abc',
        use_shared_volume=False,
        use_cluster_spec=False)
    self.assertFalse('cluster_spec' in config)
    self.assertTrue('"--worker_hosts=abc-worker0:5000"' in config)
    self.assertTrue('"--ps_hosts=abc-ps0:5000"' in config)

  def testWorkerHosts(self):
    """WorkerHosts joins '<prefix>-worker<i>:<port>' with commas."""
    self.assertEquals(
        'test_prefix-worker0:1234',
        k8s_tensorflow_lib.WorkerHosts(1, 1234, 'test_prefix'))
    self.assertEquals(
        'test_prefix-worker0:1234,test_prefix-worker1:1234',
        k8s_tensorflow_lib.WorkerHosts(2, 1234, 'test_prefix'))

  def testPsHosts(self):
    """PsHosts joins '<prefix>-ps<i>:<port>' with commas."""
    self.assertEquals(
        'test_prefix-ps0:1234,test_prefix-ps1:1234',
        k8s_tensorflow_lib.PsHosts(2, 1234, 'test_prefix'))


if __name__ == '__main__':
  googletest.main()
apache-2.0
maohongyuan/kbengine
kbe/src/lib/python/Lib/test/test_asyncio/test_windows_utils.py
60
5801
"""Tests for window_utils"""

import socket
import sys
import test.support
import unittest
from test.support import IPV6_ENABLED
from unittest import mock

# These tests exercise Windows-only APIs (_winapi, overlapped I/O);
# skip the whole module elsewhere.
if sys.platform != 'win32':
    raise unittest.SkipTest('Windows only')

import _winapi

from asyncio import windows_utils
from asyncio import _overlapped


class WinsocketpairTests(unittest.TestCase):
    """Tests for windows_utils.socketpair (emulated socketpair on Windows)."""

    def check_winsocketpair(self, ssock, csock):
        # Round-trip a payload to prove the two ends are connected.
        csock.send(b'xxx')
        self.assertEqual(b'xxx', ssock.recv(1024))
        csock.close()
        ssock.close()

    def test_winsocketpair(self):
        ssock, csock = windows_utils.socketpair()
        self.check_winsocketpair(ssock, csock)

    @unittest.skipUnless(IPV6_ENABLED, 'IPv6 not supported or enabled')
    def test_winsocketpair_ipv6(self):
        ssock, csock = windows_utils.socketpair(family=socket.AF_INET6)
        self.check_winsocketpair(ssock, csock)

    @mock.patch('asyncio.windows_utils.socket')
    def test_winsocketpair_exc(self, m_socket):
        # Simulate a failing connect(); socketpair must propagate OSError.
        m_socket.AF_INET = socket.AF_INET
        m_socket.SOCK_STREAM = socket.SOCK_STREAM
        m_socket.socket.return_value.getsockname.return_value = ('', 12345)
        m_socket.socket.return_value.accept.return_value = object(), object()
        m_socket.socket.return_value.connect.side_effect = OSError()
        self.assertRaises(OSError, windows_utils.socketpair)

    def test_winsocketpair_invalid_args(self):
        # Only AF_INET/AF_INET6 + SOCK_STREAM + proto 0 are supported.
        self.assertRaises(ValueError,
                          windows_utils.socketpair, family=socket.AF_UNSPEC)
        self.assertRaises(ValueError,
                          windows_utils.socketpair, type=socket.SOCK_DGRAM)
        self.assertRaises(ValueError,
                          windows_utils.socketpair, proto=1)

    @mock.patch('asyncio.windows_utils.socket')
    def test_winsocketpair_close(self, m_socket):
        # If bind() fails, the listener socket must still be closed.
        m_socket.AF_INET = socket.AF_INET
        m_socket.SOCK_STREAM = socket.SOCK_STREAM
        sock = mock.Mock()
        m_socket.socket.return_value = sock
        sock.bind.side_effect = OSError
        self.assertRaises(OSError, windows_utils.socketpair)
        self.assertTrue(sock.close.called)


class PipeTests(unittest.TestCase):
    """Tests for windows_utils.pipe / PipeHandle (overlapped named pipes)."""

    def test_pipe_overlapped(self):
        h1, h2 = windows_utils.pipe(overlapped=(True, True))
        try:
            ov1 = _overlapped.Overlapped()
            self.assertFalse(ov1.pending)
            self.assertEqual(ov1.error, 0)

            # A read on an empty pipe stays pending until data arrives.
            ov1.ReadFile(h1, 100)
            self.assertTrue(ov1.pending)
            self.assertEqual(ov1.error, _winapi.ERROR_IO_PENDING)
            ERROR_IO_INCOMPLETE = 996
            try:
                ov1.getresult()
            except OSError as e:
                self.assertEqual(e.winerror, ERROR_IO_INCOMPLETE)
            else:
                raise RuntimeError('expected ERROR_IO_INCOMPLETE')

            ov2 = _overlapped.Overlapped()
            self.assertFalse(ov2.pending)
            self.assertEqual(ov2.error, 0)

            # The write may complete synchronously (0) or stay pending.
            ov2.WriteFile(h2, b"hello")
            self.assertIn(ov2.error, {0, _winapi.ERROR_IO_PENDING})

            res = _winapi.WaitForMultipleObjects([ov2.event], False, 100)
            self.assertEqual(res, _winapi.WAIT_OBJECT_0)

            self.assertFalse(ov1.pending)
            self.assertEqual(ov1.error, ERROR_IO_INCOMPLETE)
            self.assertFalse(ov2.pending)
            self.assertIn(ov2.error, {0, _winapi.ERROR_IO_PENDING})
            self.assertEqual(ov1.getresult(), b"hello")
        finally:
            _winapi.CloseHandle(h1)
            _winapi.CloseHandle(h2)

    def test_pipe_handle(self):
        h, _ = windows_utils.pipe(overlapped=(True, True))
        _winapi.CloseHandle(_)
        p = windows_utils.PipeHandle(h)
        self.assertEqual(p.fileno(), h)
        self.assertEqual(p.handle, h)

        # check garbage collection of p closes handle
        del p
        test.support.gc_collect()
        try:
            _winapi.CloseHandle(h)
        except OSError as e:
            self.assertEqual(e.winerror, 6)  # ERROR_INVALID_HANDLE
        else:
            raise RuntimeError('expected ERROR_INVALID_HANDLE')


class PopenTests(unittest.TestCase):
    """Tests for windows_utils.Popen with overlapped pipe handles."""

    def test_popen(self):
        # Child echoes stdin upper-cased to stdout and writes to stderr.
        command = r"""if 1:
            import sys
            s = sys.stdin.readline()
            sys.stdout.write(s.upper())
            sys.stderr.write('stderr')
            """
        msg = b"blah\n"

        p = windows_utils.Popen([sys.executable, '-c', command],
                                stdin=windows_utils.PIPE,
                                stdout=windows_utils.PIPE,
                                stderr=windows_utils.PIPE)

        for f in [p.stdin, p.stdout, p.stderr]:
            self.assertIsInstance(f, windows_utils.PipeHandle)

        ovin = _overlapped.Overlapped()
        ovout = _overlapped.Overlapped()
        overr = _overlapped.Overlapped()

        ovin.WriteFile(p.stdin.handle, msg)
        ovout.ReadFile(p.stdout.handle, 100)
        overr.ReadFile(p.stderr.handle, 100)

        events = [ovin.event, ovout.event, overr.event]
        # Super-long timeout for slow buildbots.
        res = _winapi.WaitForMultipleObjects(events, True, 10000)
        self.assertEqual(res, _winapi.WAIT_OBJECT_0)
        self.assertFalse(ovout.pending)
        self.assertFalse(overr.pending)
        self.assertFalse(ovin.pending)

        self.assertEqual(ovin.getresult(), len(msg))
        out = ovout.getresult().rstrip()
        err = overr.getresult().rstrip()

        self.assertGreater(len(out), 0)
        self.assertGreater(len(err), 0)
        # allow for partial reads...
        self.assertTrue(msg.upper().rstrip().startswith(out))
        self.assertTrue(b"stderr".startswith(err))

        p.wait()


if __name__ == '__main__':
    unittest.main()
lgpl-3.0
KiChjang/servo
components/script/dom/bindings/codegen/parser/tests/test_special_methods_uniqueness.py
35
1078
import WebIDL


def WebIDLTest(parser, harness):
    """Check that duplicate special-operation declarations are rejected.

    Each snippet below declares the same kind of special operation
    (getter / deleter / setter) twice on one interface; parsing each
    one must throw.

    The three cases were originally three copy-pasted try/except blocks
    differing only in the IDL text; they are now driven by one loop.
    """
    snippets = [
        """
        interface SpecialMethodUniqueness1 {
          getter deleter boolean (DOMString name);
          getter boolean (DOMString name);
        };
        """,
        """
        interface SpecialMethodUniqueness1 {
          deleter boolean (DOMString name);
          getter deleter boolean (DOMString name);
        };
        """,
        """
        interface SpecialMethodUniqueness1 {
          setter boolean (DOMString name);
          setter boolean (DOMString name);
        };
        """,
    ]
    for idl in snippets:
        threw = False
        try:
            parser.parse(idl)
            parser.finish()
        except Exception:
            # Fix: was a bare `except:`; catching Exception still covers
            # parser errors without masking KeyboardInterrupt/SystemExit.
            threw = True
        harness.ok(threw, "Should have thrown.")
mpl-2.0
BenzoPlayer/kernel_asus_fugu
tools/perf/tests/attr.py
3174
9441
#! /usr/bin/python

# Harness for the 'perf test attr' suite: runs perf with PERF_TEST_ATTR
# set, then compares the perf_event_attr values perf recorded against
# per-test expectation files.  (Python 2 only: ConfigParser module,
# `except E, v` syntax, print statement.)

import os
import sys
import glob
import optparse
import tempfile
import logging
import shutil
import ConfigParser

class Fail(Exception):
    # A test ran but its recorded events did not match expectations.
    def __init__(self, test, msg):
        self.msg = msg
        self.test = test
    def getMsg(self):
        return '\'%s\' - %s' % (self.test.path, self.msg)

class Unsup(Exception):
    # The perf command exited with an unexpected status; the test is
    # treated as unsupported on this machine, not failed.
    def __init__(self, test):
        self.test = test
    def getMsg(self):
        return '\'%s\'' % self.test.path

class Event(dict):
    # One perf_event_attr record; keys are the attr field names below.
    terms = [
        'cpu',
        'flags',
        'type',
        'size',
        'config',
        'sample_period',
        'sample_type',
        'read_format',
        'disabled',
        'inherit',
        'pinned',
        'exclusive',
        'exclude_user',
        'exclude_kernel',
        'exclude_hv',
        'exclude_idle',
        'mmap',
        'comm',
        'freq',
        'inherit_stat',
        'enable_on_exec',
        'task',
        'watermark',
        'precise_ip',
        'mmap_data',
        'sample_id_all',
        'exclude_host',
        'exclude_guest',
        'exclude_callchain_kernel',
        'exclude_callchain_user',
        'wakeup_events',
        'bp_type',
        'config1',
        'config2',
        'branch_sample_type',
        'sample_regs_user',
        'sample_stack_user',
    ]

    def add(self, data):
        # data is a sequence of (key, value) pairs (ConfigParser items).
        for key, val in data:
            log.debug(" %s = %s" % (key, val))
            self[key] = val

    def __init__(self, name, data, base):
        # base holds the parent event's values; data overrides them.
        log.debug(" Event %s" % name);
        self.name = name;
        self.group = ''
        self.add(base)
        self.add(data)

    def compare_data(self, a, b):
        # Allow multiple values in assignment separated by '|';
        # '*' on either side acts as a wildcard.
        a_list = a.split('|')
        b_list = b.split('|')
        for a_item in a_list:
            for b_item in b_list:
                if (a_item == b_item):
                    return True
                elif (a_item == '*') or (b_item == '*'):
                    return True
        return False

    def equal(self, other):
        # Two events are equal when every term matches (wildcards allowed).
        for t in Event.terms:
            log.debug(" [%s] %s %s" % (t, self[t], other[t]));
            if not self.has_key(t) or not other.has_key(t):
                return False
            if not self.compare_data(self[t], other[t]):
                return False
        return True

    def diff(self, other):
        # Log every term that differs; used for failure diagnostics only.
        for t in Event.terms:
            if not self.has_key(t) or not other.has_key(t):
                continue
            if not self.compare_data(self[t], other[t]):
                log.warning("expected %s=%s, got %s" % (t, self[t], other[t]))

# Test file description needs to have following sections:
# [config]
#   - just single instance in file
#   - needs to specify:
#     'command' - perf command name
#     'args'    - special command arguments
#     'ret'     - expected command return value (0 by default)
#
# [eventX:base]
#   - one or multiple instances in file
#   - expected values assignments
class Test(object):
    def __init__(self, path, options):
        parser = ConfigParser.SafeConfigParser()
        parser.read(path)

        log.warning("running '%s'" % path)

        self.path = path
        self.test_dir = options.test_dir
        self.perf = options.perf
        self.command = parser.get('config', 'command')
        self.args = parser.get('config', 'args')

        # 'ret' is optional; default to 0 when the section lacks it.
        try:
            self.ret = parser.get('config', 'ret')
        except:
            self.ret = 0

        self.expect = {}
        self.result = {}
        log.debug(" loading expected events");
        self.load_events(path, self.expect)

    def is_event(self, name):
        # Section names containing 'event' describe expected events.
        if name.find("event") == -1:
            return False
        else:
            return True

    def load_events(self, path, events):
        parser_event = ConfigParser.SafeConfigParser()
        parser_event.read(path)

        # The event record section header contains 'event' word,
        # optionaly followed by ':' allowing to load 'parent
        # event' first as a base
        for section in filter(self.is_event, parser_event.sections()):

            parser_items = parser_event.items(section);
            base_items = {}

            # Read parent event if there's any
            if (':' in section):
                base = section[section.index(':') + 1:]
                parser_base = ConfigParser.SafeConfigParser()
                parser_base.read(self.test_dir + '/' + base)
                base_items = parser_base.items('event')

            e = Event(section, parser_items, base_items)
            events[section] = e

    def run_cmd(self, tempdir):
        # PERF_TEST_ATTR makes perf dump one 'event*' file per attr into
        # tempdir; a wrong exit status means the test is unsupported.
        cmd = "PERF_TEST_ATTR=%s %s %s -o %s/perf.data %s" % (tempdir,
              self.perf, self.command, tempdir, self.args)
        ret = os.WEXITSTATUS(os.system(cmd))

        log.info(" '%s' ret %d " % (cmd, ret))

        if ret != int(self.ret):
            raise Unsup(self)

    def compare(self, expect, result):
        match = {}

        log.debug(" compare");

        # For each expected event find all matching
        # events in result. Fail if there's not any.
        for exp_name, exp_event in expect.items():
            exp_list = []
            log.debug(" matching [%s]" % exp_name)
            for res_name, res_event in result.items():
                log.debug(" to [%s]" % res_name)
                if (exp_event.equal(res_event)):
                    exp_list.append(res_name)
                    log.debug(" ->OK")
                else:
                    log.debug(" ->FAIL");

            log.debug(" match: [%s] matches %s" % (exp_name, str(exp_list)))

            # we did not any matching event - fail
            # NOTE(review): diff() here uses res_event from the loop above,
            # i.e. only the last candidate is diffed — confirm intended.
            if (not exp_list):
                exp_event.diff(res_event)
                raise Fail(self, 'match failure');

            match[exp_name] = exp_list

        # For each defined group in the expected events
        # check we match the same group in the result.
        for exp_name, exp_event in expect.items():
            group = exp_event.group

            if (group == ''):
                continue

            for res_name in match[exp_name]:
                res_group = result[res_name].group
                if res_group not in match[group]:
                    raise Fail(self, 'group failure')

                log.debug(" group: [%s] matches group leader %s" %
                          (exp_name, str(match[group])))

        log.debug(" matched")

    def resolve_groups(self, events):
        # Replace numeric group_fd links with the leader event's name so
        # groups can be compared across expect/result sets.
        for name, event in events.items():
            group_fd = event['group_fd'];
            if group_fd == '-1':
                continue;

            for iname, ievent in events.items():
                if (ievent['fd'] == group_fd):
                    event.group = iname
                    log.debug('[%s] has group leader [%s]' % (name, iname))
                    break;

    def run(self):
        tempdir = tempfile.mkdtemp();

        try:
            # run the test script
            self.run_cmd(tempdir);

            # load events expectation for the test
            log.debug(" loading result events");
            for f in glob.glob(tempdir + '/event*'):
                self.load_events(f, self.result);

            # resolve group_fd to event names
            self.resolve_groups(self.expect);
            self.resolve_groups(self.result);

            # do the expectation - results matching - both ways
            self.compare(self.expect, self.result)
            self.compare(self.result, self.expect)

        finally:
            # cleanup
            shutil.rmtree(tempdir)

def run_tests(options):
    # Unsupported tests are logged and skipped; Fail propagates to main().
    for f in glob.glob(options.test_dir + '/' + options.test):
        try:
            Test(f, options).run()
        except Unsup, obj:
            log.warning("unsupp %s" % obj.getMsg())

def setup_log(verbose):
    # Configures the module-global `log` used throughout this file.
    global log
    level = logging.CRITICAL

    if verbose == 1:
        level = logging.WARNING
    if verbose == 2:
        level = logging.INFO
    if verbose >= 3:
        level = logging.DEBUG

    log = logging.getLogger('test')
    log.setLevel(level)
    ch = logging.StreamHandler()
    ch.setLevel(level)
    formatter = logging.Formatter('%(message)s')
    ch.setFormatter(formatter)
    log.addHandler(ch)

USAGE = '''%s [OPTIONS]
-d dir  # tests dir
-p path # perf binary
-t test # single test
-v      # verbose level
''' % sys.argv[0]

def main():
    parser = optparse.OptionParser(usage=USAGE)

    parser.add_option("-t", "--test",
                      action="store", type="string", dest="test")
    parser.add_option("-d", "--test-dir",
                      action="store", type="string", dest="test_dir")
    parser.add_option("-p", "--perf",
                      action="store", type="string", dest="perf")
    parser.add_option("-v", "--verbose",
                      action="count", dest="verbose")

    options, args = parser.parse_args()
    if args:
        parser.error('FAILED wrong arguments %s' % ' '.join(args))
        return -1

    setup_log(options.verbose)

    if not options.test_dir:
        print 'FAILED no -d option specified'
        sys.exit(-1)

    if not options.test:
        options.test = 'test*'

    try:
        run_tests(options)

    except Fail, obj:
        print "FAILED %s" % obj.getMsg();
        sys.exit(-1)

    sys.exit(0)

if __name__ == '__main__':
    main()
gpl-2.0
Lekanich/intellij-community
python/lib/Lib/site-packages/django/contrib/gis/sitemaps/views.py
250
4342
from django.http import HttpResponse, Http404
from django.template import loader
from django.contrib.sites.models import get_current_site
from django.core import urlresolvers
from django.core.paginator import EmptyPage, PageNotAnInteger
from django.contrib.gis.db.models.fields import GeometryField
from django.db import connections, DEFAULT_DB_ALIAS
from django.db.models import get_model
from django.utils.encoding import smart_str

from django.contrib.gis.shortcuts import render_to_kml, render_to_kmz


def index(request, sitemaps):
    """
    This view generates a sitemap index that uses the proper view
    for resolving geographic section sitemap URLs.
    """
    current_site = get_current_site(request)
    sites = []
    # Pre-ternary idiom: 'https' when the request came in over TLS,
    # otherwise 'http'.
    protocol = request.is_secure() and 'https' or 'http'
    for section, site in sitemaps.items():
        # Entries may be Sitemap classes or instances; instantiate on demand.
        if callable(site):
            pages = site().paginator.num_pages
        else:
            pages = site.paginator.num_pages
        # Reverse by dotted view path so the URL honours the project URLconf.
        sitemap_url = urlresolvers.reverse('django.contrib.gis.sitemaps.views.sitemap', kwargs={'section': section})
        sites.append('%s://%s%s' % (protocol, current_site.domain, sitemap_url))
        # Pages beyond the first are addressed via the ?p= query parameter.
        if pages > 1:
            for page in range(2, pages+1):
                sites.append('%s://%s%s?p=%s' % (protocol, current_site.domain, sitemap_url, page))
    xml = loader.render_to_string('sitemap_index.xml', {'sitemaps': sites})
    return HttpResponse(xml, mimetype='application/xml')


def sitemap(request, sitemaps, section=None):
    """
    This view generates a sitemap with additional geographic
    elements defined by Google.

    With a `section` argument only that section's sitemap is rendered
    (404 if unknown); otherwise all registered sitemaps are combined.
    """
    maps, urls = [], []
    if section is not None:
        if section not in sitemaps:
            raise Http404("No sitemap available for section: %r" % section)
        maps.append(sitemaps[section])
    else:
        maps = sitemaps.values()

    # Page number comes from the ?p= query parameter; get_urls() validates it
    # (EmptyPage / PageNotAnInteger are raised by the paginator below).
    page = request.GET.get("p", 1)
    current_site = get_current_site(request)
    for site in maps:
        try:
            # Entries may be Sitemap classes or instances.
            if callable(site):
                urls.extend(site().get_urls(page=page, site=current_site))
            else:
                urls.extend(site.get_urls(page=page, site=current_site))
        except EmptyPage:
            raise Http404("Page %s empty" % page)
        except PageNotAnInteger:
            raise Http404("No page '%s'" % page)
    xml = smart_str(loader.render_to_string('gis/sitemaps/geo_sitemap.xml', {'urlset': urls}))
    return HttpResponse(xml, mimetype='application/xml')


def kml(request, label, model, field_name=None, compress=False, using=DEFAULT_DB_ALIAS):
    """
    This view generates KML for the given app label, model, and field name.

    The model's default manager must be GeoManager, and the field name
    must be that of a geographic field.  When `compress` is true the
    result is rendered as KMZ instead of KML.
    """
    placemarks = []
    klass = get_model(label, model)
    if not klass:
        raise Http404('You must supply a valid app label and module name. Got "%s.%s"' % (label, model))

    if field_name:
        try:
            info = klass._meta.get_field_by_name(field_name)
            # Deliberately raise so the except below converts a
            # non-geographic field into the same 404.
            if not isinstance(info[0], GeometryField):
                raise Exception
        except:
            # NOTE(review): this bare except also turns unrelated errors
            # from get_field_by_name into a 404 -- confirm intentional.
            raise Http404('Invalid geometry field.')

    connection = connections[using]

    if connection.ops.postgis:
        # PostGIS will take care of transformation.
        placemarks = klass._default_manager.using(using).kml(field_name=field_name)
    else:
        # There's no KML method on Oracle or MySQL, so we use the `kml`
        # attribute of the lazy geometry instead.
        placemarks = []
        if connection.ops.oracle:
            # Transform to WGS84 (SRID 4326), the coordinate system KML uses.
            qs = klass._default_manager.using(using).transform(4326, field_name=field_name)
        else:
            qs = klass._default_manager.using(using).all()
        for mod in qs:
            mod.kml = getattr(mod, field_name).kml
            placemarks.append(mod)

    # Pick the render helper matching the requested output (KMZ is
    # zip-compressed KML), then render the shared placemarks template.
    if compress:
        render = render_to_kmz
    else:
        render = render_to_kml
    return render('gis/kml/placemarks.kml', {'places' : placemarks})


def kmz(request, label, model, field_name=None, using=DEFAULT_DB_ALIAS):
    """
    This view returns KMZ for the given app label, model, and field name.

    Thin wrapper around kml() with compression enabled.
    """
    return kml(request, label, model, field_name, compress=True, using=using)
apache-2.0
xme1226/horizon
openstack_dashboard/dashboards/project/data_processing/data_plugins/tables.py
6
1392
# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import logging from django.template import defaultfilters as filters from django.utils.translation import ugettext_lazy as _ from horizon import tables LOG = logging.getLogger(__name__) class PluginsTable(tables.DataTable): title = tables.Column("title", verbose_name=_("Title"), link=("horizon:project:data_processing." "data_plugins:details")) versions = tables.Column("versions", verbose_name=_("Supported Versions"), wrap_list=True, filters=(filters.unordered_list,)) description = tables.Column("description", verbose_name=_("Description")) class Meta: name = "plugins" verbose_name = _("Plugins")
apache-2.0
xingh/omaha
tools/generate_omaha3_idl.py
64
3345
#!/usr/bin/python2.4
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================

"""Generates IDL file for Omaha3 interfaces.

Reads an IDL template, replaces every ___AUTO_GENERATED_GUID___ marker
with a freshly generated GUID, and writes the result to the output file.
"""

import getopt
import os
import sys


def _GetStatusOutput(cmd):
  """Return (status, output) of executing cmd in a shell.

  On Windows, emulates commands.getstatusoutput via os.popen (the
  commands module is POSIX-oriented); elsewhere delegates to it.
  """
  if os.name == "nt":
    pipe = os.popen(cmd + " 2>&1", "r")
    try:
      text = pipe.read()
    finally:
      sts = pipe.close()
    if sts is None:
      sts = 0
    # Strip a single trailing newline, matching getstatusoutput().
    if text[-1:] == "\n":
      text = text[:-1]
    return sts, text
  else:
    # Imported lazily: the commands module is Python 2-only and is never
    # needed on the Windows path above.
    import commands
    return commands.getstatusoutput(cmd)


def _GenerateGuid():
  """Return a new GUID string produced by uuidgen.exe.

  Raises:
    SystemError: if the uuidgen invocation fails.
  """
  (status, guid) = _GetStatusOutput("uuidgen.exe /c")
  if status != 0:
    raise SystemError("Failed to get GUID: %s" % guid)
  return guid


def _GenerateIDLText(idl_template):
  """Replace each GUID placeholder in the template with a fresh GUID.

  Each marker gets its own GUID (replace count 1 per iteration), so two
  markers never share a value.
  """
  guid_placehold_marker = "___AUTO_GENERATED_GUID___"
  while guid_placehold_marker in idl_template:
    idl_template = idl_template.replace(guid_placehold_marker,
                                        _GenerateGuid(),
                                        1)
  return idl_template


def _GenerateIDLFile(idl_template_filename, idl_output_filename):
  """Read the IDL template, fill in GUIDs, write the output IDL file.

  try/finally (rather than 'with') keeps the python2.4 shebang honest
  while still closing both files on error; the original leaked the
  handles if read/write raised.
  """
  f_in = open(idl_template_filename, "r")
  try:
    idl_template = f_in.read()
  finally:
    f_in.close()

  idl_output = _GenerateIDLText(idl_template)

  f_out = open(idl_output_filename, "w")
  try:
    f_out.write("// *** AUTOGENERATED FILE. DO NOT HAND-EDIT ***\n\n")
    f_out.write(idl_output)
  finally:
    f_out.close()


def _Usage():
  """Prints out script usage information."""
  # Single-argument call form parses under both Python 2 and 3.
  print("""
generate_omaha3_idl.py: Write out the given IDL file.

Usage:
  generate_omaha3_idl.py [--help
                          | --idl_template_file filename
                          --idl_output_file filename]

Options:
  --help                        Show this information.
  --idl_output_file filename    Path/name of output IDL filename.
  --idl_template_file filename  Path/name of input IDL template.
""")


def _Main():
  """Parses options and generates the IDL file."""
  # use getopt to parse the option and argument list; this may raise, but
  # don't catch it
  argument_list = ["help", "idl_template_file=", "idl_output_file="]
  (opts, unused_args) = getopt.getopt(sys.argv[1:], "", argument_list)
  if not opts or ("--help", "") in opts:
    _Usage()
    sys.exit()

  idl_template_filename = ""
  idl_output_filename = ""
  for (o, v) in opts:
    if o == "--idl_template_file":
      idl_template_filename = v
    if o == "--idl_output_file":
      idl_output_filename = v

  # make sure we have work to do.  SystemError for consistency with
  # _GenerateGuid (the original raised Python 2-only StandardError).
  if not idl_template_filename:
    raise SystemError("no idl_template_filename specified")
  if not idl_output_filename:
    raise SystemError("no idl_output_filename specified")

  _GenerateIDLFile(idl_template_filename, idl_output_filename)
  sys.exit()


if __name__ == "__main__":
  _Main()
apache-2.0
zrhans/pythonanywhere
.virtualenvs/django19/lib/python3.4/site-packages/pandas/tests/test_msgpack/test_newspec.py
9
2586
# coding: utf-8

"""Round-trip tests for msgpack 2.0 ("new spec") wire-format type codes:
str 8, bin 8/16/32, and the fixext/ext family, via pandas' vendored
msgpack packb/unpackb.  The exact byte vectors below are the spec; do
not alter them.
"""

from pandas.msgpack import packb, unpackb, ExtType


def test_str8():
    # 0xd9 = "str 8": type code followed by a one-byte length.
    header = b'\xd9'
    data = b'x' * 32
    b = packb(data.decode(), use_bin_type=True)
    assert len(b) == len(data) + 2
    assert b[0:2] == header + b'\x20'
    assert b[2:] == data
    assert unpackb(b) == data

    # 255 bytes is the maximum length representable by str 8.
    data = b'x' * 255
    b = packb(data.decode(), use_bin_type=True)
    assert len(b) == len(data) + 2
    assert b[0:2] == header + b'\xff'
    assert b[2:] == data
    assert unpackb(b) == data


def test_bin8():
    # 0xc4 = "bin 8": raw bytes with a one-byte length.
    header = b'\xc4'
    data = b''
    b = packb(data, use_bin_type=True)
    assert len(b) == len(data) + 2
    assert b[0:2] == header + b'\x00'
    assert b[2:] == data
    assert unpackb(b) == data

    data = b'x' * 255
    b = packb(data, use_bin_type=True)
    assert len(b) == len(data) + 2
    assert b[0:2] == header + b'\xff'
    assert b[2:] == data
    assert unpackb(b) == data


def test_bin16():
    # 0xc5 = "bin 16": raw bytes with a two-byte big-endian length.
    header = b'\xc5'
    data = b'x' * 256
    b = packb(data, use_bin_type=True)
    assert len(b) == len(data) + 3
    assert b[0:1] == header
    assert b[1:3] == b'\x01\x00'
    assert b[3:] == data
    assert unpackb(b) == data

    data = b'x' * 65535
    b = packb(data, use_bin_type=True)
    assert len(b) == len(data) + 3
    assert b[0:1] == header
    assert b[1:3] == b'\xff\xff'
    assert b[3:] == data
    assert unpackb(b) == data


def test_bin32():
    # 0xc6 = "bin 32": raw bytes with a four-byte big-endian length.
    header = b'\xc6'
    data = b'x' * 65536
    b = packb(data, use_bin_type=True)
    assert len(b) == len(data) + 5
    assert b[0:1] == header
    assert b[1:5] == b'\x00\x01\x00\x00'
    assert b[5:] == data
    assert unpackb(b) == data


def test_ext():
    # Application-defined extension types: (type code, payload) pairs.
    def check(ext, packed):
        assert packb(ext) == packed
        assert unpackb(packed) == ext
    check(ExtType(0x42, b'Z'), b'\xd4\x42Z')        # fixext 1
    check(ExtType(0x42, b'ZZ'), b'\xd5\x42ZZ')      # fixext 2
    check(ExtType(0x42, b'Z'*4), b'\xd6\x42' + b'Z'*4)    # fixext 4
    check(ExtType(0x42, b'Z'*8), b'\xd7\x42' + b'Z'*8)    # fixext 8
    check(ExtType(0x42, b'Z'*16), b'\xd8\x42' + b'Z'*16)  # fixext 16
    # ext 8
    check(ExtType(0x42, b''), b'\xc7\x00\x42')
    check(ExtType(0x42, b'Z'*255), b'\xc7\xff\x42' + b'Z'*255)
    # ext 16
    check(ExtType(0x42, b'Z'*256),
          b'\xc8\x01\x00\x42' + b'Z'*256)
    check(ExtType(0x42, b'Z'*0xffff),
          b'\xc8\xff\xff\x42' + b'Z'*0xffff)
    # ext 32
    check(ExtType(0x42, b'Z'*0x10000),
          b'\xc9\x00\x01\x00\x00\x42' + b'Z'*0x10000)
    # needs large memory
    #check(ExtType(0x42, b'Z'*0xffffffff),
    #      b'\xc9\xff\xff\xff\xff\x42' + b'Z'*0xffffffff)
apache-2.0
wesm/impyla
impala/tests/_dbapi20_tests.py
4
32294
#!/usr/bin/env python ''' Python DB API 2.0 driver compliance unit test suite. This software is Public Domain and may be used without restrictions. "Now we have booze and barflies entering the discussion, plus rumours of DBAs on drugs... and I won't tell you what flashes through my mind each time I read the subject line with 'Anal Compliance' in it. All around this is turning out to be a thoroughly unwholesome unit test." -- Ian Bicking ''' from __future__ import absolute_import __rcs_id__ = '$Id: dbapi20.py,v 1.11 2005/01/02 02:41:01 zenzen Exp $' __version__ = '$Revision: 1.12 $'[11:-2] __author__ = 'Stuart Bishop <stuart@stuartbishop.net>' import time import sys from six.moves import range from impala.tests.compat import unittest # Revision 1.12 2009/02/06 03:35:11 kf7xm # Tested okay with Python 3.0, includes last minute patches from Mark H. # # Revision 1.1.1.1.2.1 2008/09/20 19:54:59 rupole # Include latest changes from main branch # Updates for py3k # # Revision 1.11 2005/01/02 02:41:01 zenzen # Update author email address # # Revision 1.10 2003/10/09 03:14:14 zenzen # Add test for DB API 2.0 optional extension, where database exceptions # are exposed as attributes on the Connection object. # # Revision 1.9 2003/08/13 01:16:36 zenzen # Minor tweak from Stefan Fleiter # # Revision 1.8 2003/04/10 00:13:25 zenzen # Changes, as per suggestions by M.-A. Lemburg # - Add a table prefix, to ensure namespace collisions can always be avoided # # Revision 1.7 2003/02/26 23:33:37 zenzen # Break out DDL into helper functions, as per request by David Rushby # # Revision 1.6 2003/02/21 03:04:33 zenzen # Stuff from Henrik Ekelund: # added test_None # added test_nextset & hooks # # Revision 1.5 2003/02/17 22:08:43 zenzen # Implement suggestions and code from Henrik Eklund - test that cursor.arraysize # defaults to 1 & generic cursor.callproc test added # # Revision 1.4 2003/02/15 00:16:33 zenzen # Changes, as per suggestions and bug reports by M.-A. Lemburg, # Matthew T. 
Kromer, Federico Di Gregorio and Daniel Dittmar # - Class renamed # - Now a subclass of TestCase, to avoid requiring the driver stub # to use multiple inheritance # - Reversed the polarity of buggy test in test_description # - Test exception heirarchy correctly # - self.populate is now self._populate(), so if a driver stub # overrides self.ddl1 this change propogates # - VARCHAR columns now have a width, which will hopefully make the # DDL even more portible (this will be reversed if it causes more problems) # - cursor.rowcount being checked after various execute and fetchXXX methods # - Check for fetchall and fetchmany returning empty lists after results # are exhausted (already checking for empty lists if select retrieved # nothing # - Fix bugs in test_setoutputsize_basic and test_setinputsizes # def str2bytes(sval): if sys.version_info < (3,0) and isinstance(sval, str): sval = sval.decode("latin1") return sval.encode("latin1") class DatabaseAPI20Test(unittest.TestCase): ''' Test a database self.driver for DB API 2.0 compatibility. This implementation tests Gadfly, but the TestCase is structured so that other self.drivers can subclass this test case to ensure compiliance with the DB-API. It is expected that this TestCase may be expanded in the future if ambiguities or edge conditions are discovered. The 'Optional Extensions' are not yet being tested. self.drivers should subclass this test, overriding setUp, tearDown, self.driver, connect_args and connect_kw_args. Class specification should be as follows: import dbapi20 class mytest(dbapi20.DatabaseAPI20Test): [...] Don't 'import DatabaseAPI20Test from dbapi20', or you will confuse the unit tester - just 'import dbapi20'. ''' # The self.driver module. 
This should be the module where the 'connect' # method is to be found driver = None connect_args = () # List of arguments to pass to connect connect_kw_args = {} # Keyword arguments for connect table_prefix = 'dbapi20test_' # If you need to specify a prefix for tables ddl1 = 'create table %sbooze (name varchar(20))' % table_prefix ddl2 = 'create table %sbarflys (name varchar(20))' % table_prefix xddl1 = 'drop table %sbooze' % table_prefix xddl2 = 'drop table %sbarflys' % table_prefix lowerfunc = 'lower' # Name of stored procedure to convert string->lowercase # Some drivers may need to override these helpers, for example adding # a 'commit' after the execute. def executeDDL1(self,cursor): cursor.execute(self.ddl1) def executeDDL2(self,cursor): cursor.execute(self.ddl2) def setUp(self): ''' self.drivers should override this method to perform required setup if any is necessary, such as creating the database. ''' pass def tearDown(self): ''' self.drivers should override this method to perform required cleanup if any is necessary, such as deleting the test database. The default drops the tables that may be created. ''' con = self._connect() try: cur = con.cursor() for ddl in (self.xddl1,self.xddl2): try: cur.execute(ddl) con.commit() except self.driver.Error: # Assume table didn't exist. Other tests will check if # execute is busted. 
pass finally: con.close() def _connect(self): try: return self.driver.connect( *self.connect_args,**self.connect_kw_args ) except AttributeError: self.fail("No connect method found in self.driver module") def test_connect(self): con = self._connect() con.close() def test_apilevel(self): try: # Must exist apilevel = self.driver.apilevel # Must equal 2.0 self.assertEqual(apilevel,'2.0') except AttributeError: self.fail("Driver doesn't define apilevel") def test_threadsafety(self): try: # Must exist threadsafety = self.driver.threadsafety # Must be a valid value self.failUnless(threadsafety in (0,1,2,3)) except AttributeError: self.fail("Driver doesn't define threadsafety") def test_paramstyle(self): try: # Must exist paramstyle = self.driver.paramstyle # Must be a valid value self.failUnless(paramstyle in ( 'qmark','numeric','named','format','pyformat' )) except AttributeError: self.fail("Driver doesn't define paramstyle") def test_Exceptions(self): # Make sure required exceptions exist, and are in the # defined heirarchy. 
if sys.version[0] == '3': #under Python 3 StardardError no longer exists self.failUnless(issubclass(self.driver.Warning,Exception)) self.failUnless(issubclass(self.driver.Error,Exception)) else: self.failUnless(issubclass(self.driver.Warning,Exception)) self.failUnless(issubclass(self.driver.Error,Exception)) self.failUnless( issubclass(self.driver.InterfaceError,self.driver.Error) ) self.failUnless( issubclass(self.driver.DatabaseError,self.driver.Error) ) self.failUnless( issubclass(self.driver.OperationalError,self.driver.Error) ) self.failUnless( issubclass(self.driver.IntegrityError,self.driver.Error) ) self.failUnless( issubclass(self.driver.InternalError,self.driver.Error) ) self.failUnless( issubclass(self.driver.ProgrammingError,self.driver.Error) ) self.failUnless( issubclass(self.driver.NotSupportedError,self.driver.Error) ) def test_ExceptionsAsConnectionAttributes(self): # OPTIONAL EXTENSION # Test for the optional DB API 2.0 extension, where the exceptions # are exposed as attributes on the Connection object # I figure this optional extension will be implemented by any # driver author who is using this test suite, so it is enabled # by default. 
con = self._connect() drv = self.driver self.failUnless(con.Warning is drv.Warning) self.failUnless(con.Error is drv.Error) self.failUnless(con.InterfaceError is drv.InterfaceError) self.failUnless(con.DatabaseError is drv.DatabaseError) self.failUnless(con.OperationalError is drv.OperationalError) self.failUnless(con.IntegrityError is drv.IntegrityError) self.failUnless(con.InternalError is drv.InternalError) self.failUnless(con.ProgrammingError is drv.ProgrammingError) self.failUnless(con.NotSupportedError is drv.NotSupportedError) def test_commit(self): con = self._connect() try: # Commit must work, even if it doesn't do anything con.commit() finally: con.close() def test_rollback(self): con = self._connect() # If rollback is defined, it should either work or throw # the documented exception if hasattr(con,'rollback'): try: con.rollback() except self.driver.NotSupportedError: pass def test_cursor(self): con = self._connect() try: cur = con.cursor() finally: con.close() def test_cursor_isolation(self): con = self._connect() try: # Make sure cursors created from the same connection have # the documented transaction isolation level cur1 = con.cursor() cur2 = con.cursor() self.executeDDL1(cur1) cur1.execute("insert into %sbooze values ('Victoria Bitter')" % ( self.table_prefix )) cur2.execute("select name from %sbooze" % self.table_prefix) booze = cur2.fetchall() self.assertEqual(len(booze),1) self.assertEqual(len(booze[0]),1) self.assertEqual(booze[0][0],'Victoria Bitter') finally: con.close() def test_description(self): con = self._connect() try: cur = con.cursor() self.executeDDL1(cur) self.assertEqual(cur.description,None, 'cursor.description should be none after executing a ' 'statement that can return no rows (such as DDL)' ) cur.execute('select name from %sbooze' % self.table_prefix) self.assertEqual(len(cur.description),1, 'cursor.description describes too many columns' ) self.assertEqual(len(cur.description[0]),7, 'cursor.description[x] tuples must have 7 
elements' ) self.assertEqual(cur.description[0][0].lower(),'name', 'cursor.description[x][0] must return column name' ) self.assertEqual(cur.description[0][1],self.driver.STRING, 'cursor.description[x][1] must return column type. Got %r' % cur.description[0][1] ) # Make sure self.description gets reset self.executeDDL2(cur) self.assertEqual(cur.description,None, 'cursor.description not being set to None when executing ' 'no-result statements (eg. DDL)' ) finally: con.close() def test_rowcount(self): con = self._connect() try: cur = con.cursor() self.executeDDL1(cur) self.assertEqual(cur.rowcount,-1, 'cursor.rowcount should be -1 after executing no-result ' 'statements' ) cur.execute("insert into %sbooze values ('Victoria Bitter')" % ( self.table_prefix )) self.failUnless(cur.rowcount in (-1,1), 'cursor.rowcount should == number or rows inserted, or ' 'set to -1 after executing an insert statement' ) cur.execute("select name from %sbooze" % self.table_prefix) self.failUnless(cur.rowcount in (-1,1), 'cursor.rowcount should == number of rows returned, or ' 'set to -1 after executing a select statement' ) self.executeDDL2(cur) self.assertEqual(cur.rowcount,-1, 'cursor.rowcount not being reset to -1 after executing ' 'no-result statements' ) finally: con.close() lower_func = 'lower' def test_callproc(self): con = self._connect() try: cur = con.cursor() if self.lower_func and hasattr(cur,'callproc'): r = cur.callproc(self.lower_func,('FOO',)) self.assertEqual(len(r),1) self.assertEqual(r[0],'FOO') r = cur.fetchall() self.assertEqual(len(r),1,'callproc produced no result set') self.assertEqual(len(r[0]),1, 'callproc produced invalid result set' ) self.assertEqual(r[0][0],'foo', 'callproc produced invalid results' ) finally: con.close() def test_close(self): con = self._connect() try: cur = con.cursor() finally: con.close() # cursor.execute should raise an Error if called after connection # closed self.assertRaises(self.driver.Error,self.executeDDL1,cur) # laserson note: 
the next to assertions are not clear to me from PEP 249 # so I am leaving them out # connection.commit should raise an Error if called after connection' # closed.' # self.assertRaises(self.driver.Error,con.commit) # connection.close should raise an Error if called more than once # self.assertRaises(self.driver.Error,con.close) def test_execute(self): con = self._connect() try: cur = con.cursor() self._paraminsert(cur) finally: con.close() def _paraminsert(self,cur): self.executeDDL1(cur) cur.execute("insert into %sbooze values ('Victoria Bitter')" % ( self.table_prefix )) self.failUnless(cur.rowcount in (-1,1)) if self.driver.paramstyle == 'qmark': cur.execute( 'insert into %sbooze values (?)' % self.table_prefix, ("Cooper's",) ) elif self.driver.paramstyle == 'numeric': cur.execute( 'insert into %sbooze values (:1)' % self.table_prefix, ("Cooper's",) ) elif self.driver.paramstyle == 'named': cur.execute( 'insert into %sbooze values (:beer)' % self.table_prefix, {'beer':"Cooper's"} ) elif self.driver.paramstyle == 'format': cur.execute( 'insert into %sbooze values (%%s)' % self.table_prefix, ("Cooper's",) ) elif self.driver.paramstyle == 'pyformat': cur.execute( 'insert into %sbooze values (%%(beer)s)' % self.table_prefix, {'beer':"Cooper's"} ) else: self.fail('Invalid paramstyle') self.failUnless(cur.rowcount in (-1,1)) cur.execute('select name from %sbooze' % self.table_prefix) res = cur.fetchall() self.assertEqual(len(res),2,'cursor.fetchall returned too few rows') beers = [res[0][0],res[1][0]] beers.sort() self.assertEqual(beers[0],"Cooper's", 'cursor.fetchall retrieved incorrect data, or data inserted ' 'incorrectly' ) self.assertEqual(beers[1],"Victoria Bitter", 'cursor.fetchall retrieved incorrect data, or data inserted ' 'incorrectly' ) def test_executemany(self): con = self._connect() try: cur = con.cursor() self.executeDDL1(cur) largs = [ ("Cooper's",) , ("Boag's",) ] margs = [ {'beer': "Cooper's"}, {'beer': "Boag's"} ] if self.driver.paramstyle == 
'qmark': cur.executemany( 'insert into %sbooze values (?)' % self.table_prefix, largs ) elif self.driver.paramstyle == 'numeric': cur.executemany( 'insert into %sbooze values (:1)' % self.table_prefix, largs ) elif self.driver.paramstyle == 'named': cur.executemany( 'insert into %sbooze values (:beer)' % self.table_prefix, margs ) elif self.driver.paramstyle == 'format': cur.executemany( 'insert into %sbooze values (%%s)' % self.table_prefix, largs ) elif self.driver.paramstyle == 'pyformat': cur.executemany( 'insert into %sbooze values (%%(beer)s)' % ( self.table_prefix ), margs ) else: self.fail('Unknown paramstyle') self.failUnless(cur.rowcount in (-1,2), 'insert using cursor.executemany set cursor.rowcount to ' 'incorrect value %r' % cur.rowcount ) cur.execute('select name from %sbooze' % self.table_prefix) res = cur.fetchall() self.assertEqual(len(res),2, 'cursor.fetchall retrieved incorrect number of rows' ) beers = [res[0][0],res[1][0]] beers.sort() self.assertEqual(beers[0],"Boag's",'incorrect data retrieved') self.assertEqual(beers[1],"Cooper's",'incorrect data retrieved') finally: con.close() def test_fetchone(self): con = self._connect() try: cur = con.cursor() # cursor.fetchone should raise an Error if called before # executing a select-type query self.assertRaises(self.driver.Error,cur.fetchone) # cursor.fetchone should raise an Error if called after # executing a query that cannnot return rows self.executeDDL1(cur) self.assertRaises(self.driver.Error,cur.fetchone) cur.execute('select name from %sbooze' % self.table_prefix) self.assertEqual(cur.fetchone(),None, 'cursor.fetchone should return None if a query retrieves ' 'no rows' ) self.failUnless(cur.rowcount in (-1,0)) # cursor.fetchone should raise an Error if called after # executing a query that cannnot return rows cur.execute("insert into %sbooze values ('Victoria Bitter')" % ( self.table_prefix )) self.assertRaises(self.driver.Error,cur.fetchone) cur.execute('select name from %sbooze' % 
self.table_prefix) r = cur.fetchone() self.assertEqual(len(r),1, 'cursor.fetchone should have retrieved a single row' ) self.assertEqual(r[0],'Victoria Bitter', 'cursor.fetchone retrieved incorrect data' ) self.assertEqual(cur.fetchone(),None, 'cursor.fetchone should return None if no more rows available' ) self.failUnless(cur.rowcount in (-1,1)) finally: con.close() samples = [ 'Carlton Cold', 'Carlton Draft', 'Mountain Goat', 'Redback', 'Victoria Bitter', 'XXXX' ] def _populate(self): ''' Return a list of sql commands to setup the DB for the fetch tests. ''' populate = [ "insert into %sbooze values ('%s')" % (self.table_prefix,s) for s in self.samples ] return populate def test_fetchmany(self): con = self._connect() try: cur = con.cursor() # cursor.fetchmany should raise an Error if called without #issuing a query self.assertRaises(self.driver.Error,cur.fetchmany,4) self.executeDDL1(cur) for sql in self._populate(): cur.execute(sql) cur.execute('select name from %sbooze' % self.table_prefix) r = cur.fetchmany() self.assertEqual(len(r),1, 'cursor.fetchmany retrieved incorrect number of rows, ' 'default of arraysize is one.' 
) cur.arraysize=10 r = cur.fetchmany(3) # Should get 3 rows self.assertEqual(len(r),3, 'cursor.fetchmany retrieved incorrect number of rows' ) r = cur.fetchmany(4) # Should get 2 more self.assertEqual(len(r),2, 'cursor.fetchmany retrieved incorrect number of rows' ) r = cur.fetchmany(4) # Should be an empty sequence self.assertEqual(len(r),0, 'cursor.fetchmany should return an empty sequence after ' 'results are exhausted' ) self.failUnless(cur.rowcount in (-1,6)) # Same as above, using cursor.arraysize cur.arraysize=4 cur.execute('select name from %sbooze' % self.table_prefix) r = cur.fetchmany() # Should get 4 rows self.assertEqual(len(r),4, 'cursor.arraysize not being honoured by fetchmany' ) r = cur.fetchmany() # Should get 2 more self.assertEqual(len(r),2) r = cur.fetchmany() # Should be an empty sequence self.assertEqual(len(r),0) self.failUnless(cur.rowcount in (-1,6)) cur.arraysize=6 cur.execute('select name from %sbooze' % self.table_prefix) rows = cur.fetchmany() # Should get all rows self.failUnless(cur.rowcount in (-1,6)) self.assertEqual(len(rows),6) self.assertEqual(len(rows),6) rows = [r[0] for r in rows] rows.sort() # Make sure we get the right data back out for i in range(0,6): self.assertEqual(rows[i],self.samples[i], 'incorrect data retrieved by cursor.fetchmany' ) rows = cur.fetchmany() # Should return an empty list self.assertEqual(len(rows),0, 'cursor.fetchmany should return an empty sequence if ' 'called after the whole result set has been fetched' ) self.failUnless(cur.rowcount in (-1,6)) self.executeDDL2(cur) cur.execute('select name from %sbarflys' % self.table_prefix) r = cur.fetchmany() # Should get empty sequence self.assertEqual(len(r),0, 'cursor.fetchmany should return an empty sequence if ' 'query retrieved no rows' ) self.failUnless(cur.rowcount in (-1,0)) finally: con.close() def test_fetchall(self): con = self._connect() try: cur = con.cursor() # cursor.fetchall should raise an Error if called # without executing a query that may 
return rows (such # as a select) self.assertRaises(self.driver.Error, cur.fetchall) self.executeDDL1(cur) for sql in self._populate(): cur.execute(sql) # cursor.fetchall should raise an Error if called # after executing a a statement that cannot return rows self.assertRaises(self.driver.Error,cur.fetchall) cur.execute('select name from %sbooze' % self.table_prefix) rows = cur.fetchall() self.failUnless(cur.rowcount in (-1,len(self.samples))) self.assertEqual(len(rows),len(self.samples), 'cursor.fetchall did not retrieve all rows' ) rows = [r[0] for r in rows] rows.sort() for i in range(0,len(self.samples)): self.assertEqual(rows[i],self.samples[i], 'cursor.fetchall retrieved incorrect rows' ) rows = cur.fetchall() self.assertEqual( len(rows),0, 'cursor.fetchall should return an empty list if called ' 'after the whole result set has been fetched' ) self.failUnless(cur.rowcount in (-1,len(self.samples))) self.executeDDL2(cur) cur.execute('select name from %sbarflys' % self.table_prefix) rows = cur.fetchall() self.failUnless(cur.rowcount in (-1,0)) self.assertEqual(len(rows),0, 'cursor.fetchall should return an empty list if ' 'a select query returns no rows' ) finally: con.close() def test_mixedfetch(self): con = self._connect() try: cur = con.cursor() self.executeDDL1(cur) for sql in self._populate(): cur.execute(sql) cur.execute('select name from %sbooze' % self.table_prefix) rows1 = cur.fetchone() rows23 = cur.fetchmany(2) rows4 = cur.fetchone() rows56 = cur.fetchall() self.failUnless(cur.rowcount in (-1,6)) self.assertEqual(len(rows23),2, 'fetchmany returned incorrect number of rows' ) self.assertEqual(len(rows56),2, 'fetchall returned incorrect number of rows' ) rows = [rows1[0]] rows.extend([rows23[0][0],rows23[1][0]]) rows.append(rows4[0]) rows.extend([rows56[0][0],rows56[1][0]]) rows.sort() for i in range(0,len(self.samples)): self.assertEqual(rows[i],self.samples[i], 'incorrect data retrieved or inserted' ) finally: con.close() def 
help_nextset_setUp(self,cur): ''' Should create a procedure called deleteme that returns two result sets, first the number of rows in booze then "name from booze" ''' raise NotImplementedError('Helper not implemented') #sql=""" # create procedure deleteme as # begin # select count(*) from booze # select name from booze # end #""" #cur.execute(sql) def help_nextset_tearDown(self,cur): 'If cleaning up is needed after nextSetTest' raise NotImplementedError('Helper not implemented') #cur.execute("drop procedure deleteme") def test_nextset(self): con = self._connect() try: cur = con.cursor() if not hasattr(cur,'nextset'): return try: self.executeDDL1(cur) sql=self._populate() for sql in self._populate(): cur.execute(sql) self.help_nextset_setUp(cur) cur.callproc('deleteme') numberofrows=cur.fetchone() assert numberofrows[0]== len(self.samples) assert cur.nextset() names=cur.fetchall() assert len(names) == len(self.samples) s=cur.nextset() assert s == None,'No more return sets, should return None' finally: self.help_nextset_tearDown(cur) finally: con.close() def test_nextset(self): raise NotImplementedError('Drivers need to override this test') def test_arraysize(self): # Not much here - rest of the tests for this are in test_fetchmany con = self._connect() try: cur = con.cursor() self.failUnless(hasattr(cur,'arraysize'), 'cursor.arraysize must be defined' ) finally: con.close() def test_setinputsizes(self): con = self._connect() try: cur = con.cursor() cur.setinputsizes( (25,) ) self._paraminsert(cur) # Make sure cursor still works finally: con.close() def test_setoutputsize_basic(self): # Basic test is to make sure setoutputsize doesn't blow up con = self._connect() try: cur = con.cursor() cur.setoutputsize(1000) cur.setoutputsize(2000,0) self._paraminsert(cur) # Make sure the cursor still works finally: con.close() def test_setoutputsize(self): # Real test for setoutputsize is driver dependant raise NotImplementedError('Driver needed to override this test') def 
test_None(self): con = self._connect() try: cur = con.cursor() self.executeDDL1(cur) cur.execute('insert into %sbooze values (NULL)' % self.table_prefix) cur.execute('select name from %sbooze' % self.table_prefix) r = cur.fetchall() self.assertEqual(len(r),1) self.assertEqual(len(r[0]),1) self.assertEqual(r[0][0],None,'NULL value not returned as None') finally: con.close() def test_Date(self): d1 = self.driver.Date(2002,12,25) d2 = self.driver.DateFromTicks(time.mktime((2002,12,25,0,0,0,0,0,0))) # Can we assume this? API doesn't specify, but it seems implied # self.assertEqual(str(d1),str(d2)) def test_Time(self): t1 = self.driver.Time(13,45,30) t2 = self.driver.TimeFromTicks(time.mktime((2001,1,1,13,45,30,0,0,0))) # Can we assume this? API doesn't specify, but it seems implied # self.assertEqual(str(t1),str(t2)) def test_Timestamp(self): t1 = self.driver.Timestamp(2002,12,25,13,45,30) t2 = self.driver.TimestampFromTicks( time.mktime((2002,12,25,13,45,30,0,0,0)) ) # Can we assume this? API doesn't specify, but it seems implied # self.assertEqual(str(t1),str(t2)) def test_Binary(self): b = self.driver.Binary(str2bytes('Something')) b = self.driver.Binary(str2bytes('')) def test_STRING(self): self.failUnless(hasattr(self.driver,'STRING'), 'module.STRING must be defined' ) def test_BINARY(self): self.failUnless(hasattr(self.driver,'BINARY'), 'module.BINARY must be defined.' ) def test_NUMBER(self): self.failUnless(hasattr(self.driver,'NUMBER'), 'module.NUMBER must be defined.' ) def test_DATETIME(self): self.failUnless(hasattr(self.driver,'DATETIME'), 'module.DATETIME must be defined.' ) def test_ROWID(self): self.failUnless(hasattr(self.driver,'ROWID'), 'module.ROWID must be defined.' )
apache-2.0
Defector/IM-A820L_ics_kernel
tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py
12527
1935
# Util.py - Python extension for perf script, miscellaneous utility code # # Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com> # # This software may be distributed under the terms of the GNU General # Public License ("GPL") version 2 as published by the Free Software # Foundation. import errno, os FUTEX_WAIT = 0 FUTEX_WAKE = 1 FUTEX_PRIVATE_FLAG = 128 FUTEX_CLOCK_REALTIME = 256 FUTEX_CMD_MASK = ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME) NSECS_PER_SEC = 1000000000 def avg(total, n): return total / n def nsecs(secs, nsecs): return secs * NSECS_PER_SEC + nsecs def nsecs_secs(nsecs): return nsecs / NSECS_PER_SEC def nsecs_nsecs(nsecs): return nsecs % NSECS_PER_SEC def nsecs_str(nsecs): str = "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs)), return str def add_stats(dict, key, value): if not dict.has_key(key): dict[key] = (value, value, value, 1) else: min, max, avg, count = dict[key] if value < min: min = value if value > max: max = value avg = (avg + value) / 2 dict[key] = (min, max, avg, count + 1) def clear_term(): print("\x1b[H\x1b[2J") audit_package_warned = False try: import audit machine_to_id = { 'x86_64': audit.MACH_86_64, 'alpha' : audit.MACH_ALPHA, 'ia64' : audit.MACH_IA64, 'ppc' : audit.MACH_PPC, 'ppc64' : audit.MACH_PPC64, 's390' : audit.MACH_S390, 's390x' : audit.MACH_S390X, 'i386' : audit.MACH_X86, 'i586' : audit.MACH_X86, 'i686' : audit.MACH_X86, } try: machine_to_id['armeb'] = audit.MACH_ARMEB except: pass machine_id = machine_to_id[os.uname()[4]] except: if not audit_package_warned: audit_package_warned = True print "Install the audit-libs-python package to get syscall names" def syscall_name(id): try: return audit.audit_syscall_to_name(id, machine_id) except: return str(id) def strerror(nr): try: return errno.errorcode[abs(nr)] except: return "Unknown %d errno" % nr
gpl-2.0
Velociraptor85/pyload
module/PullEvents.py
41
3688
# -*- coding: utf-8 -*- """ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, see <http://www.gnu.org/licenses/>. @author: mkaay """ from time import time from module.utils import uniqify class PullManager(): def __init__(self, core): self.core = core self.clients = [] def newClient(self, uuid): self.clients.append(Client(uuid)) def clean(self): for n, client in enumerate(self.clients): if client.lastActive + 30 < time(): del self.clients[n] def getEvents(self, uuid): events = [] validUuid = False for client in self.clients: if client.uuid == uuid: client.lastActive = time() validUuid = True while client.newEvents(): events.append(client.popEvent().toList()) break if not validUuid: self.newClient(uuid) events = [ReloadAllEvent("queue").toList(), ReloadAllEvent("collector").toList()] return uniqify(events, repr) def addEvent(self, event): for client in self.clients: client.addEvent(event) class Client(): def __init__(self, uuid): self.uuid = uuid self.lastActive = time() self.events = [] def newEvents(self): return len(self.events) > 0 def popEvent(self): if not len(self.events): return None return self.events.pop(0) def addEvent(self, event): self.events.append(event) class UpdateEvent(): def __init__(self, itype, iid, destination): assert itype == "pack" or itype == "file" assert destination == "queue" or destination == "collector" self.type = itype self.id = iid self.destination = destination def toList(self): return ["update", self.destination, 
self.type, self.id] class RemoveEvent(): def __init__(self, itype, iid, destination): assert itype == "pack" or itype == "file" assert destination == "queue" or destination == "collector" self.type = itype self.id = iid self.destination = destination def toList(self): return ["remove", self.destination, self.type, self.id] class InsertEvent(): def __init__(self, itype, iid, after, destination): assert itype == "pack" or itype == "file" assert destination == "queue" or destination == "collector" self.type = itype self.id = iid self.after = after self.destination = destination def toList(self): return ["insert", self.destination, self.type, self.id, self.after] class ReloadAllEvent(): def __init__(self, destination): assert destination == "queue" or destination == "collector" self.destination = destination def toList(self): return ["reload", self.destination] class AccountUpdateEvent(): def toList(self): return ["account"] class ConfigUpdateEvent(): def toList(self): return ["config"]
gpl-3.0
McNetic/CouchPotatoServer-de
libs/requests/packages/urllib3/contrib/ntlmpool.py
1010
4507
""" NTLM authenticating pool, contributed by erikcederstran Issue #10, see: http://code.google.com/p/urllib3/issues/detail?id=10 """ try: from http.client import HTTPSConnection except ImportError: from httplib import HTTPSConnection from logging import getLogger from ntlm import ntlm from urllib3 import HTTPSConnectionPool log = getLogger(__name__) class NTLMConnectionPool(HTTPSConnectionPool): """ Implements an NTLM authentication version of an urllib3 connection pool """ scheme = 'https' def __init__(self, user, pw, authurl, *args, **kwargs): """ authurl is a random URL on the server that is protected by NTLM. user is the Windows user, probably in the DOMAIN\\username format. pw is the password for the user. """ super(NTLMConnectionPool, self).__init__(*args, **kwargs) self.authurl = authurl self.rawuser = user user_parts = user.split('\\', 1) self.domain = user_parts[0].upper() self.user = user_parts[1] self.pw = pw def _new_conn(self): # Performs the NTLM handshake that secures the connection. The socket # must be kept open while requests are performed. self.num_connections += 1 log.debug('Starting NTLM HTTPS connection no. 
%d: https://%s%s' % (self.num_connections, self.host, self.authurl)) headers = {} headers['Connection'] = 'Keep-Alive' req_header = 'Authorization' resp_header = 'www-authenticate' conn = HTTPSConnection(host=self.host, port=self.port) # Send negotiation message headers[req_header] = ( 'NTLM %s' % ntlm.create_NTLM_NEGOTIATE_MESSAGE(self.rawuser)) log.debug('Request headers: %s' % headers) conn.request('GET', self.authurl, None, headers) res = conn.getresponse() reshdr = dict(res.getheaders()) log.debug('Response status: %s %s' % (res.status, res.reason)) log.debug('Response headers: %s' % reshdr) log.debug('Response data: %s [...]' % res.read(100)) # Remove the reference to the socket, so that it can not be closed by # the response object (we want to keep the socket open) res.fp = None # Server should respond with a challenge message auth_header_values = reshdr[resp_header].split(', ') auth_header_value = None for s in auth_header_values: if s[:5] == 'NTLM ': auth_header_value = s[5:] if auth_header_value is None: raise Exception('Unexpected %s response header: %s' % (resp_header, reshdr[resp_header])) # Send authentication message ServerChallenge, NegotiateFlags = \ ntlm.parse_NTLM_CHALLENGE_MESSAGE(auth_header_value) auth_msg = ntlm.create_NTLM_AUTHENTICATE_MESSAGE(ServerChallenge, self.user, self.domain, self.pw, NegotiateFlags) headers[req_header] = 'NTLM %s' % auth_msg log.debug('Request headers: %s' % headers) conn.request('GET', self.authurl, None, headers) res = conn.getresponse() log.debug('Response status: %s %s' % (res.status, res.reason)) log.debug('Response headers: %s' % dict(res.getheaders())) log.debug('Response data: %s [...]' % res.read()[:100]) if res.status != 200: if res.status == 401: raise Exception('Server rejected request: wrong ' 'username or password') raise Exception('Wrong server response: %s %s' % (res.status, res.reason)) res.fp = None log.debug('Connection established') return conn def urlopen(self, method, url, body=None, 
headers=None, retries=3, redirect=True, assert_same_host=True): if headers is None: headers = {} headers['Connection'] = 'Keep-Alive' return super(NTLMConnectionPool, self).urlopen(method, url, body, headers, retries, redirect, assert_same_host)
gpl-3.0
draekko/androguard
androguard/decompiler/dad/graph.py
23
16773
# This file is part of Androguard. # # Copyright (c) 2012 Geoffroy Gueguen <geoffroy.gueguen@gmail.com> # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging from collections import defaultdict from androguard.decompiler.dad.basic_blocks import (build_node_from_block, StatementBlock, CondBlock) from androguard.decompiler.dad.util import get_type from androguard.decompiler.dad.instruction import Variable logger = logging.getLogger('dad.graph') class Graph(object): def __init__(self): self.entry = None self.exit = None self.nodes = list() self.rpo = [] self.edges = defaultdict(list) self.catch_edges = defaultdict(list) self.reverse_edges = defaultdict(list) self.reverse_catch_edges = defaultdict(list) self.loc_to_ins = None self.loc_to_node = None def sucs(self, node): return self.edges.get(node, []) def all_sucs(self, node): return self.edges.get(node, []) + self.catch_edges.get(node, []) def preds(self, node): return [n for n in self.reverse_edges.get(node, []) if not n.in_catch] def all_preds(self, node): return (self.reverse_edges.get(node, []) + self.reverse_catch_edges.get(node, [])) def add_node(self, node): self.nodes.append(node) def add_edge(self, e1, e2): lsucs = self.edges[e1] if e2 not in lsucs: lsucs.append(e2) lpreds = self.reverse_edges[e2] if e1 not in lpreds: lpreds.append(e1) def add_catch_edge(self, e1, e2): lsucs = self.catch_edges[e1] if e2 not in lsucs: lsucs.append(e2) lpreds = self.reverse_catch_edges[e2] if e1 not in lpreds: 
lpreds.append(e1) def remove_node(self, node): preds = self.reverse_edges.get(node, []) for pred in preds: self.edges[pred].remove(node) succs = self.edges.get(node, []) for suc in succs: self.reverse_edges[suc].remove(node) exc_preds = self.reverse_catch_edges.pop(node, []) for pred in exc_preds: self.catch_edges[pred].remove(node) exc_succs = self.catch_edges.pop(node, []) for suc in exc_succs: self.reverse_catch_edges[suc].remove(node) self.nodes.remove(node) if node in self.rpo: self.rpo.remove(node) del node def number_ins(self): self.loc_to_ins = {} self.loc_to_node = {} num = 0 for node in self.rpo: start_node = num num = node.number_ins(num) end_node = num - 1 self.loc_to_ins.update(node.get_loc_with_ins()) self.loc_to_node[start_node, end_node] = node def get_ins_from_loc(self, loc): return self.loc_to_ins.get(loc) def get_node_from_loc(self, loc): for (start, end), node in self.loc_to_node.iteritems(): if start <= loc <= end: return node def remove_ins(self, loc): ins = self.get_ins_from_loc(loc) self.get_node_from_loc(loc).remove_ins(loc, ins) self.loc_to_ins.pop(loc) def compute_rpo(self): ''' Number the nodes in reverse post order. An RPO traversal visit as many predecessors of a node as possible before visiting the node itself. ''' nb = len(self.nodes) + 1 for node in self.post_order(): node.num = nb - node.po self.rpo = sorted(self.nodes, key=lambda n: n.num) def post_order(self): ''' Return the nodes of the graph in post-order i.e we visit all the children of a node before visiting the node itself. 
''' def _visit(n, cnt): visited.add(n) for suc in self.all_sucs(n): if not suc in visited: for cnt, s in _visit(suc, cnt): yield cnt, s n.po = cnt yield cnt + 1, n visited = set() for _, node in _visit(self.entry, 1): yield node def draw(self, name, dname, draw_branches=True): from pydot import Dot, Edge g = Dot() g.set_node_defaults(color='lightgray', style='filled', shape='box', fontname='Courier', fontsize='10') for node in sorted(self.nodes, key=lambda x: x.num): if draw_branches and node.type.is_cond: g.add_edge(Edge(str(node), str(node.true), color='green')) g.add_edge(Edge(str(node), str(node.false), color='red')) else: for suc in self.sucs(node): g.add_edge(Edge(str(node), str(suc), color='blue')) for except_node in self.catch_edges.get(node, []): g.add_edge(Edge(str(node), str(except_node), color='black', style='dashed')) g.write_png('%s/%s.png' % (dname, name)) def immediate_dominators(self): return dom_lt(self) def __len__(self): return len(self.nodes) def __repr__(self): return str(self.nodes) def __iter__(self): for node in self.nodes: yield node def split_if_nodes(graph): ''' Split IfNodes in two nodes, the first node is the header node, the second one is only composed of the jump condition. 
''' node_map = {n: n for n in graph} to_update = set() for node in graph.nodes[:]: if node.type.is_cond: if len(node.get_ins()) > 1: pre_ins = node.get_ins()[:-1] last_ins = node.get_ins()[-1] pre_node = StatementBlock('%s-pre' % node.name, pre_ins) cond_node = CondBlock('%s-cond' % node.name, [last_ins]) node_map[node] = pre_node node_map[pre_node] = pre_node node_map[cond_node] = cond_node pre_node.copy_from(node) cond_node.copy_from(node) for var in node.var_to_declare: pre_node.add_variable_declaration(var) pre_node.type.is_stmt = True cond_node.true = node.true cond_node.false = node.false for pred in graph.all_preds(node): pred_node = node_map[pred] # Verify that the link is not an exception link if node not in graph.sucs(pred): graph.add_catch_edge(pred_node, pre_node) continue if pred is node: pred_node = cond_node if pred.type.is_cond: # and not (pred is node): if pred.true is node: pred_node.true = pre_node if pred.false is node: pred_node.false = pre_node graph.add_edge(pred_node, pre_node) for suc in graph.sucs(node): graph.add_edge(cond_node, node_map[suc]) # We link all the exceptions to the pre node instead of the # condition node, which should not trigger any of them. for suc in graph.catch_edges.get(node, []): graph.add_catch_edge(pre_node, node_map[suc]) if node is graph.entry: graph.entry = pre_node graph.add_node(pre_node) graph.add_node(cond_node) graph.add_edge(pre_node, cond_node) pre_node.update_attribute_with(node_map) cond_node.update_attribute_with(node_map) graph.remove_node(node) else: to_update.add(node) for node in to_update: node.update_attribute_with(node_map) def simplify(graph): ''' Simplify the CFG by merging/deleting statement nodes when possible: If statement B follows statement A and if B has no other predecessor besides A, then we can merge A and B into a new statement node. We also remove nodes which do nothing except redirecting the control flow (nodes which only contains a goto). 
''' redo = True while redo: redo = False node_map = {} to_update = set() for node in graph.nodes[:]: if node.type.is_stmt and node in graph: sucs = graph.all_sucs(node) if len(sucs) != 1: continue suc = sucs[0] if len(node.get_ins()) == 0: if any(pred.type.is_switch for pred in graph.all_preds(node)): continue if node is suc: continue node_map[node] = suc for pred in graph.all_preds(node): pred.update_attribute_with(node_map) if node not in graph.sucs(pred): graph.add_catch_edge(pred, suc) continue graph.add_edge(pred, suc) redo = True if node is graph.entry: graph.entry = suc graph.remove_node(node) elif (suc.type.is_stmt and len(graph.all_preds(suc)) == 1 and not (suc in graph.catch_edges) and not ((node is suc) or (suc is graph.entry))): ins_to_merge = suc.get_ins() node.add_ins(ins_to_merge) for var in suc.var_to_declare: node.add_variable_declaration(var) new_suc = graph.sucs(suc)[0] if new_suc: graph.add_edge(node, new_suc) for exception_suc in graph.catch_edges.get(suc, []): graph.add_catch_edge(node, exception_suc) redo = True graph.remove_node(suc) else: to_update.add(node) for node in to_update: node.update_attribute_with(node_map) def dom_lt(graph): '''Dominator algorithm from Lengaeur-Tarjan''' def _dfs(v, n): semi[v] = n = n + 1 vertex[n] = label[v] = v ancestor[v] = 0 for w in graph.all_sucs(v): if not semi[w]: parent[w] = v n = _dfs(w, n) pred[w].add(v) return n def _compress(v): u = ancestor[v] if ancestor[u]: _compress(u) if semi[label[u]] < semi[label[v]]: label[v] = label[u] ancestor[v] = ancestor[u] def _eval(v): if ancestor[v]: _compress(v) return label[v] return v def _link(v, w): ancestor[w] = v parent, ancestor, vertex = {}, {}, {} label, dom = {}, {} pred, bucket = defaultdict(set), defaultdict(set) # Step 1: semi = {v: 0 for v in graph.nodes} n = _dfs(graph.entry, 0) for i in xrange(n, 1, -1): w = vertex[i] # Step 2: for v in pred[w]: u = _eval(v) y = semi[w] = min(semi[w], semi[u]) bucket[vertex[y]].add(w) pw = parent[w] _link(pw, w) # 
Step 3: bpw = bucket[pw] while bpw: v = bpw.pop() u = _eval(v) dom[v] = u if semi[u] < semi[v] else pw # Step 4: for i in range(2, n + 1): w = vertex[i] dw = dom[w] if dw != vertex[semi[w]]: dom[w] = dom[dw] dom[graph.entry] = None return dom def bfs(start): to_visit = [start] visited = set([start]) while to_visit: node = to_visit.pop(0) yield node if node.exception_analysis: for _, _, exception in node.exception_analysis.exceptions: if exception not in visited: to_visit.append(exception) visited.add(exception) for _, _, child in node.childs: if child not in visited: to_visit.append(child) visited.add(child) class GenInvokeRetName(object): def __init__(self): self.num = 0 self.ret = None def new(self): self.num += 1 self.ret = Variable('tmp%d' % self.num) return self.ret def set_to(self, ret): self.ret = ret def last(self): return self.ret def make_node(graph, block, block_to_node, vmap, gen_ret): node = block_to_node.get(block) if node is None: node = build_node_from_block(block, vmap, gen_ret) block_to_node[block] = node if block.exception_analysis: for _type, _, exception_target in block.exception_analysis.exceptions: exception_node = block_to_node.get(exception_target) if exception_node is None: exception_node = build_node_from_block(exception_target, vmap, gen_ret, _type) exception_node.set_catch_type(_type) exception_node.in_catch = True block_to_node[exception_target] = exception_node graph.add_catch_edge(node, exception_node) for _, _, child_block in block.childs: child_node = block_to_node.get(child_block) if child_node is None: child_node = build_node_from_block(child_block, vmap, gen_ret) block_to_node[child_block] = child_node graph.add_edge(node, child_node) if node.type.is_switch: node.add_case(child_node) if node.type.is_cond: if_target = ((block.end / 2) - (block.last_length / 2) + node.off_last_ins) child_addr = child_block.start / 2 if if_target == child_addr: node.true = child_node else: node.false = child_node # Check that both branch of the if 
point to something # It may happen that both branch point to the same node, in this case # the false branch will be None. So we set it to the right node. # TODO: In this situation, we should transform the condition node into # a statement node if node.type.is_cond and node.false is None: node.false = node.true return node def construct(start_block, vmap, exceptions): bfs_blocks = bfs(start_block) graph = Graph() gen_ret = GenInvokeRetName() # Construction of a mapping of basic blocks into Nodes block_to_node = {} exceptions_start_block = [] for exception in exceptions: for _, _, block in exception.exceptions: exceptions_start_block.append(block) for block in bfs_blocks: node = make_node(graph, block, block_to_node, vmap, gen_ret) graph.add_node(node) graph.entry = block_to_node[start_block] del block_to_node, bfs_blocks graph.compute_rpo() graph.number_ins() for node in graph.rpo: preds = [pred for pred in graph.all_preds(node) if pred.num < node.num] if preds and all(pred.in_catch for pred in preds): node.in_catch = True # Create a list of Node which are 'return' node # There should be one and only one node of this type # If this is not the case, try to continue anyway by setting the exit node # to the one which has the greatest RPO number (not necessarily the case) lexit_nodes = [node for node in graph if node.type.is_return] if len(lexit_nodes) > 1: # Not sure that this case is possible... logger.error('Multiple exit nodes found !') graph.exit = graph.rpo[-1] elif len(lexit_nodes) < 1: # A method can have no return if it has throw statement(s) or if its # body is a while(1) whitout break/return. logger.debug('No exit node found !') else: graph.exit = lexit_nodes[0] return graph
apache-2.0
nongxiaoming/rt-thread
bsp/imx6sx/cortex-a9/rtconfig.py
29
2499
import os # toolchains options ARCH='arm' CPU='i.mx6' CROSS_TOOL='gcc' if os.getenv('RTT_CC'): CROSS_TOOL = os.getenv('RTT_CC') if CROSS_TOOL == 'gcc': PLATFORM = 'gcc' # EXEC_PATH = r'/opt/arm-2012.09/bin' EXEC_PATH = '/opt/gcc-arm-none-eabi-4_8-2014q1_gri/bin' elif CROSS_TOOL == 'keil': PLATFORM = 'armcc' EXEC_PATH = 'C:/Keil' if os.getenv('RTT_EXEC_PATH'): EXEC_PATH = os.getenv('RTT_EXEC_PATH') BUILD = 'debug' if PLATFORM == 'gcc': # toolchains PREFIX = 'arm-none-eabi-' CC = PREFIX + 'gcc' CXX = PREFIX + 'g++' AS = PREFIX + 'gcc' AR = PREFIX + 'ar' LINK = PREFIX + 'gcc' TARGET_EXT = 'elf' SIZE = PREFIX + 'size' OBJDUMP = PREFIX + 'objdump' OBJCPY = PREFIX + 'objcopy' DEVICE = ' -march=armv7-a -mtune=cortex-a9 -mfpu=vfpv3-d16 -ftree-vectorize -ffast-math -mfloat-abi=softfp' CFLAGS = DEVICE + ' -Wall' AFLAGS = ' -c' + DEVICE + ' -x assembler-with-cpp -D__ASSEMBLY__' LINK_SCRIPT = 'imx6.lds' LFLAGS = DEVICE + ' -Wl,--gc-sections,-Map=rtthread-imx6.map,-cref,-u,system_vectors'+\ ' -T %s' % LINK_SCRIPT CPATH = '' LPATH = '' # generate debug info in all cases AFLAGS += ' -gdwarf-2' CFLAGS += ' -g -gdwarf-2' if BUILD == 'debug': CFLAGS += ' -O0' else: CFLAGS += ' -O2' POST_ACTION = OBJCPY + ' -O binary $TARGET rtthread.bin\n' +\ SIZE + ' $TARGET \n' elif PLATFORM == 'armcc': # toolchains CC = 'armcc' CXX = 'armcc' AS = 'armasm' AR = 'armar' LINK = 'armlink' TARGET_EXT = 'axf' DEVICE = ' --device DARMP' CFLAGS = DEVICE + ' --apcs=interwork' AFLAGS = DEVICE LFLAGS = DEVICE + ' --info sizes --info totals --info unused --info veneers --list rtthread-imx6.map --scatter imx6.sct' CFLAGS += ' -I' + EXEC_PATH + '/ARM/RV31/INC' LFLAGS += ' --libpath ' + EXEC_PATH + '/ARM/RV31/LIB' EXEC_PATH += '/arm/bin40/' if BUILD == 'debug': CFLAGS += ' -g -O0' AFLAGS += ' -g' else: CFLAGS += ' -O2' POST_ACTION = 'fromelf --bin $TARGET --output rtthread.bin \nfromelf -z $TARGET' elif PLATFORM == 'iar': # toolchains CC = 'iccarm' AS = 'iasmarm' AR = 'iarchive' LINK = 'ilinkarm' TARGET_EXT = 
'out' DEVICE = ' --cpu DARMP' CFLAGS = '' AFLAGS = '' LFLAGS = ' --config imx6.icf' EXEC_PATH += '/arm/bin/' RT_USING_MINILIBC = False POST_ACTION = ''
apache-2.0
jwlawson/tensorflow
tensorflow/contrib/distributions/python/ops/geometric.py
56
7542
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """The Geometric distribution class.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.ops import array_ops from tensorflow.python.ops import check_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn from tensorflow.python.ops import random_ops from tensorflow.python.ops.distributions import distribution from tensorflow.python.ops.distributions import util as distribution_util class Geometric(distribution.Distribution): """Geometric distribution. The Geometric distribution is parameterized by p, the probability of a positive event. It represents the probability that in k + 1 Bernoulli trials, the first k trials failed, before seeing a success. The pmf of this distribution is: #### Mathematical Details ```none pmf(k; p) = (1 - p)**k * p ``` where: * `p` is the success probability, `0 < p <= 1`, and, * `k` is a non-negative integer. 
""" def __init__(self, logits=None, probs=None, validate_args=False, allow_nan_stats=True, name="Geometric"): """Construct Geometric distributions. Args: logits: Floating-point `Tensor` with shape `[B1, ..., Bb]` where `b >= 0` indicates the number of batch dimensions. Each entry represents logits for the probability of success for independent Geometric distributions and must be in the range `(-inf, inf]`. Only one of `logits` or `probs` should be specified. probs: Positive floating-point `Tensor` with shape `[B1, ..., Bb]` where `b >= 0` indicates the number of batch dimensions. Each entry represents the probability of success for independent Geometric distributions and must be in the range `(0, 1]`. Only one of `logits` or `probs` should be specified. validate_args: Python `bool`, default `False`. When `True` distribution parameters are checked for validity despite possibly degrading runtime performance. When `False` invalid inputs may silently render incorrect outputs. allow_nan_stats: Python `bool`, default `True`. When `True`, statistics (e.g., mean, mode, variance) use the value "`NaN`" to indicate the result is undefined. When `False`, an exception is raised if one or more of the statistic's batch members are undefined. name: Python `str` name prefixed to Ops created by this class. 
""" parameters = locals() with ops.name_scope(name, values=[logits, probs]): self._logits, self._probs = distribution_util.get_logits_and_probs( logits, probs, validate_args=validate_args, name=name) with ops.control_dependencies( [check_ops.assert_positive(self._probs)] if validate_args else []): self._probs = array_ops.identity(self._probs, name="probs") super(Geometric, self).__init__( dtype=self._probs.dtype, reparameterization_type=distribution.NOT_REPARAMETERIZED, validate_args=validate_args, allow_nan_stats=allow_nan_stats, parameters=parameters, graph_parents=[self._probs, self._logits], name=name) @property def logits(self): """Log-odds of a `1` outcome (vs `0`).""" return self._logits @property def probs(self): """Probability of a `1` outcome (vs `0`).""" return self._probs def _batch_shape_tensor(self): return array_ops.shape(self._probs) def _batch_shape(self): return self.probs.get_shape() def _event_shape_tensor(self): return array_ops.constant([], dtype=dtypes.int32) def _event_shape(self): return tensor_shape.scalar() def _sample_n(self, n, seed=None): # Uniform variates must be sampled from the open-interval `(0, 1)` rather # than `[0, 1)`. To do so, we use `np.finfo(self.dtype.as_numpy_dtype).tiny` # because it is the smallest, positive, "normal" number. A "normal" number # is such that the mantissa has an implicit leading 1. Normal, positive # numbers x, y have the reasonable property that, `x + y >= max(x, y)`. In # this case, a subnormal number (i.e., np.nextafter) can cause us to sample # 0. sampled = random_ops.random_uniform( array_ops.concat([[n], array_ops.shape(self._probs)], 0), minval=np.finfo(self.dtype.as_numpy_dtype).tiny, maxval=1., seed=seed, dtype=self.dtype) return math_ops.floor( math_ops.log(sampled) / math_ops.log1p(-self.probs)) def _cdf(self, x): if self.validate_args: x = distribution_util.embed_check_nonnegative_integer_form(x) else: # Whether or not x is integer-form, the following is well-defined. 
# However, scipy takes the floor, so we do too. x = math_ops.floor(x) x *= array_ops.ones_like(self.probs) return array_ops.where( x < 0., array_ops.zeros_like(x), -math_ops.expm1((1. + x) * math_ops.log1p(-self.probs))) def _log_prob(self, x): if self.validate_args: x = distribution_util.embed_check_nonnegative_integer_form(x) else: # For consistency with cdf, we take the floor. x = math_ops.floor(x) x *= array_ops.ones_like(self.probs) probs = self.probs * array_ops.ones_like(x) safe_domain = array_ops.where( math_ops.equal(x, 0.), array_ops.zeros_like(probs), probs) return x * math_ops.log1p(-safe_domain) + math_ops.log(probs) def _entropy(self): probs = self._probs if self.validate_args: probs = control_flow_ops.with_dependencies( [check_ops.assert_less( probs, constant_op.constant(1., probs.dtype), message="Entropy is undefined when logits = inf or probs = 1.")], probs) # Claim: entropy(p) = softplus(s)/p - s # where s=logits and p=probs. # # Proof: # # entropy(p) # := -[(1-p)log(1-p) + plog(p)]/p # = -[log(1-p) + plog(p/(1-p))]/p # = -[-softplus(s) + ps]/p # = softplus(s)/p - s # # since, # log[1-sigmoid(s)] # = log[1/(1+exp(s)] # = -log[1+exp(s)] # = -softplus(s) # # using the fact that, # 1-sigmoid(s) = sigmoid(-s) = 1/(1+exp(s)) return nn.softplus(self.logits) / probs - self.logits def _mean(self): return math_ops.exp(-self.logits) def _variance(self): return self._mean() / self.probs def _mode(self): return array_ops.zeros(self.batch_shape_tensor(), dtype=self.dtype)
apache-2.0
esikachev/sahara-backup
sahara/tests/integration/tests/scaling.py
11
5131
# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_utils import excutils from sahara.tests.integration.tests import base class ScalingTest(base.ITestCase): def _change_node_info_while_ng_adding(self, ngt_id, count, cluster_info): cluster_info['node_info']['node_count'] += count node_processes = self.sahara.node_group_templates.get( ngt_id).node_processes if cluster_info['plugin_config'].PROCESS_NAMES['tt'] in node_processes: cluster_info['node_info']['tasktracker_count'] += count if cluster_info['plugin_config'].PROCESS_NAMES['dn'] in node_processes: cluster_info['node_info']['datanode_count'] += count def _change_node_info_while_ng_resizing(self, name, count, cluster_info): node_groups = self.sahara.clusters.get( cluster_info['cluster_id']).node_groups for node_group in node_groups: if node_group['name'] == name: processes = node_group['node_processes'] old_count = node_group['count'] cluster_info['node_info']['node_count'] += -old_count + count if cluster_info['plugin_config'].PROCESS_NAMES['tt'] in processes: cluster_info['node_info']['tasktracker_count'] += ( -old_count + count ) if cluster_info['plugin_config'].PROCESS_NAMES['dn'] in processes: cluster_info['node_info']['datanode_count'] += -old_count + count @staticmethod def _add_new_field_to_scale_body_while_ng_resizing( scale_body, name, count): scale_body['resize_node_groups'].append( { 'name': name, 'count': count } ) @staticmethod def 
_add_new_field_to_scale_body_while_ng_adding( scale_body, ngt_id, count, name): scale_body['add_node_groups'].append( { 'node_group_template_id': ngt_id, 'count': count, 'name': name } ) @base.skip_test('SKIP_SCALING_TEST', 'Test for cluster scaling was skipped.') def cluster_scaling(self, cluster_info, change_list): scale_body = {'add_node_groups': [], 'resize_node_groups': []} for change in change_list: if change['operation'] == 'resize': node_group_name = change['info'][0] node_group_size = change['info'][1] self._add_new_field_to_scale_body_while_ng_resizing( scale_body, node_group_name, node_group_size ) self._change_node_info_while_ng_resizing( node_group_name, node_group_size, cluster_info ) if change['operation'] == 'add': node_group_name = change['info'][0] node_group_size = change['info'][1] node_group_id = change['info'][2] self._add_new_field_to_scale_body_while_ng_adding( scale_body, node_group_id, node_group_size, node_group_name ) self._change_node_info_while_ng_adding( node_group_id, node_group_size, cluster_info ) scale_body = {key: value for key, value in scale_body.items() if value} self.sahara.clusters.scale(cluster_info['cluster_id'], scale_body) self.poll_cluster_state(cluster_info['cluster_id']) new_node_ip_list = self.get_cluster_node_ip_list_with_node_processes( cluster_info['cluster_id'] ) try: new_node_info = self.get_node_info(new_node_ip_list, cluster_info['plugin_config']) except Exception as e: with excutils.save_and_reraise_exception(): print( '\nFailure during check of node process deployment ' 'on cluster node: ' + str(e) ) expected_node_info = cluster_info['node_info'] self.assertEqual( expected_node_info, new_node_info, 'Failure while node info comparison.\n' 'Expected node info after cluster scaling: %s.\n' 'Actual node info after cluster scaling: %s.' 
% (expected_node_info, new_node_info) ) return { 'cluster_id': cluster_info['cluster_id'], 'node_ip_list': new_node_ip_list, 'node_info': new_node_info, 'plugin_config': cluster_info['plugin_config'] }
apache-2.0
bcrochet/python-ironicclient
ironicclient/openstack/common/importutils.py
7
2166
# -*- coding: utf-8 -*- # # Copyright 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Import related utilities and helper functions. """ import sys import traceback def import_class(import_str): """Returns a class from a string including module and class""" mod_str, _sep, class_str = import_str.rpartition('.') try: __import__(mod_str) return getattr(sys.modules[mod_str], class_str) except (ValueError, AttributeError): raise ImportError('Class %s cannot be found (%s)' % (class_str, traceback.format_exception(*sys.exc_info()))) def import_object(import_str, *args, **kwargs): """Import a class and return an instance of it.""" return import_class(import_str)(*args, **kwargs) def import_object_ns(name_space, import_str, *args, **kwargs): """ Import a class and return an instance of it, first by trying to find the class in a default namespace, then failing back to a full path if not found in the default namespace. """ import_value = "%s.%s" % (name_space, import_str) try: return import_class(import_value)(*args, **kwargs) except ImportError: return import_class(import_str)(*args, **kwargs) def import_module(import_str): """Import a module.""" __import__(import_str) return sys.modules[import_str] def try_import(import_str, default=None): """Try to import a module and if it fails return default.""" try: return import_module(import_str) except ImportError: return default
apache-2.0
makhtardiouf/makhtardiouf.github.io
angularcode/node_modules/node-gyp/gyp/pylib/gyp/generator/analyzer.py
1382
30567
# Copyright (c) 2014 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """ This script is intended for use as a GYP_GENERATOR. It takes as input (by way of the generator flag config_path) the path of a json file that dictates the files and targets to search for. The following keys are supported: files: list of paths (relative) of the files to search for. test_targets: unqualified target names to search for. Any target in this list that depends upon a file in |files| is output regardless of the type of target or chain of dependencies. additional_compile_targets: Unqualified targets to search for in addition to test_targets. Targets in the combined list that depend upon a file in |files| are not necessarily output. For example, if the target is of type none then the target is not output (but one of the descendants of the target will be). The following is output: error: only supplied if there is an error. compile_targets: minimal set of targets that directly or indirectly (for targets of type none) depend on the files in |files| and is one of the supplied targets or a target that one of the supplied targets depends on. The expectation is this set of targets is passed into a build step. This list always contains the output of test_targets as well. test_targets: set of targets from the supplied |test_targets| that either directly or indirectly depend upon a file in |files|. This list if useful if additional processing needs to be done for certain targets after the build, such as running tests. status: outputs one of three values: none of the supplied files were found, one of the include files changed so that it should be assumed everything changed (in this case test_targets and compile_targets are not output) or at least one file was found. invalid_targets: list of supplied targets that were not found. 
Example: Consider a graph like the following: A D / \ B C A depends upon both B and C, A is of type none and B and C are executables. D is an executable, has no dependencies and nothing depends on it. If |additional_compile_targets| = ["A"], |test_targets| = ["B", "C"] and files = ["b.cc", "d.cc"] (B depends upon b.cc and D depends upon d.cc), then the following is output: |compile_targets| = ["B"] B must built as it depends upon the changed file b.cc and the supplied target A depends upon it. A is not output as a build_target as it is of type none with no rules and actions. |test_targets| = ["B"] B directly depends upon the change file b.cc. Even though the file d.cc, which D depends upon, has changed D is not output as it was not supplied by way of |additional_compile_targets| or |test_targets|. If the generator flag analyzer_output_path is specified, output is written there. Otherwise output is written to stdout. In Gyp the "all" target is shorthand for the root targets in the files passed to gyp. For example, if file "a.gyp" contains targets "a1" and "a2", and file "b.gyp" contains targets "b1" and "b2" and "a2" has a dependency on "b2" and gyp is supplied "a.gyp" then "all" consists of "a1" and "a2". Notice that "b1" and "b2" are not in the "all" target as "b.gyp" was not directly supplied to gyp. OTOH if both "a.gyp" and "b.gyp" are supplied to gyp then the "all" target includes "b1" and "b2". """ import gyp.common import gyp.ninja_syntax as ninja_syntax import json import os import posixpath import sys debug = False found_dependency_string = 'Found dependency' no_dependency_string = 'No dependencies' # Status when it should be assumed that everything has changed. all_changed_string = 'Found dependency (all)' # MatchStatus is used indicate if and how a target depends upon the supplied # sources. # The target's sources contain one of the supplied paths. 
MATCH_STATUS_MATCHES = 1 # The target has a dependency on another target that contains one of the # supplied paths. MATCH_STATUS_MATCHES_BY_DEPENDENCY = 2 # The target's sources weren't in the supplied paths and none of the target's # dependencies depend upon a target that matched. MATCH_STATUS_DOESNT_MATCH = 3 # The target doesn't contain the source, but the dependent targets have not yet # been visited to determine a more specific status yet. MATCH_STATUS_TBD = 4 generator_supports_multiple_toolsets = gyp.common.CrossCompileRequested() generator_wants_static_library_dependencies_adjusted = False generator_default_variables = { } for dirname in ['INTERMEDIATE_DIR', 'SHARED_INTERMEDIATE_DIR', 'PRODUCT_DIR', 'LIB_DIR', 'SHARED_LIB_DIR']: generator_default_variables[dirname] = '!!!' for unused in ['RULE_INPUT_PATH', 'RULE_INPUT_ROOT', 'RULE_INPUT_NAME', 'RULE_INPUT_DIRNAME', 'RULE_INPUT_EXT', 'EXECUTABLE_PREFIX', 'EXECUTABLE_SUFFIX', 'STATIC_LIB_PREFIX', 'STATIC_LIB_SUFFIX', 'SHARED_LIB_PREFIX', 'SHARED_LIB_SUFFIX', 'CONFIGURATION_NAME']: generator_default_variables[unused] = '' def _ToGypPath(path): """Converts a path to the format used by gyp.""" if os.sep == '\\' and os.altsep == '/': return path.replace('\\', '/') return path def _ResolveParent(path, base_path_components): """Resolves |path|, which starts with at least one '../'. Returns an empty string if the path shouldn't be considered. See _AddSources() for a description of |base_path_components|.""" depth = 0 while path.startswith('../'): depth += 1 path = path[3:] # Relative includes may go outside the source tree. For example, an action may # have inputs in /usr/include, which are not in the source tree. 
if depth > len(base_path_components): return '' if depth == len(base_path_components): return path return '/'.join(base_path_components[0:len(base_path_components) - depth]) + \ '/' + path def _AddSources(sources, base_path, base_path_components, result): """Extracts valid sources from |sources| and adds them to |result|. Each source file is relative to |base_path|, but may contain '..'. To make resolving '..' easier |base_path_components| contains each of the directories in |base_path|. Additionally each source may contain variables. Such sources are ignored as it is assumed dependencies on them are expressed and tracked in some other means.""" # NOTE: gyp paths are always posix style. for source in sources: if not len(source) or source.startswith('!!!') or source.startswith('$'): continue # variable expansion may lead to //. org_source = source source = source[0] + source[1:].replace('//', '/') if source.startswith('../'): source = _ResolveParent(source, base_path_components) if len(source): result.append(source) continue result.append(base_path + source) if debug: print 'AddSource', org_source, result[len(result) - 1] def _ExtractSourcesFromAction(action, base_path, base_path_components, results): if 'inputs' in action: _AddSources(action['inputs'], base_path, base_path_components, results) def _ToLocalPath(toplevel_dir, path): """Converts |path| to a path relative to |toplevel_dir|.""" if path == toplevel_dir: return '' if path.startswith(toplevel_dir + '/'): return path[len(toplevel_dir) + len('/'):] return path def _ExtractSources(target, target_dict, toplevel_dir): # |target| is either absolute or relative and in the format of the OS. Gyp # source paths are always posix. Convert |target| to a posix path relative to # |toplevel_dir_|. This is done to make it easy to build source paths. 
base_path = posixpath.dirname(_ToLocalPath(toplevel_dir, _ToGypPath(target))) base_path_components = base_path.split('/') # Add a trailing '/' so that _AddSources() can easily build paths. if len(base_path): base_path += '/' if debug: print 'ExtractSources', target, base_path results = [] if 'sources' in target_dict: _AddSources(target_dict['sources'], base_path, base_path_components, results) # Include the inputs from any actions. Any changes to these affect the # resulting output. if 'actions' in target_dict: for action in target_dict['actions']: _ExtractSourcesFromAction(action, base_path, base_path_components, results) if 'rules' in target_dict: for rule in target_dict['rules']: _ExtractSourcesFromAction(rule, base_path, base_path_components, results) return results class Target(object): """Holds information about a particular target: deps: set of Targets this Target depends upon. This is not recursive, only the direct dependent Targets. match_status: one of the MatchStatus values. back_deps: set of Targets that have a dependency on this Target. visited: used during iteration to indicate whether we've visited this target. This is used for two iterations, once in building the set of Targets and again in _GetBuildTargets(). name: fully qualified name of the target. requires_build: True if the target type is such that it needs to be built. See _DoesTargetTypeRequireBuild for details. added_to_compile_targets: used when determining if the target was added to the set of targets that needs to be built. in_roots: true if this target is a descendant of one of the root nodes. is_executable: true if the type of target is executable. is_static_library: true if the type of target is static_library. 
is_or_has_linked_ancestor: true if the target does a link (eg executable), or if there is a target in back_deps that does a link.""" def __init__(self, name): self.deps = set() self.match_status = MATCH_STATUS_TBD self.back_deps = set() self.name = name # TODO(sky): I don't like hanging this off Target. This state is specific # to certain functions and should be isolated there. self.visited = False self.requires_build = False self.added_to_compile_targets = False self.in_roots = False self.is_executable = False self.is_static_library = False self.is_or_has_linked_ancestor = False class Config(object): """Details what we're looking for files: set of files to search for targets: see file description for details.""" def __init__(self): self.files = [] self.targets = set() self.additional_compile_target_names = set() self.test_target_names = set() def Init(self, params): """Initializes Config. This is a separate method as it raises an exception if there is a parse error.""" generator_flags = params.get('generator_flags', {}) config_path = generator_flags.get('config_path', None) if not config_path: return try: f = open(config_path, 'r') config = json.load(f) f.close() except IOError: raise Exception('Unable to open file ' + config_path) except ValueError as e: raise Exception('Unable to parse config file ' + config_path + str(e)) if not isinstance(config, dict): raise Exception('config_path must be a JSON file containing a dictionary') self.files = config.get('files', []) self.additional_compile_target_names = set( config.get('additional_compile_targets', [])) self.test_target_names = set(config.get('test_targets', [])) def _WasBuildFileModified(build_file, data, files, toplevel_dir): """Returns true if the build file |build_file| is either in |files| or one of the files included by |build_file| is in |files|. 
|toplevel_dir| is the root of the source tree.""" if _ToLocalPath(toplevel_dir, _ToGypPath(build_file)) in files: if debug: print 'gyp file modified', build_file return True # First element of included_files is the file itself. if len(data[build_file]['included_files']) <= 1: return False for include_file in data[build_file]['included_files'][1:]: # |included_files| are relative to the directory of the |build_file|. rel_include_file = \ _ToGypPath(gyp.common.UnrelativePath(include_file, build_file)) if _ToLocalPath(toplevel_dir, rel_include_file) in files: if debug: print 'included gyp file modified, gyp_file=', build_file, \ 'included file=', rel_include_file return True return False def _GetOrCreateTargetByName(targets, target_name): """Creates or returns the Target at targets[target_name]. If there is no Target for |target_name| one is created. Returns a tuple of whether a new Target was created and the Target.""" if target_name in targets: return False, targets[target_name] target = Target(target_name) targets[target_name] = target return True, target def _DoesTargetTypeRequireBuild(target_dict): """Returns true if the target type is such that it needs to be built.""" # If a 'none' target has rules or actions we assume it requires a build. return bool(target_dict['type'] != 'none' or target_dict.get('actions') or target_dict.get('rules')) def _GenerateTargets(data, target_list, target_dicts, toplevel_dir, files, build_files): """Returns a tuple of the following: . A dictionary mapping from fully qualified name to Target. . A list of the targets that have a source file in |files|. . Targets that constitute the 'all' target. See description at top of file for details on the 'all' target. This sets the |match_status| of the targets that contain any of the source files in |files| to MATCH_STATUS_MATCHES. |toplevel_dir| is the root of the source tree.""" # Maps from target name to Target. name_to_target = {} # Targets that matched. 
matching_targets = [] # Queue of targets to visit. targets_to_visit = target_list[:] # Maps from build file to a boolean indicating whether the build file is in # |files|. build_file_in_files = {} # Root targets across all files. roots = set() # Set of Targets in |build_files|. build_file_targets = set() while len(targets_to_visit) > 0: target_name = targets_to_visit.pop() created_target, target = _GetOrCreateTargetByName(name_to_target, target_name) if created_target: roots.add(target) elif target.visited: continue target.visited = True target.requires_build = _DoesTargetTypeRequireBuild( target_dicts[target_name]) target_type = target_dicts[target_name]['type'] target.is_executable = target_type == 'executable' target.is_static_library = target_type == 'static_library' target.is_or_has_linked_ancestor = (target_type == 'executable' or target_type == 'shared_library') build_file = gyp.common.ParseQualifiedTarget(target_name)[0] if not build_file in build_file_in_files: build_file_in_files[build_file] = \ _WasBuildFileModified(build_file, data, files, toplevel_dir) if build_file in build_files: build_file_targets.add(target) # If a build file (or any of its included files) is modified we assume all # targets in the file are modified. if build_file_in_files[build_file]: print 'matching target from modified build file', target_name target.match_status = MATCH_STATUS_MATCHES matching_targets.append(target) else: sources = _ExtractSources(target_name, target_dicts[target_name], toplevel_dir) for source in sources: if _ToGypPath(os.path.normpath(source)) in files: print 'target', target_name, 'matches', source target.match_status = MATCH_STATUS_MATCHES matching_targets.append(target) break # Add dependencies to visit as well as updating back pointers for deps. 
for dep in target_dicts[target_name].get('dependencies', []): targets_to_visit.append(dep) created_dep_target, dep_target = _GetOrCreateTargetByName(name_to_target, dep) if not created_dep_target: roots.discard(dep_target) target.deps.add(dep_target) dep_target.back_deps.add(target) return name_to_target, matching_targets, roots & build_file_targets def _GetUnqualifiedToTargetMapping(all_targets, to_find): """Returns a tuple of the following: . mapping (dictionary) from unqualified name to Target for all the Targets in |to_find|. . any target names not found. If this is empty all targets were found.""" result = {} if not to_find: return {}, [] to_find = set(to_find) for target_name in all_targets.keys(): extracted = gyp.common.ParseQualifiedTarget(target_name) if len(extracted) > 1 and extracted[1] in to_find: to_find.remove(extracted[1]) result[extracted[1]] = all_targets[target_name] if not to_find: return result, [] return result, [x for x in to_find] def _DoesTargetDependOnMatchingTargets(target): """Returns true if |target| or any of its dependencies is one of the targets containing the files supplied as input to analyzer. This updates |matches| of the Targets as it recurses. target: the Target to look for.""" if target.match_status == MATCH_STATUS_DOESNT_MATCH: return False if target.match_status == MATCH_STATUS_MATCHES or \ target.match_status == MATCH_STATUS_MATCHES_BY_DEPENDENCY: return True for dep in target.deps: if _DoesTargetDependOnMatchingTargets(dep): target.match_status = MATCH_STATUS_MATCHES_BY_DEPENDENCY print '\t', target.name, 'matches by dep', dep.name return True target.match_status = MATCH_STATUS_DOESNT_MATCH return False def _GetTargetsDependingOnMatchingTargets(possible_targets): """Returns the list of Targets in |possible_targets| that depend (either directly on indirectly) on at least one of the targets containing the files supplied as input to analyzer. 
possible_targets: targets to search from.""" found = [] print 'Targets that matched by dependency:' for target in possible_targets: if _DoesTargetDependOnMatchingTargets(target): found.append(target) return found def _AddCompileTargets(target, roots, add_if_no_ancestor, result): """Recurses through all targets that depend on |target|, adding all targets that need to be built (and are in |roots|) to |result|. roots: set of root targets. add_if_no_ancestor: If true and there are no ancestors of |target| then add |target| to |result|. |target| must still be in |roots|. result: targets that need to be built are added here.""" if target.visited: return target.visited = True target.in_roots = target in roots for back_dep_target in target.back_deps: _AddCompileTargets(back_dep_target, roots, False, result) target.added_to_compile_targets |= back_dep_target.added_to_compile_targets target.in_roots |= back_dep_target.in_roots target.is_or_has_linked_ancestor |= ( back_dep_target.is_or_has_linked_ancestor) # Always add 'executable' targets. Even though they may be built by other # targets that depend upon them it makes detection of what is going to be # built easier. # And always add static_libraries that have no dependencies on them from # linkables. This is necessary as the other dependencies on them may be # static libraries themselves, which are not compile time dependencies. 
if target.in_roots and \ (target.is_executable or (not target.added_to_compile_targets and (add_if_no_ancestor or target.requires_build)) or (target.is_static_library and add_if_no_ancestor and not target.is_or_has_linked_ancestor)): print '\t\tadding to compile targets', target.name, 'executable', \ target.is_executable, 'added_to_compile_targets', \ target.added_to_compile_targets, 'add_if_no_ancestor', \ add_if_no_ancestor, 'requires_build', target.requires_build, \ 'is_static_library', target.is_static_library, \ 'is_or_has_linked_ancestor', target.is_or_has_linked_ancestor result.add(target) target.added_to_compile_targets = True def _GetCompileTargets(matching_targets, supplied_targets): """Returns the set of Targets that require a build. matching_targets: targets that changed and need to be built. supplied_targets: set of targets supplied to analyzer to search from.""" result = set() for target in matching_targets: print 'finding compile targets for match', target.name _AddCompileTargets(target, supplied_targets, True, result) return result def _WriteOutput(params, **values): """Writes the output, either to stdout or a file is specified.""" if 'error' in values: print 'Error:', values['error'] if 'status' in values: print values['status'] if 'targets' in values: values['targets'].sort() print 'Supplied targets that depend on changed files:' for target in values['targets']: print '\t', target if 'invalid_targets' in values: values['invalid_targets'].sort() print 'The following targets were not found:' for target in values['invalid_targets']: print '\t', target if 'build_targets' in values: values['build_targets'].sort() print 'Targets that require a build:' for target in values['build_targets']: print '\t', target if 'compile_targets' in values: values['compile_targets'].sort() print 'Targets that need to be built:' for target in values['compile_targets']: print '\t', target if 'test_targets' in values: values['test_targets'].sort() print 'Test targets:' for 
target in values['test_targets']: print '\t', target output_path = params.get('generator_flags', {}).get( 'analyzer_output_path', None) if not output_path: print json.dumps(values) return try: f = open(output_path, 'w') f.write(json.dumps(values) + '\n') f.close() except IOError as e: print 'Error writing to output file', output_path, str(e) def _WasGypIncludeFileModified(params, files): """Returns true if one of the files in |files| is in the set of included files.""" if params['options'].includes: for include in params['options'].includes: if _ToGypPath(os.path.normpath(include)) in files: print 'Include file modified, assuming all changed', include return True return False def _NamesNotIn(names, mapping): """Returns a list of the values in |names| that are not in |mapping|.""" return [name for name in names if name not in mapping] def _LookupTargets(names, mapping): """Returns a list of the mapping[name] for each value in |names| that is in |mapping|.""" return [mapping[name] for name in names if name in mapping] def CalculateVariables(default_variables, params): """Calculate additional variables for use in the build (called by gyp).""" flavor = gyp.common.GetFlavor(params) if flavor == 'mac': default_variables.setdefault('OS', 'mac') elif flavor == 'win': default_variables.setdefault('OS', 'win') # Copy additional generator configuration data from VS, which is shared # by the Windows Ninja generator. import gyp.generator.msvs as msvs_generator generator_additional_non_configuration_keys = getattr(msvs_generator, 'generator_additional_non_configuration_keys', []) generator_additional_path_sections = getattr(msvs_generator, 'generator_additional_path_sections', []) gyp.msvs_emulation.CalculateCommonVariables(default_variables, params) else: operating_system = flavor if flavor == 'android': operating_system = 'linux' # Keep this legacy behavior for now. 
default_variables.setdefault('OS', operating_system) class TargetCalculator(object): """Calculates the matching test_targets and matching compile_targets.""" def __init__(self, files, additional_compile_target_names, test_target_names, data, target_list, target_dicts, toplevel_dir, build_files): self._additional_compile_target_names = set(additional_compile_target_names) self._test_target_names = set(test_target_names) self._name_to_target, self._changed_targets, self._root_targets = ( _GenerateTargets(data, target_list, target_dicts, toplevel_dir, frozenset(files), build_files)) self._unqualified_mapping, self.invalid_targets = ( _GetUnqualifiedToTargetMapping(self._name_to_target, self._supplied_target_names_no_all())) def _supplied_target_names(self): return self._additional_compile_target_names | self._test_target_names def _supplied_target_names_no_all(self): """Returns the supplied test targets without 'all'.""" result = self._supplied_target_names(); result.discard('all') return result def is_build_impacted(self): """Returns true if the supplied files impact the build at all.""" return self._changed_targets def find_matching_test_target_names(self): """Returns the set of output test targets.""" assert self.is_build_impacted() # Find the test targets first. 'all' is special cased to mean all the # root targets. To deal with all the supplied |test_targets| are expanded # to include the root targets during lookup. If any of the root targets # match, we remove it and replace it with 'all'. 
test_target_names_no_all = set(self._test_target_names) test_target_names_no_all.discard('all') test_targets_no_all = _LookupTargets(test_target_names_no_all, self._unqualified_mapping) test_target_names_contains_all = 'all' in self._test_target_names if test_target_names_contains_all: test_targets = [x for x in (set(test_targets_no_all) | set(self._root_targets))] else: test_targets = [x for x in test_targets_no_all] print 'supplied test_targets' for target_name in self._test_target_names: print '\t', target_name print 'found test_targets' for target in test_targets: print '\t', target.name print 'searching for matching test targets' matching_test_targets = _GetTargetsDependingOnMatchingTargets(test_targets) matching_test_targets_contains_all = (test_target_names_contains_all and set(matching_test_targets) & set(self._root_targets)) if matching_test_targets_contains_all: # Remove any of the targets for all that were not explicitly supplied, # 'all' is subsequentely added to the matching names below. matching_test_targets = [x for x in (set(matching_test_targets) & set(test_targets_no_all))] print 'matched test_targets' for target in matching_test_targets: print '\t', target.name matching_target_names = [gyp.common.ParseQualifiedTarget(target.name)[1] for target in matching_test_targets] if matching_test_targets_contains_all: matching_target_names.append('all') print '\tall' return matching_target_names def find_matching_compile_target_names(self): """Returns the set of output compile targets.""" assert self.is_build_impacted(); # Compile targets are found by searching up from changed targets. # Reset the visited status for _GetBuildTargets. 
for target in self._name_to_target.itervalues(): target.visited = False supplied_targets = _LookupTargets(self._supplied_target_names_no_all(), self._unqualified_mapping) if 'all' in self._supplied_target_names(): supplied_targets = [x for x in (set(supplied_targets) | set(self._root_targets))] print 'Supplied test_targets & compile_targets' for target in supplied_targets: print '\t', target.name print 'Finding compile targets' compile_targets = _GetCompileTargets(self._changed_targets, supplied_targets) return [gyp.common.ParseQualifiedTarget(target.name)[1] for target in compile_targets] def GenerateOutput(target_list, target_dicts, data, params): """Called by gyp as the final stage. Outputs results.""" config = Config() try: config.Init(params) if not config.files: raise Exception('Must specify files to analyze via config_path generator ' 'flag') toplevel_dir = _ToGypPath(os.path.abspath(params['options'].toplevel_dir)) if debug: print 'toplevel_dir', toplevel_dir if _WasGypIncludeFileModified(params, config.files): result_dict = { 'status': all_changed_string, 'test_targets': list(config.test_target_names), 'compile_targets': list( config.additional_compile_target_names | config.test_target_names) } _WriteOutput(params, **result_dict) return calculator = TargetCalculator(config.files, config.additional_compile_target_names, config.test_target_names, data, target_list, target_dicts, toplevel_dir, params['build_files']) if not calculator.is_build_impacted(): result_dict = { 'status': no_dependency_string, 'test_targets': [], 'compile_targets': [] } if calculator.invalid_targets: result_dict['invalid_targets'] = calculator.invalid_targets _WriteOutput(params, **result_dict) return test_target_names = calculator.find_matching_test_target_names() compile_target_names = calculator.find_matching_compile_target_names() found_at_least_one_target = compile_target_names or test_target_names result_dict = { 'test_targets': test_target_names, 'status': 
found_dependency_string if found_at_least_one_target else no_dependency_string, 'compile_targets': list( set(compile_target_names) | set(test_target_names)) } if calculator.invalid_targets: result_dict['invalid_targets'] = calculator.invalid_targets _WriteOutput(params, **result_dict) except Exception as e: _WriteOutput(params, error=str(e))
mit
acroreiser/kernel_samsung_msm
scripts/gcc-wrapper.py
74
3905
#! /usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2011, The Linux Foundation. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of The Linux Foundation nor # the names of its contributors may be used to endorse or promote # products derived from this software without specific prior written # permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; # OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR # OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF # ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # Invoke gcc, looking for warnings, and causing a failure if there are # non-whitelisted warnings. import errno import re import os import sys import subprocess # Note that gcc uses unicode, which may depend on the locale. TODO: # force LANG to be set to en_US.UTF-8 to get consistent warnings. 
allowed_warnings = set([ "alignment.c:720", "async.c:122", "async.c:270", "dir.c:43", "dm.c:1053", "dm.c:1080", "dm-table.c:1120", "dm-table.c:1126", "drm_edid.c:1303", "eventpoll.c:1143", "f_mass_storage.c:3368", "inode.c:72", "inode.c:73", "inode.c:74", "msm_sdcc.c:126", "msm_sdcc.c:128", "nf_conntrack_netlink.c:790", "nf_nat_standalone.c:118", "return_address.c:62", "soc-core.c:1719", "xt_log.h:50", "vx6953.c:3124", "dma-mapping.c:238", "dma-mapping.c:284", "xt_log.h:50", ]) # Capture the name of the object file, can find it. ofile = None warning_re = re.compile(r'''(.*/|)([^/]+\.[a-z]+:\d+):(\d+:)? warning:''') def interpret_warning(line): """Decode the message from gcc. The messages we care about have a filename, and a warning""" line = line.rstrip('\n') m = warning_re.match(line) if m and m.group(2) not in allowed_warnings: print "error, forbidden warning:", m.group(2) # If there is a warning, remove any object if it exists. if ofile: try: os.remove(ofile) except OSError: pass sys.exit(1) def run_gcc(): args = sys.argv[1:] # Look for -o try: i = args.index('-o') global ofile ofile = args[i+1] except (ValueError, IndexError): pass compiler = sys.argv[0] try: proc = subprocess.Popen(args, stderr=subprocess.PIPE) for line in proc.stderr: print line, interpret_warning(line) result = proc.wait() except OSError as e: result = e.errno if result == errno.ENOENT: print args[0] + ':',e.strerror print 'Is your PATH set correctly?' else: print ' '.join(args), str(e) return result if __name__ == '__main__': status = run_gcc() sys.exit(status)
gpl-2.0
pypingou/pagure
pagure/hooks/irc.py
1
3772
# -*- coding: utf-8 -*- """ (c) 2014-2016 - Copyright Red Hat Inc Authors: Pierre-Yves Chibon <pingou@pingoured.fr> """ from __future__ import unicode_literals, absolute_import import sqlalchemy as sa import pygit2 import wtforms try: from flask_wtf import FlaskForm except ImportError: from flask_wtf import Form as FlaskForm from sqlalchemy.orm import relation from sqlalchemy.orm import backref from pagure.hooks import BaseHook, RequiredIf from pagure.lib.model import BASE, Project from pagure.utils import get_repo_path class IrcTable(BASE): """ Stores information about the irc hook deployed on a project. Table -- hook_irc """ __tablename__ = "hook_irc" id = sa.Column(sa.Integer, primary_key=True) project_id = sa.Column( sa.Integer, sa.ForeignKey("projects.id", onupdate="CASCADE", ondelete="CASCADE"), nullable=False, unique=True, index=True, ) server = sa.Column(sa.Text, nullable=False) port = sa.Column(sa.Text, nullable=False) room = sa.Column(sa.Text, nullable=False) nick = sa.Column(sa.Text, nullable=True, default=None) nick_pass = sa.Column(sa.Text, nullable=True, default=None) active = sa.Column(sa.Boolean, nullable=False, default=False) join = sa.Column(sa.Boolean, nullable=False, default=True) ssl = sa.Column(sa.Boolean, nullable=False, default=True) project = relation( "Project", remote_side=[Project.id], backref=backref( "irc_hook", cascade="delete, delete-orphan", single_parent=True, uselist=False, ), ) class IrcForm(FlaskForm): """ Form to configure the irc hook. 
""" server = wtforms.StringField( 'Server <span class="error">*</span>', [RequiredIf("active")] ) port = wtforms.StringField( 'Port <span class="error">*</span>', [RequiredIf("active")] ) room = wtforms.StringField( 'Room <span class="error">*</span>', [RequiredIf("active")] ) nick = wtforms.StringField("Nick", [wtforms.validators.Optional()]) nick_pass = wtforms.StringField( "Nickserv Password", [wtforms.validators.Optional()] ) active = wtforms.BooleanField("Active", [wtforms.validators.Optional()]) join = wtforms.BooleanField( "Message Without Join", [wtforms.validators.Optional()] ) ssl = wtforms.BooleanField("Use SSL", [wtforms.validators.Optional()]) class Hook(BaseHook): """ IRC hooks. """ name = "IRC" description = ( "This hook sends message to the mention channel regarding" " the changes made by the pushes to the git repository." ) form = IrcForm db_object = IrcTable backref = "irc_hook" form_fields = [ "server", "port", "room", "nick", "nick_pass", "active", "join", "ssl", ] @classmethod def install(cls, project, dbobj): """ Method called to install the hook for a project. :arg project: a ``pagure.model.Project`` object to which the hook should be installed """ repopaths = [get_repo_path(project)] repo_obj = pygit2.Repository(repopaths[0]) # noqa # Configure the hook # repo_obj.config.set_multivar() # Install the hook itself # cls.base_install(repopaths, dbobj, 'irc', 'git_irc.py') @classmethod def remove(cls, project): """ Method called to remove the hook of a project. :arg project: a ``pagure.model.Project`` object to which the hook should be installed """ repopaths = [get_repo_path(project)] # noqa # cls.base_remove(repopaths, 'irc')
gpl-2.0
archen/django
django/core/urlresolvers.py
2
22195
""" This module converts requested URLs to callback view functions. RegexURLResolver is the main class here. Its resolve() method takes a URL (as a string) and returns a tuple in this format: (view_function, function_args, function_kwargs) """ from __future__ import unicode_literals from importlib import import_module import re from threading import local from django.http import Http404 from django.core.exceptions import ImproperlyConfigured, ViewDoesNotExist from django.utils.datastructures import MultiValueDict from django.utils.encoding import force_str, force_text, iri_to_uri from django.utils.functional import lazy from django.utils.http import urlquote from django.utils.module_loading import module_has_submodule from django.utils.regex_helper import normalize from django.utils import six, lru_cache from django.utils.translation import get_language # SCRIPT_NAME prefixes for each thread are stored here. If there's no entry for # the current thread (which is the only one we ever access), it is assumed to # be empty. _prefixes = local() # Overridden URLconfs for each thread are stored here. 
_urlconfs = local() class ResolverMatch(object): def __init__(self, func, args, kwargs, url_name=None, app_name=None, namespaces=None): self.func = func self.args = args self.kwargs = kwargs self.app_name = app_name if namespaces: self.namespaces = [x for x in namespaces if x] else: self.namespaces = [] if not url_name: if not hasattr(func, '__name__'): # An instance of a callable class url_name = '.'.join([func.__class__.__module__, func.__class__.__name__]) else: # A function url_name = '.'.join([func.__module__, func.__name__]) self.url_name = url_name @property def namespace(self): return ':'.join(self.namespaces) @property def view_name(self): return ':'.join(filter(bool, (self.namespace, self.url_name))) def __getitem__(self, index): return (self.func, self.args, self.kwargs)[index] def __repr__(self): return "ResolverMatch(func=%s, args=%s, kwargs=%s, url_name='%s', app_name='%s', namespace='%s')" % ( self.func, self.args, self.kwargs, self.url_name, self.app_name, self.namespace) class Resolver404(Http404): pass class NoReverseMatch(Exception): pass @lru_cache.lru_cache(maxsize=None) def get_callable(lookup_view, can_fail=False): """ Convert a string version of a function name to the callable object. If the lookup_view is not an import path, it is assumed to be a URL pattern label and the original string is returned. If can_fail is True, lookup_view might be a URL pattern label, so errors during the import fail and the string is returned. """ if not callable(lookup_view): mod_name, func_name = get_mod_func(lookup_view) if func_name == '': return lookup_view try: mod = import_module(mod_name) except ImportError: parentmod, submod = get_mod_func(mod_name) if (not can_fail and submod != '' and not module_has_submodule(import_module(parentmod), submod)): raise ViewDoesNotExist( "Could not import %s. Parent module %s does not exist." 
% (lookup_view, mod_name)) if not can_fail: raise else: try: lookup_view = getattr(mod, func_name) if not callable(lookup_view): raise ViewDoesNotExist( "Could not import %s.%s. View is not callable." % (mod_name, func_name)) except AttributeError: if not can_fail: raise ViewDoesNotExist( "Could not import %s. View does not exist in module %s." % (lookup_view, mod_name)) return lookup_view @lru_cache.lru_cache(maxsize=None) def get_resolver(urlconf): if urlconf is None: from django.conf import settings urlconf = settings.ROOT_URLCONF return RegexURLResolver(r'^/', urlconf) @lru_cache.lru_cache(maxsize=None) def get_ns_resolver(ns_pattern, resolver): # Build a namespaced resolver for the given parent urlconf pattern. # This makes it possible to have captured parameters in the parent # urlconf pattern. ns_resolver = RegexURLResolver(ns_pattern, resolver.url_patterns) return RegexURLResolver(r'^/', [ns_resolver]) def get_mod_func(callback): # Converts 'django.views.news.stories.story_detail' to # ['django.views.news.stories', 'story_detail'] try: dot = callback.rindex('.') except ValueError: return callback, '' return callback[:dot], callback[dot + 1:] class LocaleRegexProvider(object): """ A mixin to provide a default regex property which can vary by active language. """ def __init__(self, regex): # regex is either a string representing a regular expression, or a # translatable string (using ugettext_lazy) representing a regular # expression. self._regex = regex self._regex_dict = {} @property def regex(self): """ Returns a compiled regular expression, depending upon the activated language-code. 
""" language_code = get_language() if language_code not in self._regex_dict: if isinstance(self._regex, six.string_types): regex = self._regex else: regex = force_text(self._regex) try: compiled_regex = re.compile(regex, re.UNICODE) except re.error as e: raise ImproperlyConfigured( '"%s" is not a valid regular expression: %s' % (regex, six.text_type(e))) self._regex_dict[language_code] = compiled_regex return self._regex_dict[language_code] class RegexURLPattern(LocaleRegexProvider): def __init__(self, regex, callback, default_args=None, name=None): LocaleRegexProvider.__init__(self, regex) # callback is either a string like 'foo.views.news.stories.story_detail' # which represents the path to a module and a view function name, or a # callable object (view). if callable(callback): self._callback = callback else: self._callback = None self._callback_str = callback self.default_args = default_args or {} self.name = name def __repr__(self): return force_str('<%s %s %s>' % (self.__class__.__name__, self.name, self.regex.pattern)) def add_prefix(self, prefix): """ Adds the prefix string to a string-based callback. """ if not prefix or not hasattr(self, '_callback_str'): return self._callback_str = prefix + '.' + self._callback_str def resolve(self, path): match = self.regex.search(path) if match: # If there are any named groups, use those as kwargs, ignoring # non-named groups. Otherwise, pass all non-named arguments as # positional arguments. kwargs = match.groupdict() if kwargs: args = () else: args = match.groups() # In both cases, pass any extra_kwargs as **kwargs. 
kwargs.update(self.default_args) return ResolverMatch(self.callback, args, kwargs, self.name) @property def callback(self): if self._callback is not None: return self._callback self._callback = get_callable(self._callback_str) return self._callback class RegexURLResolver(LocaleRegexProvider): def __init__(self, regex, urlconf_name, default_kwargs=None, app_name=None, namespace=None): LocaleRegexProvider.__init__(self, regex) # urlconf_name is a string representing the module containing URLconfs. self.urlconf_name = urlconf_name if not isinstance(urlconf_name, six.string_types): self._urlconf_module = self.urlconf_name self.callback = None self.default_kwargs = default_kwargs or {} self.namespace = namespace self.app_name = app_name self._reverse_dict = {} self._namespace_dict = {} self._app_dict = {} def __repr__(self): if isinstance(self.urlconf_name, list) and len(self.urlconf_name): # Don't bother to output the whole list, it can be huge urlconf_repr = '<%s list>' % self.urlconf_name[0].__class__.__name__ else: urlconf_repr = repr(self.urlconf_name) return str('<%s %s (%s:%s) %s>') % ( self.__class__.__name__, urlconf_repr, self.app_name, self.namespace, self.regex.pattern) def _populate(self): lookups = MultiValueDict() namespaces = {} apps = {} language_code = get_language() for pattern in reversed(self.url_patterns): p_pattern = pattern.regex.pattern if p_pattern.startswith('^'): p_pattern = p_pattern[1:] if isinstance(pattern, RegexURLResolver): if pattern.namespace: namespaces[pattern.namespace] = (p_pattern, pattern) if pattern.app_name: apps.setdefault(pattern.app_name, []).append(pattern.namespace) else: parent = normalize(pattern.regex.pattern) for name in pattern.reverse_dict: for matches, pat, defaults in pattern.reverse_dict.getlist(name): new_matches = [] for piece, p_args in parent: new_matches.extend((piece + suffix, p_args + args) for (suffix, args) in matches) lookups.appendlist(name, (new_matches, p_pattern + pat, dict(defaults, 
**pattern.default_kwargs))) for namespace, (prefix, sub_pattern) in pattern.namespace_dict.items(): namespaces[namespace] = (p_pattern + prefix, sub_pattern) for app_name, namespace_list in pattern.app_dict.items(): apps.setdefault(app_name, []).extend(namespace_list) else: bits = normalize(p_pattern) lookups.appendlist(pattern.callback, (bits, p_pattern, pattern.default_args)) if pattern.name is not None: lookups.appendlist(pattern.name, (bits, p_pattern, pattern.default_args)) self._reverse_dict[language_code] = lookups self._namespace_dict[language_code] = namespaces self._app_dict[language_code] = apps @property def reverse_dict(self): language_code = get_language() if language_code not in self._reverse_dict: self._populate() return self._reverse_dict[language_code] @property def namespace_dict(self): language_code = get_language() if language_code not in self._namespace_dict: self._populate() return self._namespace_dict[language_code] @property def app_dict(self): language_code = get_language() if language_code not in self._app_dict: self._populate() return self._app_dict[language_code] def resolve(self, path): path = force_text(path) # path may be a reverse_lazy object tried = [] match = self.regex.search(path) if match: new_path = path[match.end():] for pattern in self.url_patterns: try: sub_match = pattern.resolve(new_path) except Resolver404 as e: sub_tried = e.args[0].get('tried') if sub_tried is not None: tried.extend([pattern] + t for t in sub_tried) else: tried.append([pattern]) else: if sub_match: sub_match_dict = dict(match.groupdict(), **self.default_kwargs) sub_match_dict.update(sub_match.kwargs) return ResolverMatch(sub_match.func, sub_match.args, sub_match_dict, sub_match.url_name, self.app_name or sub_match.app_name, [self.namespace] + sub_match.namespaces) tried.append([pattern]) raise Resolver404({'tried': tried, 'path': new_path}) raise Resolver404({'path': path}) @property def urlconf_module(self): try: return self._urlconf_module except 
AttributeError: self._urlconf_module = import_module(self.urlconf_name) return self._urlconf_module @property def url_patterns(self): # urlconf_module might be a valid set of patterns, so we default to it patterns = getattr(self.urlconf_module, "urlpatterns", self.urlconf_module) try: iter(patterns) except TypeError: msg = ( "The included urlconf '{name}' does not appear to have any " "patterns in it. If you see valid patterns in the file then " "the issue is probably caused by a circular import." ) raise ImproperlyConfigured(msg.format(name=self.urlconf_name)) return patterns def _resolve_special(self, view_type): callback = getattr(self.urlconf_module, 'handler%s' % view_type, None) if not callback: # No handler specified in file; use default # Lazy import, since django.urls imports this file from django.conf import urls callback = getattr(urls, 'handler%s' % view_type) return get_callable(callback), {} def resolve400(self): return self._resolve_special('400') def resolve403(self): return self._resolve_special('403') def resolve404(self): return self._resolve_special('404') def resolve500(self): return self._resolve_special('500') def reverse(self, lookup_view, *args, **kwargs): return self._reverse_with_prefix(lookup_view, '', *args, **kwargs) def _reverse_with_prefix(self, lookup_view, _prefix, *args, **kwargs): if args and kwargs: raise ValueError("Don't mix *args and **kwargs in call to reverse()!") text_args = [force_text(v) for v in args] text_kwargs = dict((k, force_text(v)) for (k, v) in kwargs.items()) try: lookup_view = get_callable(lookup_view, True) except (ImportError, AttributeError) as e: raise NoReverseMatch("Error importing '%s': %s." 
% (lookup_view, e)) possibilities = self.reverse_dict.getlist(lookup_view) prefix_norm, prefix_args = normalize(urlquote(_prefix))[0] for possibility, pattern, defaults in possibilities: for result, params in possibility: if args: if len(args) != len(params) + len(prefix_args): continue candidate_subs = dict(zip(prefix_args + params, text_args)) else: if set(kwargs.keys()) | set(defaults.keys()) != set(params) | set(defaults.keys()) | set(prefix_args): continue matches = True for k, v in defaults.items(): if kwargs.get(k, v) != v: matches = False break if not matches: continue candidate_subs = text_kwargs # WSGI provides decoded URLs, without %xx escapes, and the URL # resolver operates on such URLs. First substitute arguments # without quoting to build a decoded URL and look for a match. # Then, if we have a match, redo the substitution with quoted # arguments in order to return a properly encoded URL. candidate_pat = prefix_norm.replace('%', '%%') + result if re.search('^%s%s' % (prefix_norm, pattern), candidate_pat % candidate_subs, re.UNICODE): candidate_subs = dict((k, urlquote(v)) for (k, v) in candidate_subs.items()) return candidate_pat % candidate_subs # lookup_view can be URL label, or dotted path, or callable, Any of # these can be passed in at the top, but callables are not friendly in # error messages. m = getattr(lookup_view, '__module__', None) n = getattr(lookup_view, '__name__', None) if m is not None and n is not None: lookup_view_s = "%s.%s" % (m, n) else: lookup_view_s = lookup_view patterns = [pattern for (possibility, pattern, defaults) in possibilities] raise NoReverseMatch("Reverse for '%s' with arguments '%s' and keyword " "arguments '%s' not found. %d pattern(s) tried: %s" % (lookup_view_s, args, kwargs, len(patterns), patterns)) class LocaleRegexURLResolver(RegexURLResolver): """ A URL resolver that always matches the active language code as URL prefix. 
Rather than taking a regex argument, we just override the ``regex`` function to always return the active language-code as regex. """ def __init__(self, urlconf_name, default_kwargs=None, app_name=None, namespace=None): super(LocaleRegexURLResolver, self).__init__( None, urlconf_name, default_kwargs, app_name, namespace) @property def regex(self): language_code = get_language() if language_code not in self._regex_dict: regex_compiled = re.compile('^%s/' % language_code, re.UNICODE) self._regex_dict[language_code] = regex_compiled return self._regex_dict[language_code] def resolve(path, urlconf=None): if urlconf is None: urlconf = get_urlconf() return get_resolver(urlconf).resolve(path) def reverse(viewname, urlconf=None, args=None, kwargs=None, prefix=None, current_app=None): if urlconf is None: urlconf = get_urlconf() resolver = get_resolver(urlconf) args = args or [] kwargs = kwargs or {} if prefix is None: prefix = get_script_prefix() if not isinstance(viewname, six.string_types): view = viewname else: parts = viewname.split(':') parts.reverse() view = parts[0] path = parts[1:] resolved_path = [] ns_pattern = '' while path: ns = path.pop() # Lookup the name to see if it could be an app identifier try: app_list = resolver.app_dict[ns] # Yes! Path part matches an app in the current Resolver if current_app and current_app in app_list: # If we are reversing for a particular app, # use that namespace ns = current_app elif ns not in app_list: # The name isn't shared by one of the instances # (i.e., the default) so just pick the first instance # as the default. 
ns = app_list[0] except KeyError: pass try: extra, resolver = resolver.namespace_dict[ns] resolved_path.append(ns) ns_pattern = ns_pattern + extra except KeyError as key: if resolved_path: raise NoReverseMatch( "%s is not a registered namespace inside '%s'" % (key, ':'.join(resolved_path))) else: raise NoReverseMatch("%s is not a registered namespace" % key) if ns_pattern: resolver = get_ns_resolver(ns_pattern, resolver) return iri_to_uri(resolver._reverse_with_prefix(view, prefix, *args, **kwargs)) reverse_lazy = lazy(reverse, str) def clear_url_caches(): get_callable.cache_clear() get_resolver.cache_clear() get_ns_resolver.cache_clear() def set_script_prefix(prefix): """ Sets the script prefix for the current thread. """ if not prefix.endswith('/'): prefix += '/' _prefixes.value = prefix def get_script_prefix(): """ Returns the currently active script prefix. Useful for client code that wishes to construct their own URLs manually (although accessing the request instance is normally going to be a lot cleaner). """ return getattr(_prefixes, "value", '/') def clear_script_prefix(): """ Unsets the script prefix for the current thread. """ try: del _prefixes.value except AttributeError: pass def set_urlconf(urlconf_name): """ Sets the URLconf for the current thread (overriding the default one in settings). Set to None to revert back to the default. """ if urlconf_name: _urlconfs.value = urlconf_name else: if hasattr(_urlconfs, "value"): del _urlconfs.value def get_urlconf(default=None): """ Returns the root URLconf to use for the current thread if it has been changed from the default one. """ return getattr(_urlconfs, "value", default) def is_valid_path(path, urlconf=None): """ Returns True if the given path resolves against the default URL resolver, False otherwise. This is a convenience method to make working with "is this a match?" cases easier, avoiding unnecessarily indented try...except blocks. 
""" try: resolve(path, urlconf) return True except Resolver404: return False
bsd-3-clause
ruffsl/ros_buildfarm
scripts/doc/generate_doc_job.py
2
1768
#!/usr/bin/env python3 # Copyright 2015-2016 Open Source Robotics Foundation, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import sys from ros_buildfarm.argument import add_argument_arch from ros_buildfarm.argument import add_argument_build_name from ros_buildfarm.argument import add_argument_config_url from ros_buildfarm.argument import add_argument_os_code_name from ros_buildfarm.argument import add_argument_os_name from ros_buildfarm.argument import add_argument_repository_name from ros_buildfarm.argument import add_argument_rosdistro_name from ros_buildfarm.doc_job import configure_doc_job def main(argv=sys.argv[1:]): parser = argparse.ArgumentParser( description="Generate a 'doc' job on Jenkins") add_argument_config_url(parser) add_argument_rosdistro_name(parser) add_argument_build_name(parser, 'doc') add_argument_repository_name(parser) add_argument_os_name(parser) add_argument_os_code_name(parser) add_argument_arch(parser) args = parser.parse_args(argv) return configure_doc_job( args.config_url, args.rosdistro_name, args.doc_build_name, args.repository_name, args.os_name, args.os_code_name, args.arch) if __name__ == '__main__': sys.exit(main())
apache-2.0
Sodki/ansible
test/units/errors/test_errors.py
60
4290
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type from ansible.compat.tests import unittest from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject from ansible.errors import AnsibleError from ansible.compat.tests import BUILTINS from ansible.compat.tests.mock import mock_open, patch class TestErrors(unittest.TestCase): def setUp(self): self.message = 'This is the error message' self.unicode_message = 'This is an error with \xf0\x9f\x98\xa8 in it' self.obj = AnsibleBaseYAMLObject() def tearDown(self): pass def test_basic_error(self): e = AnsibleError(self.message) self.assertEqual(e.message, self.message) self.assertEqual(e.__repr__(), self.message) def test_basic_unicode_error(self): e = AnsibleError(self.unicode_message) self.assertEqual(e.message, self.unicode_message) self.assertEqual(e.__repr__(), self.unicode_message) @patch.object(AnsibleError, '_get_error_lines_from_file') def test_error_with_object(self, mock_method): self.obj.ansible_pos = ('foo.yml', 1, 1) mock_method.return_value = ('this is line 1\n', '') e = AnsibleError(self.message, self.obj) self.assertEqual( e.message, ("This is the error message\n\nThe error appears to have been in 'foo.yml': line 1, column 1, but may\nbe 
elsewhere in the file depending on the " "exact syntax problem.\n\nThe offending line appears to be:\n\n\nthis is line 1\n^ here\n") ) def test_get_error_lines_from_file(self): m = mock_open() m.return_value.readlines.return_value = ['this is line 1\n'] with patch('{0}.open'.format(BUILTINS), m): # this line will be found in the file self.obj.ansible_pos = ('foo.yml', 1, 1) e = AnsibleError(self.message, self.obj) self.assertEqual( e.message, ("This is the error message\n\nThe error appears to have been in 'foo.yml': line 1, column 1, but may\nbe elsewhere in the file depending on " "the exact syntax problem.\n\nThe offending line appears to be:\n\n\nthis is line 1\n^ here\n") ) # this line will not be found, as it is out of the index range self.obj.ansible_pos = ('foo.yml', 2, 1) e = AnsibleError(self.message, self.obj) self.assertEqual( e.message, ("This is the error message\n\nThe error appears to have been in 'foo.yml': line 2, column 1, but may\nbe elsewhere in the file depending on " "the exact syntax problem.\n\n(specified line no longer in file, maybe it changed?)") ) m = mock_open() m.return_value.readlines.return_value = ['this line has unicode \xf0\x9f\x98\xa8 in it!\n'] with patch('{0}.open'.format(BUILTINS), m): # this line will be found in the file self.obj.ansible_pos = ('foo.yml', 1, 1) e = AnsibleError(self.unicode_message, self.obj) self.assertEqual( e.message, ("This is an error with \xf0\x9f\x98\xa8 in it\n\nThe error appears to have been in 'foo.yml': line 1, column 1, but may\nbe elsewhere in the " "file depending on the exact syntax problem.\n\nThe offending line appears to be:\n\n\nthis line has unicode \xf0\x9f\x98\xa8 in it!\n^ " "here\n") )
gpl-3.0
Nexenta/cinder
cinder/api/contrib/resource_common_manage.py
6
2375
# Copyright (c) 2016 Stratoscale, Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from cinder.api import common from cinder import exception from cinder.i18n import _ def get_manageable_resources(req, is_detail, function_get_manageable, view_builder): context = req.environ['cinder.context'] params = req.params.copy() host = params.get('host') if host is None: raise exception.InvalidHost( reason=_("Host must be specified in query parameters")) marker, limit, offset = common.get_pagination_params(params) sort_keys, sort_dirs = common.get_sort_params(params, default_key='reference') # These parameters are generally validated at the DB layer, but in this # case sorting is not done by the DB valid_sort_keys = ('reference', 'size') invalid_keys = [key for key in sort_keys if key not in valid_sort_keys] if invalid_keys: msg = _("Invalid sort keys passed: %s") % ', '.join(invalid_keys) raise exception.InvalidParameterValue(err=msg) valid_sort_dirs = ('asc', 'desc') invalid_dirs = [d for d in sort_dirs if d not in valid_sort_dirs] if invalid_dirs: msg = _("Invalid sort dirs passed: %s") % ', '.join(invalid_dirs) raise exception.InvalidParameterValue(err=msg) resources = function_get_manageable(context, host, marker=marker, limit=limit, offset=offset, sort_keys=sort_keys, sort_dirs=sort_dirs) resource_count = len(resources) if is_detail: resources = view_builder.detail_list(req, resources, resource_count) else: resources = view_builder.summary_list(req, resources, resource_count) 
return resources
apache-2.0
alkivi-sas/projety-api
projety/__init__.py
1
3180
""" Application factory. Use to return a flask app configured. """ import os import logging logging.debug('init because of salt') # noqa from flask import Flask from flask_sqlalchemy import SQLAlchemy from flask_cors import CORS from flask_socketio import SocketIO from flask_principal import Principal from flasgger import Swagger from celery import Celery from config import config from wsproxy import FlaskWsProxy # Flask extensions db = SQLAlchemy() cors = CORS() socketio = SocketIO() remote_proxy = FlaskWsProxy() celery = Celery(__name__, broker=os.environ.get('CELERY_BROKER_URL', 'redis://'), backend=os.environ.get('CELERY_BROKER_URL', 'redis://')) swagger = Swagger() principal = Principal(use_sessions=False) # Import models so that they are registered with SQLAlchemy from . import models # noqa # Import celery task so that it is registered with the Celery workers from .api.tasks import run_flask_request # noqa # Import Socket.IO events so that they are registered with Flask-SocketIO from . import events # noqa # Import salt so they are ok+ from . 
import salt # noqa def fix_logger(app): """Reset logging due to salt mess.""" w_logger = logging.getLogger('werkzeug') logger = app.logger if app.debug or app.testing: w_logger.setLevel(logging.DEBUG) logger.setLevel(logging.DEBUG) del w_logger.handlers[:] del logger.handlers[:] def create_app(config_name=None, main=True): """Return a flask app.""" if config_name is None: config_name = os.environ.get('PROJETY_CONFIG', 'development') app = Flask(__name__) app.config.from_object(config[config_name]) # Initialize flask extensions db.init_app(app) cors.init_app(app) swagger.init_app(app) principal.init_app(app) if main: # Initialize socketio server and attach it to the message queue, so # that everything works even when there are multiple servers or # additional processes such as Celery workers wanting to access # Socket.IO socketio.init_app(app, message_queue=app.config['SOCKETIO_MESSAGE_QUEUE']) # Our wsproxy is only needed for the main app remote_proxy.init_app(app) else: # Initialize socketio to emit events through through the message queue # Note that since Celery does not use eventlet, we have to be explicit # in setting the async mode to not use it. socketio.init_app(None, message_queue=app.config['SOCKETIO_MESSAGE_QUEUE'], async_mode='threading') celery.conf.update(config[config_name].CELERY_CONFIG) # Reset logging due to salt mess fix_logger(app) # Register API routes from .api import api as api_blueprint app.register_blueprint(api_blueprint, url_prefix='/api') # Register handlers from .errors import not_found, method_not_supported, internal_server_error app.register_error_handler(404, not_found) app.register_error_handler(405, method_not_supported) app.register_error_handler(500, internal_server_error) return app
lgpl-3.0
Tehsmash/nova
nova/tests/unit/api/openstack/compute/contrib/test_floating_ips_bulk.py
21
8799
# Copyright 2012 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock import netaddr from oslo_config import cfg import webob from nova.api.openstack.compute.contrib import floating_ips_bulk as fipbulk_v2 from nova.api.openstack.compute.plugins.v3 import floating_ips_bulk as\ fipbulk_v21 from nova import context from nova import exception from nova import objects from nova import test from nova.tests.unit.api.openstack import fakes CONF = cfg.CONF class FloatingIPBulkV21(test.TestCase): floating_ips_bulk = fipbulk_v21 bad_request = exception.ValidationError def setUp(self): super(FloatingIPBulkV21, self).setUp() self.context = context.get_admin_context() self.controller = self.floating_ips_bulk.FloatingIPBulkController() self.req = fakes.HTTPRequest.blank('') def _setup_floating_ips(self, ip_range): body = {'floating_ips_bulk_create': {'ip_range': ip_range}} res_dict = self.controller.create(self.req, body=body) response = {"floating_ips_bulk_create": { 'ip_range': ip_range, 'pool': CONF.default_floating_pool, 'interface': CONF.public_interface}} self.assertEqual(res_dict, response) def test_create_ips(self): ip_range = '192.168.1.0/28' self._setup_floating_ips(ip_range) def test_create_ips_pool(self): ip_range = '10.0.1.0/29' pool = 'a new pool' body = {'floating_ips_bulk_create': {'ip_range': ip_range, 'pool': pool}} res_dict = self.controller.create(self.req, body=body) response = {"floating_ips_bulk_create": { 'ip_range': ip_range, 'pool': pool, 'interface': 
CONF.public_interface}} self.assertEqual(res_dict, response) def test_list_ips(self): self._test_list_ips(self.req) def _test_list_ips(self, req): ip_range = '192.168.1.1/28' self._setup_floating_ips(ip_range) res_dict = self.controller.index(req) ip_info = [{'address': str(ip_addr), 'pool': CONF.default_floating_pool, 'interface': CONF.public_interface, 'project_id': None, 'instance_uuid': None, 'fixed_ip': None} for ip_addr in netaddr.IPNetwork(ip_range).iter_hosts()] response = {'floating_ip_info': ip_info} self.assertEqual(res_dict, response) def test_list_ips_associated(self): self._test_list_ips_associated(self.req) @mock.patch('nova.objects.FloatingIPList.get_all') def _test_list_ips_associated(self, req, mock_get): instance_uuid = "fake-uuid" fixed_address = "10.0.0.1" floating_address = "192.168.0.1" fixed_ip = objects.FixedIP(instance_uuid=instance_uuid, address=fixed_address) floating_ip = objects.FloatingIP(address=floating_address, fixed_ip=fixed_ip, pool=CONF.default_floating_pool, interface=CONF.public_interface, project_id=None) floating_list = objects.FloatingIPList(objects=[floating_ip]) mock_get.return_value = floating_list res_dict = self.controller.index(req) ip_info = [{'address': floating_address, 'pool': CONF.default_floating_pool, 'interface': CONF.public_interface, 'project_id': None, 'instance_uuid': instance_uuid, 'fixed_ip': fixed_address}] response = {'floating_ip_info': ip_info} self.assertEqual(res_dict, response) def test_list_ip_by_host(self): self._test_list_ip_by_host(self.req) def _test_list_ip_by_host(self, req): ip_range = '192.168.1.1/28' self._setup_floating_ips(ip_range) self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, req, 'host') def test_delete_ips(self): self._test_delete_ips(self.req) def _test_delete_ips(self, req): ip_range = '192.168.1.0/29' self._setup_floating_ips(ip_range) body = {'ip_range': ip_range} res_dict = self.controller.update(req, "delete", body=body) response = 
{"floating_ips_bulk_delete": ip_range} self.assertEqual(res_dict, response) # Check that the IPs are actually deleted res_dict = self.controller.index(req) response = {'floating_ip_info': []} self.assertEqual(res_dict, response) def test_create_duplicate_fail(self): ip_range = '192.168.1.0/30' self._setup_floating_ips(ip_range) ip_range = '192.168.1.0/29' body = {'floating_ips_bulk_create': {'ip_range': ip_range}} self.assertRaises(webob.exc.HTTPConflict, self.controller.create, self.req, body=body) def test_create_bad_cidr_fail(self): # netaddr can't handle /32 or 31 cidrs ip_range = '192.168.1.1/32' body = {'floating_ips_bulk_create': {'ip_range': ip_range}} self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, self.req, body=body) def test_create_invalid_cidr_fail(self): ip_range = 'not a cidr' body = {'floating_ips_bulk_create': {'ip_range': ip_range}} self.assertRaises(self.bad_request, self.controller.create, self.req, body=body) class FloatingIPBulkV2(FloatingIPBulkV21): floating_ips_bulk = fipbulk_v2 bad_request = webob.exc.HTTPBadRequest def setUp(self): super(FloatingIPBulkV2, self).setUp() self.non_admin_req = fakes.HTTPRequest.blank('') self.admin_req = fakes.HTTPRequest.blank('', use_admin_context=True) def test_list_ips_with_non_admin(self): ip_range = '192.168.1.1/28' self._setup_floating_ips(ip_range) self.assertRaises(exception.AdminRequired, self.controller.index, self.non_admin_req) def test_list_ip_with_non_admin(self): ip_range = '192.168.1.1/28' self._setup_floating_ips(ip_range) self.assertRaises(exception.AdminRequired, self.controller.show, self.non_admin_req, "host") def test_delete_ips(self): self._test_delete_ips(self.admin_req) def test_list_ip_by_host(self): self._test_list_ip_by_host(self.admin_req) def test_list_ips_associated(self): self._test_list_ips_associated(self.admin_req) def test_list_ips(self): self._test_list_ips(self.admin_req) class FloatingIPBulkPolicyEnforcementV21(test.NoDBTestCase): def setUp(self): 
super(FloatingIPBulkPolicyEnforcementV21, self).setUp() self.controller = fipbulk_v21.FloatingIPBulkController() self.req = fakes.HTTPRequest.blank('') def _common_policy_check(self, func, *arg, **kwarg): rule_name = "os_compute_api:os-floating-ips-bulk" rule = {rule_name: "project:non_fake"} self.policy.set_rules(rule) exc = self.assertRaises( exception.PolicyNotAuthorized, func, *arg, **kwarg) self.assertEqual( "Policy doesn't allow %s to be performed." % rule_name, exc.format_message()) def test_index_policy_failed(self): self._common_policy_check(self.controller.index, self.req) def test_show_ip_policy_failed(self): self._common_policy_check(self.controller.show, self.req, "host") def test_create_policy_failed(self): ip_range = '192.168.1.0/28' body = {'floating_ips_bulk_create': {'ip_range': ip_range}} self._common_policy_check(self.controller.create, self.req, body=body) def test_update_policy_failed(self): ip_range = '192.168.1.0/29' body = {'ip_range': ip_range} self._common_policy_check(self.controller.update, self.req, "delete", body=body)
apache-2.0
ProfessionalIT/maxigenios-website
sdk/google_appengine/lib/django-1.3/django/utils/unittest/util.py
751
2821
"""Various utility functions.""" __unittest = True _MAX_LENGTH = 80 def safe_repr(obj, short=False): try: result = repr(obj) except Exception: result = object.__repr__(obj) if not short or len(result) < _MAX_LENGTH: return result return result[:_MAX_LENGTH] + ' [truncated]...' def safe_str(obj): try: return str(obj) except Exception: return object.__str__(obj) def strclass(cls): return "%s.%s" % (cls.__module__, cls.__name__) def sorted_list_difference(expected, actual): """Finds elements in only one or the other of two, sorted input lists. Returns a two-element tuple of lists. The first list contains those elements in the "expected" list but not in the "actual" list, and the second contains those elements in the "actual" list but not in the "expected" list. Duplicate elements in either input list are ignored. """ i = j = 0 missing = [] unexpected = [] while True: try: e = expected[i] a = actual[j] if e < a: missing.append(e) i += 1 while expected[i] == e: i += 1 elif e > a: unexpected.append(a) j += 1 while actual[j] == a: j += 1 else: i += 1 try: while expected[i] == e: i += 1 finally: j += 1 while actual[j] == a: j += 1 except IndexError: missing.extend(expected[i:]) unexpected.extend(actual[j:]) break return missing, unexpected def unorderable_list_difference(expected, actual, ignore_duplicate=False): """Same behavior as sorted_list_difference but for lists of unorderable items (like dicts). As it does a linear search per item (remove) it has O(n*n) performance. """ missing = [] unexpected = [] while expected: item = expected.pop() try: actual.remove(item) except ValueError: missing.append(item) if ignore_duplicate: for lst in expected, actual: try: while True: lst.remove(item) except ValueError: pass if ignore_duplicate: while actual: item = actual.pop() unexpected.append(item) try: while True: actual.remove(item) except ValueError: pass return missing, unexpected # anything left in actual is unexpected return missing, actual
mit
hakril/PythonForWindows
windows/rpc/client.py
1
7757
import ctypes import struct import windows.alpc as alpc import windows.com import windows.generated_def as gdef if windows.pycompat.is_py3: buffer = bytes KNOW_REQUEST_TYPE = gdef.FlagMapper(gdef.RPC_REQUEST_TYPE_CALL, gdef.RPC_REQUEST_TYPE_BIND) KNOW_RESPONSE_TYPE = gdef.FlagMapper(gdef.RPC_RESPONSE_TYPE_FAIL, gdef.RPC_RESPONSE_TYPE_SUCCESS, gdef.RPC_RESPONSE_TYPE_BIND_OK) KNOWN_RPC_ERROR_CODE = gdef.FlagMapper( gdef.ERROR_INVALID_HANDLE, gdef.RPC_X_BAD_STUB_DATA, gdef.RPC_S_UNKNOWN_IF, gdef.RPC_S_PROTOCOL_ERROR, gdef.RPC_S_UNSUPPORTED_TRANS_SYN, gdef.RPC_S_PROCNUM_OUT_OF_RANGE) NOT_USED = 0xBAADF00D class ALPC_RPC_BIND(ctypes.Structure): _pack_ = 1 _fields_ = [ ("request_type", gdef.DWORD), ("UNK1", gdef.DWORD), ("UNK2", gdef.DWORD), ("target", gdef.RPC_IF_ID), ("flags", gdef.DWORD), ("if_nb_ndr32", gdef.USHORT), ("if_nb_ndr64", gdef.USHORT), ("if_nb_unkn", gdef.USHORT), ("PAD", gdef.USHORT), ("register_multiple_syntax", gdef.DWORD), ("use_flow", gdef.DWORD), ("UNK5", gdef.DWORD), ("maybe_flow_id", gdef.DWORD), ("UNK7", gdef.DWORD), ("some_context_id", gdef.DWORD), ("UNK9", gdef.DWORD), ] class ALPC_RPC_CALL(ctypes.Structure): _pack_ = 1 _fields_ = [ ("request_type", gdef.DWORD), ("UNK1", gdef.DWORD), ("flags",gdef.DWORD), ("request_id", gdef.DWORD), ("if_nb", gdef.DWORD), ("method_offset", gdef.DWORD), ("UNK2", gdef.DWORD), ("UNK3", gdef.DWORD), ("UNK4", gdef.DWORD), ("UNK5", gdef.DWORD), ("UNK6", gdef.DWORD), ("UNK7", gdef.DWORD), ("ORPC_IPID", gdef.GUID) ] class RPCClient(object): """A client for RPC-over-ALPC able to bind to interface and perform calls using NDR32 marshalling""" REQUEST_IDENTIFIER = 0x11223344 def __init__(self, port): self.alpc_client = alpc.AlpcClient(port) #: The :class:`windows.alpc.AlpcClient` used to communicate with the server self.number_of_bind_if = 0 # if -> interface self.if_bind_number = {} def bind(self, IID_str, version=(1,0)): """Bind to the ``IID_str`` with the given ``version`` :returns: :class:`windows.generated_def.IID` """ 
IID = windows.com.IID.from_string(IID_str) request = self._forge_bind_request(IID, version, self.number_of_bind_if) response = self._send_request(request) # Parse reponse request_type = self._get_request_type(response) if request_type != gdef.RPC_RESPONSE_TYPE_BIND_OK: raise ValueError("Unexpected reponse type. Expected RESPONSE_TYPE_BIND_OK got {0}".format(KNOW_RESPONSE_TYPE[request_type])) iid_hash = hash(buffer(IID)[:]) # TODO: add __hash__ to IID self.if_bind_number[iid_hash] = self.number_of_bind_if self.number_of_bind_if += 1 #TODO: attach version information to IID return IID def forge_alpc_request(self, IID, method_offset, params, ipid=None): """Craft an ALPC message containing an RPC request to call ``method_offset`` of interface ``IID` with ``params``. Can be used to craft request without directly sending it """ iid_hash = hash(buffer(IID)[:]) interface_nb = self.if_bind_number[iid_hash] # TODO: add __hash__ to IID if len(params) > 0x900: # 0x1000 - size of meta-data request = self._forge_call_request_in_view(interface_nb, method_offset, params, ipid=ipid) else: request = self._forge_call_request(interface_nb, method_offset, params, ipid=ipid) return request def call(self, IID, method_offset, params, ipid=None): """Call method number ``method_offset`` of interface ``IID`` with mashalled ``params`` :param IID IID: An IID previously returned by :func:`bind` :param int method_offset: :param str params: The mashalled parameters (NDR32) :returns: :class:`str` """ request = self.forge_alpc_request(IID, method_offset, params, ipid=ipid) response = self._send_request(request) # Parse reponse request_type = self._get_request_type(response) if request_type != gdef.RPC_RESPONSE_TYPE_SUCCESS: raise ValueError("Unexpected reponse type. 
Expected RESPONSE_SUCCESS got {0}".format(KNOW_RESPONSE_TYPE[request_type])) # windows.utils.sprint(ALPC_RPC_CALL.from_buffer_copy(response + "\x00" * 12)) data = struct.unpack("<6I", response[:6 * 4]) assert data[3] == self.REQUEST_IDENTIFIER return response[4 * 6:] # Should be the return value (not completly verified) def _send_request(self, request): response = self.alpc_client.send_receive(request) return response.data def _forge_call_request(self, interface_nb, method_offset, params, ipid=None): # TODO: differents REQUEST_IDENTIFIER for each req ? # TODO: what is this '0' ? (1 is also accepted) (flags ?) # request = struct.pack("<16I", gdef.RPC_REQUEST_TYPE_CALL, NOT_USED, 1, self.REQUEST_IDENTIFIER, interface_nb, method_offset, *[NOT_USED] * 10) req = ALPC_RPC_CALL() req.request_type = gdef.RPC_REQUEST_TYPE_CALL req.flags = 0 req.request_id = self.REQUEST_IDENTIFIER req.if_nb = interface_nb req.method_offset = method_offset if ipid: req.ORPC_IPID = ipid this = gdef.ORPCTHIS() this.version = (5,7) this.flags = 1 lthis = gdef.LOCALTHIS() return buffer(req)[:] + buffer(this)[:] + buffer(lthis)[:] + params return buffer(req)[:] + params def _forge_call_request_in_view(self, interface_nb, method_offset, params, ipid=None): # import pdb;pdb.set_trace() # Version crade qui clean rien pour POC. 
GROS DOUTES :D raw_request = self._forge_call_request(interface_nb, method_offset, "") p = windows.alpc.AlpcMessage(0x2000) section = self.alpc_client.create_port_section(0x40000, 0, len(params)) view = self.alpc_client.map_section(section[0], len(params)) p.port_message.data = raw_request + windows.rpc.ndr.NdrLong.pack(len(params) + 0x200) + "\x00" * 40 p.attributes.ValidAttributes |= gdef.ALPC_MESSAGE_VIEW_ATTRIBUTE p.view_attribute.Flags = 0x40000 p.view_attribute.ViewBase = view.ViewBase p.view_attribute.SectionHandle = view.SectionHandle p.view_attribute.ViewSize = len(params) windows.current_process.write_memory(view.ViewBase, params) # Write NDR to view return p def _forge_bind_request(self, uuid, syntaxversion, requested_if_nb): version_major, version_minor = syntaxversion req = ALPC_RPC_BIND() req.request_type = gdef.RPC_REQUEST_TYPE_BIND req.target = gdef.RPC_IF_ID(uuid, *syntaxversion) req.flags = gdef.BIND_IF_SYNTAX_NDR32 req.if_nb_ndr32 = requested_if_nb req.if_nb_ndr64 = 0 req.if_nb_unkn = 0 req.register_multiple_syntax = False req.some_context_id = 0xB00B00B return buffer(req)[:] def _get_request_type(self, response): "raise if request_type == RESPONSE_TYPE_FAIL" request_type = struct.unpack("<I", response[:4])[0] if request_type == gdef.RPC_RESPONSE_TYPE_FAIL: error_code = struct.unpack("<5I", response)[2] raise ValueError("RPC Response error {0} ({1})".format(error_code, KNOWN_RPC_ERROR_CODE.get(error_code, error_code))) return request_type
bsd-3-clause
Chukwunonso/ndi_anambra
ndi_anambra/settings/production.py
1
3499
from .base import * # flake8: noqa DEBUG = env.bool('DJANGO_DEBUG_NEW', default=False) TEMPLATES[0]['OPTIONS']['debug'] = DEBUG SECRET_KEY = env('DJANGO_SECRET_KEY') # Compress static files offline # http://django-compressor.readthedocs.org/en/latest/settings/#django.conf.settings.COMPRESS_OFFLINE COMPRESS_OFFLINE = True COMPRESS_CSS_FILTERS = [ 'compressor.filters.css_default.CssAbsoluteFilter', 'compressor.filters.cssmin.CSSMinFilter', ] ALLOWED_HOSTS = env("DJANGO_ALLOWED_HOST_NAME").split(',') DATABASES['default'] = env.db('PROD_DATABASE_URL') INSTALLED_APPS += ( "wagtail.contrib.frontend_cache", 'gunicorn', ) #support opbeat MIDDLEWARE = ( 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', 'django.middleware.security.SecurityMiddleware', 'wagtail.core.middleware.SiteMiddleware', 'wagtail.contrib.redirects.middleware.RedirectMiddleware', ) WAGTAIL_SITE_NAME = 'ndi_anambra' # Send notification emails as a background task using Celery, # to prevent this from blocking web server threads # (requires the django-celery package): # http://celery.readthedocs.org/en/latest/configuration.html # import djcelery # # djcelery.setup_loader() # # CELERY_SEND_TASK_ERROR_EMAILS = True # BROKER_URL = 'redis://' # Use Redis as the cache backend for extra performance # (requires the django-redis-cache package): # http://wagtail.readthedocs.org/en/latest/howto/performance.html#cache CACHES = { 'default': { 'BACKEND': 'django_redis.cache.RedisCache', 'LOCATION': '{0}/{1}'.format(env('REDIS_URL', default='redis://127.0.0.1:6379'), 0), 'OPTIONS': { 'CLIENT_CLASS': 'django_redis.client.DefaultClient', 'IGNORE_EXCEPTIONS': True, } } } DEFAULT_FROM_EMAIL = env('EMAIL_FROM') EMAIL_USE_TLS = True EMAIL_HOST = env("EMAIL_HOST") 
EMAIL_HOST_USER = env('EMAIL_USER') EMAIL_HOST_PASSWORD = env('EMAIL_PASSWD') EMAIL_PORT = 587 # LOGGING CONFIGURATION # Sends an email to site admins on every HTTP 500 error when DEBUG=False. # See http://docs.djangoproject.com/en/dev/topics/logging for # more details on how to customize your logging configuration. LOGGING = { 'version': 1, 'disable_existing_loggers': False, 'filters': { 'require_debug_false': { '()': 'django.utils.log.RequireDebugFalse' } }, 'formatters': { 'verbose': { 'format': '%(levelname)s %(asctime)s %(module)s ' '%(process)d %(thread)d %(message)s' }, }, 'handlers': { 'mail_admins': { 'level': 'ERROR', 'filters': ['require_debug_false'], 'class': 'django.utils.log.AdminEmailHandler' }, 'console': { 'level': 'DEBUG', 'class': 'logging.StreamHandler', 'formatter': 'verbose', }, }, 'loggers': { 'django.request': { 'handlers': ['mail_admins'], 'level': 'ERROR', 'propagate': True }, 'django.security.DisallowedHost': { 'level': 'ERROR', 'handlers': ['console', 'mail_admins'], 'propagate': True } } }
mit
WarrenWeckesser/scipy
scipy/io/tests/test_mmio.py
12
26468
from tempfile import mkdtemp, mktemp import os import io import shutil import textwrap import numpy as np from numpy import array, transpose, pi from numpy.testing import (assert_equal, assert_allclose, assert_array_equal, assert_array_almost_equal) import pytest from pytest import raises as assert_raises import scipy.sparse from scipy.io.mmio import mminfo, mmread, mmwrite parametrize_args = [('integer', 'int'), ('unsigned-integer', 'uint')] class TestMMIOArray: def setup_method(self): self.tmpdir = mkdtemp() self.fn = os.path.join(self.tmpdir, 'testfile.mtx') def teardown_method(self): shutil.rmtree(self.tmpdir) def check(self, a, info): mmwrite(self.fn, a) assert_equal(mminfo(self.fn), info) b = mmread(self.fn) assert_array_almost_equal(a, b) def check_exact(self, a, info): mmwrite(self.fn, a) assert_equal(mminfo(self.fn), info) b = mmread(self.fn) assert_equal(a, b) @pytest.mark.parametrize('typeval, dtype', parametrize_args) def test_simple_integer(self, typeval, dtype): self.check_exact(array([[1, 2], [3, 4]], dtype=dtype), (2, 2, 4, 'array', typeval, 'general')) @pytest.mark.parametrize('typeval, dtype', parametrize_args) def test_32bit_integer(self, typeval, dtype): a = array([[2**31-1, 2**31-2], [2**31-3, 2**31-4]], dtype=dtype) self.check_exact(a, (2, 2, 4, 'array', typeval, 'general')) def test_64bit_integer(self): a = array([[2**31, 2**32], [2**63-2, 2**63-1]], dtype=np.int64) if (np.intp(0).itemsize < 8): assert_raises(OverflowError, mmwrite, self.fn, a) else: self.check_exact(a, (2, 2, 4, 'array', 'integer', 'general')) def test_64bit_unsigned_integer(self): a = array([[2**31, 2**32], [2**64-2, 2**64-1]], dtype=np.uint64) self.check_exact(a, (2, 2, 4, 'array', 'unsigned-integer', 'general')) @pytest.mark.parametrize('typeval, dtype', parametrize_args) def test_simple_upper_triangle_integer(self, typeval, dtype): self.check_exact(array([[0, 1], [0, 0]], dtype=dtype), (2, 2, 4, 'array', typeval, 'general')) @pytest.mark.parametrize('typeval, dtype', 
parametrize_args) def test_simple_lower_triangle_integer(self, typeval, dtype): self.check_exact(array([[0, 0], [1, 0]], dtype=dtype), (2, 2, 4, 'array', typeval, 'general')) @pytest.mark.parametrize('typeval, dtype', parametrize_args) def test_simple_rectangular_integer(self, typeval, dtype): self.check_exact(array([[1, 2, 3], [4, 5, 6]], dtype=dtype), (2, 3, 6, 'array', typeval, 'general')) def test_simple_rectangular_float(self): self.check([[1, 2], [3.5, 4], [5, 6]], (3, 2, 6, 'array', 'real', 'general')) def test_simple_float(self): self.check([[1, 2], [3, 4.0]], (2, 2, 4, 'array', 'real', 'general')) def test_simple_complex(self): self.check([[1, 2], [3, 4j]], (2, 2, 4, 'array', 'complex', 'general')) @pytest.mark.parametrize('typeval, dtype', parametrize_args) def test_simple_symmetric_integer(self, typeval, dtype): self.check_exact(array([[1, 2], [2, 4]], dtype=dtype), (2, 2, 4, 'array', typeval, 'symmetric')) def test_simple_skew_symmetric_integer(self): self.check_exact([[0, 2], [-2, 0]], (2, 2, 4, 'array', 'integer', 'skew-symmetric')) def test_simple_skew_symmetric_float(self): self.check(array([[0, 2], [-2.0, 0.0]], 'f'), (2, 2, 4, 'array', 'real', 'skew-symmetric')) def test_simple_hermitian_complex(self): self.check([[1, 2+3j], [2-3j, 4]], (2, 2, 4, 'array', 'complex', 'hermitian')) def test_random_symmetric_float(self): sz = (20, 20) a = np.random.random(sz) a = a + transpose(a) self.check(a, (20, 20, 400, 'array', 'real', 'symmetric')) def test_random_rectangular_float(self): sz = (20, 15) a = np.random.random(sz) self.check(a, (20, 15, 300, 'array', 'real', 'general')) def test_bad_number_of_array_header_fields(self): s = """\ %%MatrixMarket matrix array real general 3 3 999 1.0 2.0 3.0 4.0 5.0 6.0 7.0 8.0 9.0 """ text = textwrap.dedent(s).encode('ascii') with pytest.raises(ValueError, match='not of length 2'): scipy.io.mmread(io.BytesIO(text)) def test_gh13634_non_skew_symmetric_int(self): self.check_exact(array([[1, 2], [-2, 99]], 
dtype=np.int32), (2, 2, 4, 'array', 'integer', 'general')) def test_gh13634_non_skew_symmetric_float(self): self.check(array([[1, 2], [-2, 99.]], dtype=np.float32), (2, 2, 4, 'array', 'real', 'general')) class TestMMIOSparseCSR(TestMMIOArray): def setup_method(self): self.tmpdir = mkdtemp() self.fn = os.path.join(self.tmpdir, 'testfile.mtx') def teardown_method(self): shutil.rmtree(self.tmpdir) def check(self, a, info): mmwrite(self.fn, a) assert_equal(mminfo(self.fn), info) b = mmread(self.fn) assert_array_almost_equal(a.todense(), b.todense()) def check_exact(self, a, info): mmwrite(self.fn, a) assert_equal(mminfo(self.fn), info) b = mmread(self.fn) assert_equal(a.todense(), b.todense()) @pytest.mark.parametrize('typeval, dtype', parametrize_args) def test_simple_integer(self, typeval, dtype): self.check_exact(scipy.sparse.csr_matrix([[1, 2], [3, 4]], dtype=dtype), (2, 2, 4, 'coordinate', typeval, 'general')) def test_32bit_integer(self): a = scipy.sparse.csr_matrix(array([[2**31-1, -2**31+2], [2**31-3, 2**31-4]], dtype=np.int32)) self.check_exact(a, (2, 2, 4, 'coordinate', 'integer', 'general')) def test_64bit_integer(self): a = scipy.sparse.csr_matrix(array([[2**32+1, 2**32+1], [-2**63+2, 2**63-2]], dtype=np.int64)) if (np.intp(0).itemsize < 8): assert_raises(OverflowError, mmwrite, self.fn, a) else: self.check_exact(a, (2, 2, 4, 'coordinate', 'integer', 'general')) def test_32bit_unsigned_integer(self): a = scipy.sparse.csr_matrix(array([[2**31-1, 2**31-2], [2**31-3, 2**31-4]], dtype=np.uint32)) self.check_exact(a, (2, 2, 4, 'coordinate', 'unsigned-integer', 'general')) def test_64bit_unsigned_integer(self): a = scipy.sparse.csr_matrix(array([[2**32+1, 2**32+1], [2**64-2, 2**64-1]], dtype=np.uint64)) self.check_exact(a, (2, 2, 4, 'coordinate', 'unsigned-integer', 'general')) @pytest.mark.parametrize('typeval, dtype', parametrize_args) def test_simple_upper_triangle_integer(self, typeval, dtype): self.check_exact(scipy.sparse.csr_matrix([[0, 1], [0, 0]], 
dtype=dtype), (2, 2, 1, 'coordinate', typeval, 'general')) @pytest.mark.parametrize('typeval, dtype', parametrize_args) def test_simple_lower_triangle_integer(self, typeval, dtype): self.check_exact(scipy.sparse.csr_matrix([[0, 0], [1, 0]], dtype=dtype), (2, 2, 1, 'coordinate', typeval, 'general')) @pytest.mark.parametrize('typeval, dtype', parametrize_args) def test_simple_rectangular_integer(self, typeval, dtype): self.check_exact(scipy.sparse.csr_matrix([[1, 2, 3], [4, 5, 6]], dtype=dtype), (2, 3, 6, 'coordinate', typeval, 'general')) def test_simple_rectangular_float(self): self.check(scipy.sparse.csr_matrix([[1, 2], [3.5, 4], [5, 6]]), (3, 2, 6, 'coordinate', 'real', 'general')) def test_simple_float(self): self.check(scipy.sparse.csr_matrix([[1, 2], [3, 4.0]]), (2, 2, 4, 'coordinate', 'real', 'general')) def test_simple_complex(self): self.check(scipy.sparse.csr_matrix([[1, 2], [3, 4j]]), (2, 2, 4, 'coordinate', 'complex', 'general')) @pytest.mark.parametrize('typeval, dtype', parametrize_args) def test_simple_symmetric_integer(self, typeval, dtype): self.check_exact(scipy.sparse.csr_matrix([[1, 2], [2, 4]], dtype=dtype), (2, 2, 3, 'coordinate', typeval, 'symmetric')) def test_simple_skew_symmetric_integer(self): self.check_exact(scipy.sparse.csr_matrix([[0, 2], [-2, 0]]), (2, 2, 1, 'coordinate', 'integer', 'skew-symmetric')) def test_simple_skew_symmetric_float(self): self.check(scipy.sparse.csr_matrix(array([[0, 2], [-2.0, 0]], 'f')), (2, 2, 1, 'coordinate', 'real', 'skew-symmetric')) def test_simple_hermitian_complex(self): self.check(scipy.sparse.csr_matrix([[1, 2+3j], [2-3j, 4]]), (2, 2, 3, 'coordinate', 'complex', 'hermitian')) def test_random_symmetric_float(self): sz = (20, 20) a = np.random.random(sz) a = a + transpose(a) a = scipy.sparse.csr_matrix(a) self.check(a, (20, 20, 210, 'coordinate', 'real', 'symmetric')) def test_random_rectangular_float(self): sz = (20, 15) a = np.random.random(sz) a = scipy.sparse.csr_matrix(a) self.check(a, (20, 15, 
300, 'coordinate', 'real', 'general')) def test_simple_pattern(self): a = scipy.sparse.csr_matrix([[0, 1.5], [3.0, 2.5]]) p = np.zeros_like(a.todense()) p[a.todense() > 0] = 1 info = (2, 2, 3, 'coordinate', 'pattern', 'general') mmwrite(self.fn, a, field='pattern') assert_equal(mminfo(self.fn), info) b = mmread(self.fn) assert_array_almost_equal(p, b.todense()) def test_gh13634_non_skew_symmetric_int(self): a = scipy.sparse.csr_matrix([[1, 2], [-2, 99]], dtype=np.int32) self.check_exact(a, (2, 2, 4, 'coordinate', 'integer', 'general')) def test_gh13634_non_skew_symmetric_float(self): a = scipy.sparse.csr_matrix([[1, 2], [-2, 99.]], dtype=np.float32) self.check(a, (2, 2, 4, 'coordinate', 'real', 'general')) _32bit_integer_dense_example = '''\ %%MatrixMarket matrix array integer general 2 2 2147483647 2147483646 2147483647 2147483646 ''' _32bit_integer_sparse_example = '''\ %%MatrixMarket matrix coordinate integer symmetric 2 2 2 1 1 2147483647 2 2 2147483646 ''' _64bit_integer_dense_example = '''\ %%MatrixMarket matrix array integer general 2 2 2147483648 -9223372036854775806 -2147483648 9223372036854775807 ''' _64bit_integer_sparse_general_example = '''\ %%MatrixMarket matrix coordinate integer general 2 2 3 1 1 2147483648 1 2 9223372036854775807 2 2 9223372036854775807 ''' _64bit_integer_sparse_symmetric_example = '''\ %%MatrixMarket matrix coordinate integer symmetric 2 2 3 1 1 2147483648 1 2 -9223372036854775807 2 2 9223372036854775807 ''' _64bit_integer_sparse_skew_example = '''\ %%MatrixMarket matrix coordinate integer skew-symmetric 2 2 3 1 1 2147483648 1 2 -9223372036854775807 2 2 9223372036854775807 ''' _over64bit_integer_dense_example = '''\ %%MatrixMarket matrix array integer general 2 2 2147483648 9223372036854775807 2147483648 9223372036854775808 ''' _over64bit_integer_sparse_example = '''\ %%MatrixMarket matrix coordinate integer symmetric 2 2 2 1 1 2147483648 2 2 19223372036854775808 ''' class TestMMIOReadLargeIntegers: def setup_method(self): 
self.tmpdir = mkdtemp() self.fn = os.path.join(self.tmpdir, 'testfile.mtx') def teardown_method(self): shutil.rmtree(self.tmpdir) def check_read(self, example, a, info, dense, over32, over64): with open(self.fn, 'w') as f: f.write(example) assert_equal(mminfo(self.fn), info) if (over32 and (np.intp(0).itemsize < 8)) or over64: assert_raises(OverflowError, mmread, self.fn) else: b = mmread(self.fn) if not dense: b = b.todense() assert_equal(a, b) def test_read_32bit_integer_dense(self): a = array([[2**31-1, 2**31-1], [2**31-2, 2**31-2]], dtype=np.int64) self.check_read(_32bit_integer_dense_example, a, (2, 2, 4, 'array', 'integer', 'general'), dense=True, over32=False, over64=False) def test_read_32bit_integer_sparse(self): a = array([[2**31-1, 0], [0, 2**31-2]], dtype=np.int64) self.check_read(_32bit_integer_sparse_example, a, (2, 2, 2, 'coordinate', 'integer', 'symmetric'), dense=False, over32=False, over64=False) def test_read_64bit_integer_dense(self): a = array([[2**31, -2**31], [-2**63+2, 2**63-1]], dtype=np.int64) self.check_read(_64bit_integer_dense_example, a, (2, 2, 4, 'array', 'integer', 'general'), dense=True, over32=True, over64=False) def test_read_64bit_integer_sparse_general(self): a = array([[2**31, 2**63-1], [0, 2**63-1]], dtype=np.int64) self.check_read(_64bit_integer_sparse_general_example, a, (2, 2, 3, 'coordinate', 'integer', 'general'), dense=False, over32=True, over64=False) def test_read_64bit_integer_sparse_symmetric(self): a = array([[2**31, -2**63+1], [-2**63+1, 2**63-1]], dtype=np.int64) self.check_read(_64bit_integer_sparse_symmetric_example, a, (2, 2, 3, 'coordinate', 'integer', 'symmetric'), dense=False, over32=True, over64=False) def test_read_64bit_integer_sparse_skew(self): a = array([[2**31, -2**63+1], [2**63-1, 2**63-1]], dtype=np.int64) self.check_read(_64bit_integer_sparse_skew_example, a, (2, 2, 3, 'coordinate', 'integer', 'skew-symmetric'), dense=False, over32=True, over64=False) def test_read_over64bit_integer_dense(self): 
self.check_read(_over64bit_integer_dense_example, None, (2, 2, 4, 'array', 'integer', 'general'), dense=True, over32=True, over64=True) def test_read_over64bit_integer_sparse(self): self.check_read(_over64bit_integer_sparse_example, None, (2, 2, 2, 'coordinate', 'integer', 'symmetric'), dense=False, over32=True, over64=True) _general_example = '''\ %%MatrixMarket matrix coordinate real general %================================================================================= % % This ASCII file represents a sparse MxN matrix with L % nonzeros in the following Matrix Market format: % % +----------------------------------------------+ % |%%MatrixMarket matrix coordinate real general | <--- header line % |% | <--+ % |% comments | |-- 0 or more comment lines % |% | <--+ % | M N L | <--- rows, columns, entries % | I1 J1 A(I1, J1) | <--+ % | I2 J2 A(I2, J2) | | % | I3 J3 A(I3, J3) | |-- L lines % | . . . | | % | IL JL A(IL, JL) | <--+ % +----------------------------------------------+ % % Indices are 1-based, i.e. A(1,1) is the first element. % %================================================================================= 5 5 8 1 1 1.000e+00 2 2 1.050e+01 3 3 1.500e-02 1 4 6.000e+00 4 2 2.505e+02 4 4 -2.800e+02 4 5 3.332e+01 5 5 1.200e+01 ''' _hermitian_example = '''\ %%MatrixMarket matrix coordinate complex hermitian 5 5 7 1 1 1.0 0 2 2 10.5 0 4 2 250.5 22.22 3 3 1.5e-2 0 4 4 -2.8e2 0 5 5 12. 0 5 4 0 33.32 ''' _skew_example = '''\ %%MatrixMarket matrix coordinate real skew-symmetric 5 5 7 1 1 1.0 2 2 10.5 4 2 250.5 3 3 1.5e-2 4 4 -2.8e2 5 5 12. 5 4 0 ''' _symmetric_example = '''\ %%MatrixMarket matrix coordinate real symmetric 5 5 7 1 1 1.0 2 2 10.5 4 2 250.5 3 3 1.5e-2 4 4 -2.8e2 5 5 12. 
5 4 8 ''' _symmetric_pattern_example = '''\ %%MatrixMarket matrix coordinate pattern symmetric 5 5 7 1 1 2 2 4 2 3 3 4 4 5 5 5 4 ''' # example (without comment lines) from Figure 1 in # https://math.nist.gov/MatrixMarket/reports/MMformat.ps _empty_lines_example = '''\ %%MatrixMarket MATRIX Coordinate Real General 5 5 8 1 1 1.0 2 2 10.5 3 3 1.5e-2 4 4 -2.8E2 5 5 12. 1 4 6 4 2 250.5 4 5 33.32 ''' class TestMMIOCoordinate: def setup_method(self): self.tmpdir = mkdtemp() self.fn = os.path.join(self.tmpdir, 'testfile.mtx') def teardown_method(self): shutil.rmtree(self.tmpdir) def check_read(self, example, a, info): f = open(self.fn, 'w') f.write(example) f.close() assert_equal(mminfo(self.fn), info) b = mmread(self.fn).todense() assert_array_almost_equal(a, b) def test_read_general(self): a = [[1, 0, 0, 6, 0], [0, 10.5, 0, 0, 0], [0, 0, .015, 0, 0], [0, 250.5, 0, -280, 33.32], [0, 0, 0, 0, 12]] self.check_read(_general_example, a, (5, 5, 8, 'coordinate', 'real', 'general')) def test_read_hermitian(self): a = [[1, 0, 0, 0, 0], [0, 10.5, 0, 250.5 - 22.22j, 0], [0, 0, .015, 0, 0], [0, 250.5 + 22.22j, 0, -280, -33.32j], [0, 0, 0, 33.32j, 12]] self.check_read(_hermitian_example, a, (5, 5, 7, 'coordinate', 'complex', 'hermitian')) def test_read_skew(self): a = [[1, 0, 0, 0, 0], [0, 10.5, 0, -250.5, 0], [0, 0, .015, 0, 0], [0, 250.5, 0, -280, 0], [0, 0, 0, 0, 12]] self.check_read(_skew_example, a, (5, 5, 7, 'coordinate', 'real', 'skew-symmetric')) def test_read_symmetric(self): a = [[1, 0, 0, 0, 0], [0, 10.5, 0, 250.5, 0], [0, 0, .015, 0, 0], [0, 250.5, 0, -280, 8], [0, 0, 0, 8, 12]] self.check_read(_symmetric_example, a, (5, 5, 7, 'coordinate', 'real', 'symmetric')) def test_read_symmetric_pattern(self): a = [[1, 0, 0, 0, 0], [0, 1, 0, 1, 0], [0, 0, 1, 0, 0], [0, 1, 0, 1, 1], [0, 0, 0, 1, 1]] self.check_read(_symmetric_pattern_example, a, (5, 5, 7, 'coordinate', 'pattern', 'symmetric')) def test_read_empty_lines(self): a = [[1, 0, 0, 6, 0], [0, 10.5, 0, 0, 0], [0, 0, .015, 0, 
0], [0, 250.5, 0, -280, 33.32], [0, 0, 0, 0, 12]] self.check_read(_empty_lines_example, a, (5, 5, 8, 'coordinate', 'real', 'general')) def test_empty_write_read(self): # https://github.com/scipy/scipy/issues/1410 (Trac #883) b = scipy.sparse.coo_matrix((10, 10)) mmwrite(self.fn, b) assert_equal(mminfo(self.fn), (10, 10, 0, 'coordinate', 'real', 'symmetric')) a = b.todense() b = mmread(self.fn).todense() assert_array_almost_equal(a, b) def test_bzip2_py3(self): # test if fix for #2152 works try: # bz2 module isn't always built when building Python. import bz2 except ImportError: return I = array([0, 0, 1, 2, 3, 3, 3, 4]) J = array([0, 3, 1, 2, 1, 3, 4, 4]) V = array([1.0, 6.0, 10.5, 0.015, 250.5, -280.0, 33.32, 12.0]) b = scipy.sparse.coo_matrix((V, (I, J)), shape=(5, 5)) mmwrite(self.fn, b) fn_bzip2 = "%s.bz2" % self.fn with open(self.fn, 'rb') as f_in: f_out = bz2.BZ2File(fn_bzip2, 'wb') f_out.write(f_in.read()) f_out.close() a = mmread(fn_bzip2).todense() assert_array_almost_equal(a, b.todense()) def test_gzip_py3(self): # test if fix for #2152 works try: # gzip module can be missing from Python installation import gzip except ImportError: return I = array([0, 0, 1, 2, 3, 3, 3, 4]) J = array([0, 3, 1, 2, 1, 3, 4, 4]) V = array([1.0, 6.0, 10.5, 0.015, 250.5, -280.0, 33.32, 12.0]) b = scipy.sparse.coo_matrix((V, (I, J)), shape=(5, 5)) mmwrite(self.fn, b) fn_gzip = "%s.gz" % self.fn with open(self.fn, 'rb') as f_in: f_out = gzip.open(fn_gzip, 'wb') f_out.write(f_in.read()) f_out.close() a = mmread(fn_gzip).todense() assert_array_almost_equal(a, b.todense()) def test_real_write_read(self): I = array([0, 0, 1, 2, 3, 3, 3, 4]) J = array([0, 3, 1, 2, 1, 3, 4, 4]) V = array([1.0, 6.0, 10.5, 0.015, 250.5, -280.0, 33.32, 12.0]) b = scipy.sparse.coo_matrix((V, (I, J)), shape=(5, 5)) mmwrite(self.fn, b) assert_equal(mminfo(self.fn), (5, 5, 8, 'coordinate', 'real', 'general')) a = b.todense() b = mmread(self.fn).todense() assert_array_almost_equal(a, b) def 
test_complex_write_read(self): I = array([0, 0, 1, 2, 3, 3, 3, 4]) J = array([0, 3, 1, 2, 1, 3, 4, 4]) V = array([1.0 + 3j, 6.0 + 2j, 10.50 + 0.9j, 0.015 + -4.4j, 250.5 + 0j, -280.0 + 5j, 33.32 + 6.4j, 12.00 + 0.8j]) b = scipy.sparse.coo_matrix((V, (I, J)), shape=(5, 5)) mmwrite(self.fn, b) assert_equal(mminfo(self.fn), (5, 5, 8, 'coordinate', 'complex', 'general')) a = b.todense() b = mmread(self.fn).todense() assert_array_almost_equal(a, b) def test_sparse_formats(self): mats = [] I = array([0, 0, 1, 2, 3, 3, 3, 4]) J = array([0, 3, 1, 2, 1, 3, 4, 4]) V = array([1.0, 6.0, 10.5, 0.015, 250.5, -280.0, 33.32, 12.0]) mats.append(scipy.sparse.coo_matrix((V, (I, J)), shape=(5, 5))) V = array([1.0 + 3j, 6.0 + 2j, 10.50 + 0.9j, 0.015 + -4.4j, 250.5 + 0j, -280.0 + 5j, 33.32 + 6.4j, 12.00 + 0.8j]) mats.append(scipy.sparse.coo_matrix((V, (I, J)), shape=(5, 5))) for mat in mats: expected = mat.todense() for fmt in ['csr', 'csc', 'coo']: fn = mktemp(dir=self.tmpdir) # safe, we own tmpdir mmwrite(fn, mat.asformat(fmt)) result = mmread(fn).todense() assert_array_almost_equal(result, expected) def test_precision(self): test_values = [pi] + [10**(i) for i in range(0, -10, -1)] test_precisions = range(1, 10) for value in test_values: for precision in test_precisions: # construct sparse matrix with test value at last main diagonal n = 10**precision + 1 A = scipy.sparse.dok_matrix((n, n)) A[n-1, n-1] = value # write matrix with test precision and read again mmwrite(self.fn, A, precision=precision) A = scipy.io.mmread(self.fn) # check for right entries in matrix assert_array_equal(A.row, [n-1]) assert_array_equal(A.col, [n-1]) assert_allclose(A.data, [float('%%.%dg' % precision % value)]) def test_bad_number_of_coordinate_header_fields(self): s = """\ %%MatrixMarket matrix coordinate real general 5 5 8 999 1 1 1.000e+00 2 2 1.050e+01 3 3 1.500e-02 1 4 6.000e+00 4 2 2.505e+02 4 4 -2.800e+02 4 5 3.332e+01 5 5 1.200e+01 """ text = textwrap.dedent(s).encode('ascii') with 
pytest.raises(ValueError, match='not of length 3'): scipy.io.mmread(io.BytesIO(text)) def test_gh11389(): mmread(io.StringIO("%%MatrixMarket matrix coordinate complex symmetric\n" " 1 1 1\n" "1 1 -2.1846000000000e+02 0.0000000000000e+00"))
bsd-3-clause
n3wb13/OpenNfrGui-5.0-1
lib/python/Screens/HddInfo.py
3
6551
from enigma import * from Plugins.Plugin import PluginDescriptor from Screens.Screen import Screen from Components.ActionMap import ActionMap from Components.MenuList import MenuList from Components.GUIComponent import GUIComponent from Components.HTMLComponent import HTMLComponent from Tools.Directories import fileExists, crawlDirectory from Components.MultiContent import MultiContentEntryText, MultiContentEntryPixmapAlphaTest from Components.Button import Button from Components.Label import Label from Components.ConfigList import ConfigListScreen from Components.config import ConfigSelection, getConfigListEntry, config import os import sys import re class HddInfo(ConfigListScreen, Screen): skin = """ <screen name="HddInfo" position="center,115" size="900,530" title="HddInfo" flags="wfBorder"> <ePixmap position="10,497" size="35,27" pixmap="skin_default/buttons/red.png" alphatest="blend" /> <ePixmap position="638,497" size="35,27" pixmap="skin_default/buttons/blue.png" alphatest="blend" /> <eLabel text="Hard Drive Info" zPosition="2" position="10,10" size="880,40" halign="left" font="Regular;28" foregroundColor="un538eff" transparent="1" shadowColor="black" shadowOffset="-1,-1" backgroundColor="black" /> <widget font="Regular;22" halign="left" name="model" position="10,85" size="880,26" transparent="1" zPosition="1" /> <widget font="Regular;22" halign="left" name="serial" position="10,111" size="880,26" transparent="1" zPosition="1" /> <widget font="Regular;22" halign="left" name="firmware" position="11,137" size="880,26" transparent="1" zPosition="1" /> <widget font="Regular;22" halign="left" name="cylinders" position="11,163" size="880,26" transparent="1" zPosition="1" /> <widget font="Regular;22" halign="left" name="heads" position="11,188" size="880,26" transparent="1" zPosition="1" /> <widget font="Regular;22" halign="left" name="sectors" position="10,214" size="880,26" transparent="1" zPosition="1" /> <widget font="Regular;22" halign="left" name="readDisk" 
position="10,240" size="880,26" transparent="1" zPosition="1" /> <widget font="Regular;22" halign="left" name="readCache" position="10,266" size="880,26" transparent="1" zPosition="1" /> <widget font="Regular;22" halign="left" name="temp" position="10,292" size="880,26" transparent="1" zPosition="1" /> <widget name="config" position="10,57" size="880,400" scrollbarMode="showOnDemand" transparent="1" /> <widget name="key_red" position="47,499" zPosition="2" size="214,22" valign="center" halign="center" font="Regular; 20" transparent="1" shadowColor="black" shadowOffset="-1,-1" /> <widget name="key_blue" position="675,499" zPosition="3" size="214,22" valign="center" halign="center" font="Regular; 21" transparent="1" backgroundColor="foreground" /> </screen> """ def __init__(self, session, device): Screen.__init__(self, session) self.device = device self.list = [] self.list.append(getConfigListEntry(_("Standby timeout:"), config.usage.hdd_standby)) ConfigListScreen.__init__(self, self.list) self["key_green"] = Button("") self["key_red"] = Button(_("Ok")) self["key_blue"] = Button(_("Exit")) self["key_yellow"] = Button("") self["model"] = Label("Model: unknow") self["serial"] = Label("Serial: unknow") self["firmware"] = Label("Firmware: unknow") self["cylinders"] = Label("Cylinders: unknow") self["heads"] = Label("Heads: unknow") self["sectors"] = Label("Sectors: unknow") self["readDisk"] = Label("Read disk speed: unknow") self["readCache"] = Label("Read disk cache speed: unknow") self["temp"] = Label("Disk temperature: unknow") self["actions"] = ActionMap(["OkCancelActions", "ColorActions"], { "blue": self.keyCancel, #"yellow": self.yellow, "red": self.keySave, "cancel": self.keyCancel, }, -2) self.onLayoutFinish.append(self.drawInfo) def drawInfo(self): device = "/dev/%s" % self.device #regexps modelRe = re.compile(r"Model Number:\s*([\w\-]+)") serialRe = re.compile(r"Serial Number:\s*([\w\-]+)") firmwareRe = re.compile(r"Firmware Revision:\s*([\w\-]+)") cylindersRe 
= re.compile(r"cylinders\s*(\d+)\s*(\d+)") headsRe = re.compile(r"heads\s*(\d+)\s*(\d+)") sectorsRe = re.compile(r"sectors/track\s*(\d+)\s*(\d+)") readDiskRe = re.compile(r"Timing buffered disk reads:\s*(.*)") readCacheRe = re.compile(r"Timing buffer-cache reads:\s*(.*)") tempRe = re.compile(r"%s:.*:(.*)" % device) # wake up disk... disk in standby may cause not correct value os.system("/sbin/hdparm -S 0 %s" % device) hdparm = os.popen("/sbin/hdparm -I %s" % device) for line in hdparm: model = re.findall(modelRe, line) if model: self["model"].setText("Model: %s" % model[0].lstrip()) serial = re.findall(serialRe, line) if serial: self["serial"].setText("Serial: %s" % serial[0].lstrip()) firmware = re.findall(firmwareRe, line) if firmware: self["firmware"].setText("Firmware: %s" % firmware[0].lstrip()) cylinders = re.findall(cylindersRe, line) if cylinders: self["cylinders"].setText("Cylinders: %s (max) %s (current)" % (cylinders[0][0].lstrip(), cylinders[0][1].lstrip())) heads = re.findall(headsRe, line) if heads: self["heads"].setText("Heads: %s (max) %s (current)" % (heads[0][0].lstrip(), heads[0][1].lstrip())) sectors = re.findall(sectorsRe, line) if sectors: self["sectors"].setText("Sectors: %s (max) %s (current)" % (sectors[0][0].lstrip(), sectors[0][1].lstrip())) hdparm.close() hdparm = os.popen("/sbin/hdparm -t %s" % device) for line in hdparm: readDisk = re.findall(readDiskRe, line) if readDisk: self["readDisk"].setText("Read disk speed: %s" % readDisk[0].lstrip()) hdparm.close() hdparm = os.popen("/sbin/hdparm -T %s" % device) for line in hdparm: readCache = re.findall(readCacheRe, line) if readCache: self["readCache"].setText("Read disk cache speed: %s" % readCache[0].lstrip()) hdparm.close() hddtemp = os.popen("/usr/sbin/hddtemp -q %s" % device) for line in hddtemp: temp = re.findall(tempRe, line) if temp: self["temp"].setText("Disk temperature: %s" % temp[0].lstrip()) hddtemp.close()
gpl-2.0
tschneidereit/servo
tests/wpt/web-platform-tests/webdriver/element_state/visibility_test.py
58
14831
import os import sys import unittest sys.path.insert(1, os.path.abspath(os.path.join(__file__, "../.."))) import base_test from webdriver import exceptions class NaturalNonVisibleElementsTest(base_test.WebDriverBaseTest): def test_0x0_pixel_element_is_not_visible(self): self.driver.get(self.webserver.where_is("element_state/res/0x0-pixels.html")) el = self.driver.find_element_by_css("div") self.assertFalse(el.is_displayed()) def test_0x0_pixel_text_node_is_visible(self): self.driver.get(self.webserver.where_is("element_state/res/0x0-pixels-text-node.html")) el = self.driver.find_element_by_css("p") self.assertTrue(el.is_displayed()) def test_1x1_pixel_element(self): self.driver.get(self.webserver.where_is("element_state/res/1x1-pixels.html")) el = self.driver.find_element_by_css("p") self.assertTrue(el.is_displayed()) def test_zero_sized_element_is_shown_if_decendant_has_size(self): self.driver.get(self.webserver.where_is("element_state/res/zero-sized-element-with-sizable-decendant.html")) parent = self.driver.find_element_by_css("#parent") child = self.driver.find_element_by_css("#child") self.assertTrue(parent.is_displayed()) self.assertTrue(child.is_displayed()) def test_input_type_hidden_is_never_visible(self): self.driver.get(self.webserver.where_is("element_state/res/input-type-hidden.html")) input = self.driver.find_element_by_css("input") self.assertFalse(input.is_displayed()) def test_input_morphs_into_hidden(self): self.driver.get(self.webserver.where_is("element_state/res/input-morphs-into-hidden.html")) input = self.driver.find_element_by_css("input") self.assertFalse(input.is_displayed()) def test_parent_node_visible_when_all_children_are_absolutely_positioned_and_overflow_is_hidden(self): pass def test_parent_of_absolutely_positioned_elements_visible_where_ancestor_overflow_is_hidden(self): """When a parent's ancestor hides any overflow, absolutely positioned child elements are still visible. 
The parent container is also considered visible by webdriver for this reason because it is interactable.""" self.driver.get(self.webserver.where_is("element_state/res/absolute-children-ancestor-hidden-overflow.html")) children = self.driver.find_elements_by_css(".child") assert all(child.is_displayed() for child in children) parent = self.driver.find_element_by_css("#parent") assert parent.is_displayed() def test_element_hidden_by_overflow_x_is_not_visible(self): # TODO(andreastt): This test should probably be split in three. Also it's making two # assertions. pages = ["element_state/res/x-hidden-y-hidden.html", "element_state/res/x-hidden-y-scroll.html", "element_state/res/x-hidden-y-auto.html"] for page in pages: self.driver.get(self.webserver.where_is(page)) right = self.driver.find_element_by_css("#right") bottom_right = self.driver.find_element_by_css("#bottom-right") self.assertFalse(right.is_displayed()) self.assertFalse(bottom_right.is_displayed()) def test_element_hidden_by_overflow_y_is_not_visible(self): # TODO(andreastt): This test should probably be split in three. Also it's making two # assertions. 
pages = ["element_state/res/x-hidden-y-hidden.html", "element_state/res/x-scroll-y-hidden.html", "element_state/res/x-auto-y-hidden.html"] for page in pages: self.driver.get(self.webserver.where_is(page)) bottom = self.driver.find_element_by_css("#bottom") bottom_right = self.driver.find_element_by_css("#bottom-right") self.assertFalse(bottom.is_displayed()) self.assertFalse(bottom_right.is_displayed()) def test_parent_node_visible_when_all_children_are_absolutely_position_and_overflow_is_hidden(self): pass def test_element_scrollable_by_overflow_x_is_visible(self): pass def test_element_scrollable_by_overflow_y_is_visible(self): pass def test_element_scrollable_by_overflow_x_and_y_is_visible(self): pass def test_element_scrollable_by_overflow_y_is_visible(self): pass def test_element_outside_viewport(self): self.driver.get(self.webserver.where_is("element_state/res/element-outside-viewport.html")) hidden = self.driver.find_element_by_css("div") self.assertFalse(hidden.is_displayed()) def test_element_dynamically_moved_outside_viewport(self): self.driver.get(self.webserver.where_is("element_state/res/element-dynamically-moved-outside-viewport.html")) hidden = self.driver.find_element_by_css("div") self.assertFalse(hidden.is_displayed()) def test_element_hidden_by_other_element(self): self.driver.get(self.webserver.where_is("element_state/res/element-hidden-by-other-element.html")) overlay = self.driver.find_element_by_css("#overlay") hidden = self.driver.find_element_by_css("#hidden") self.assertTrue(overlay.is_displayed()) self.assertFalse(hidden.is_displayed()) def test_element_partially_hidden_by_other_element(self): self.driver.get(self.webserver.where_is("element_state/res/element-partially-hidden-by-other-element.html")) partial = self.driver.find_element_by_css("#partial") self.assertTrue(partial.is_displayed()) def test_element_hidden_by_z_index(self): self.driver.get(self.webserver.where_is("element_state/res/element-hidden-by-z-index.html")) overlay = 
self.driver.find_element_by_css("#overlay") hidden = self.driver.find_element_by_css("#hidden") self.assertTrue(overlay.is_displayed()) self.assertFalse(hidden.is_displayed()) def test_element_moved_outside_viewport_by_transform(self): self.driver.get(self.webserver.where_is("element_state/res/element-moved-outside-viewport-by-transform.html")) el = self.driver.find_element_by_css("div") self.assertFalse(el.is_displayed()) def test_element_moved_behind_other_element_by_transform(self): self.driver.get(self.webserver.where_is("element_state/res/element-moved-behind-other-element-by-transform.html")) overlay = self.driver.find_element_by_css("#overlay") hidden = self.driver.find_element_by_css("#hidden") self.assertTrue(overlay.is_displayed()) self.assertFalse(hidden.is_displayed()) def test_text_with_same_color_as_background(self): self.driver.get(self.webserver.where_is("element_state/res/text-with-same-color-as-background.html")) p = self.driver.find_element_by_css("p") self.assertFalse(p.is_displayed()) def test_text_with_same_color_as_parent_background(self): self.driver.get(self.webserver.where_is("element_state/res/text-with-same-color-as-parent-background.html")) p = self.driver.find_element_by_css("p") self.assertFalse(p.is_displayed()) def test_text_with_matching_color_and_background(self): self.driver.get(self.webserver.where_is("element_state/res/text-with-matching-color-and-background.html")) p = self.driver.find_element_by_css("p") self.assertTrue(p.is_displayed()) def test_element_with_same_color_as_background(self): self.driver.get(self.webserver.where_is("element_state/res/element-with-same-color-as-background.html")) el = self.driver.find_element_by_css("div") self.assertFalse(el.is_displayed()) def test_element_with_same_color_as_parent_background(self): self.driver.get(self.webserver.where_is("element_state/res/element-with-same-color-as-parent-background.html")) hidden = self.driver.find_element_by_css("#hidden") 
self.assertFalse(hidden.is_displayed()) class BodyElementIsAlwaysDisplayedTest(base_test.WebDriverBaseTest): def assert_body_is_displayed_on(self, page): self.driver.get(self.webserver.where_is(page)) body = self.driver.find_element_by_css("body") assert body.is_displayed() def test_implicit(self): self.assert_body_is_displayed_on("element_state/res/body_implicit.html") def test_empty(self): self.assert_body_is_displayed_on("element_state/res/body_empty.html") def test_visibility_hidden(self): self.assert_body_is_displayed_on("element_state/res/body_visibility_hidden.html") def test_overflow_hidden(self): self.assert_body_is_displayed_on("element_state/res/body_overflow_hidden.html") class DisplayTest(base_test.WebDriverBaseTest): def test_display_block(self): self.driver.get(self.webserver.where_is("element_state/res/display-block.html")) el = self.driver.find_element_by_css("p") self.assertTrue(el.is_displayed()) def test_display_none(self): self.driver.get(self.webserver.where_is("element_state/res/display-none.html")) el = self.driver.find_element_by_css("p") self.assertFalse(el.is_displayed()) def test_display_none_hides_child_node(self): self.driver.get(self.webserver.where_is("element_state/res/display-none-child.html")) parent = self.driver.find_element_by_css("#parent") child = self.driver.find_element_by_css("#child") self.assertFalse(parent.is_displayed()) self.assertFalse(child.is_displayed()) def test_display_none_hides_child_node_link(self): self.driver.get(self.webserver.where_is("element_state/res/display-none-child-link.html")) child = self.driver.find_element_by_css("#child") self.assertFalse(child.is_displayed()) def test_display_none_hides_child_node_paragraph(self): self.driver.get(self.webserver.where_is("element_state/res/display-none-child-paragraph.html")) child = self.driver.find_element_by_css("#child") self.assertFalse(child.is_displayed()) def test_display_none_on_parent_takes_presedence(self): 
self.driver.get(self.webserver.where_is("element_state/res/display-none-parent-presedence.html")) child = self.driver.find_element_by_css("#child") self.assertFalse(child.is_displayed()) def test_display_none_on_parent_takes_presedence_over_visibility_visible(self): self.driver.get(self.webserver.where_is("element_state/res/display-none-parent-presedence-visibility.html")) child = self.driver.find_element_by_css("#child") self.assertFalse(child.is_displayed()) def test_display_none_hidden_dynamically(self): self.driver.get(self.webserver.where_is("element_state/res/display-none-dynamic.html")) hidden = self.driver.find_element_by_css("#hidden") self.assertFalse(hidden.is_displayed()) class VisibilityTest(base_test.WebDriverBaseTest): def test_element_state_hidden(self): self.driver.get(self.webserver.where_is("element_state/res/visibility-hidden.html")) el = self.driver.find_element_by_css("p") self.assertFalse(el.is_displayed()) def test_element_state_visible(self): self.driver.get(self.webserver.where_is("element_state/res/visibility-visible.html")) el = self.driver.find_element_by_css("p") self.assertTrue(el.is_displayed()) def test_visibility_hidden_hides_child_node(self): self.driver.get(self.webserver.where_is("element_state/res/visibility-child.html")) parent = self.driver.find_element_by_css("#parent") child = self.driver.find_element_by_css("#child") self.assertFalse(parent.is_displayed()) self.assertFalse(child.is_displayed()) def test_visibility_hidden_hides_child_node_link(self): self.driver.get(self.webserver.where_is("element_state/res/visibility-child-link.html")) parent = self.driver.find_element_by_css("#parent") child = self.driver.find_element_by_css("#child") self.assertFalse(parent.is_displayed()) self.assertFalse(child.is_displayed()) def test_visibility_hidden_hides_child_node_paragraph(self): self.driver.get(self.webserver.where_is("element_state/res/visibility-child-paragraph.html")) parent = self.driver.find_element_by_css("#parent") child 
= self.driver.find_element_by_css("#child") self.assertFalse(parent.is_displayed()) self.assertFalse(child.is_displayed()) def test_visibility_hidden_on_child_takes_precedence(self): self.driver.get(self.webserver.where_is("element_state/res/visibility-child-presedence.html")) child = self.driver.find_element_by_css("#child") self.assertTrue(child.is_displayed()) def test_visibility_hidden_on_parent_takes_precedence_over_display_block(self): pass def test_visibility_hidden_set_dynamically(self): pass def test_should_show_element_not_visible_with_hidden_attribute(self): self.driver.get(self.webserver.where_is("element_state/res/hidden.html")) singleHidden = self.driver.find_element_by_css('#singleHidden') self.assertFalse(singleHidden.is_displayed()) def test_should_show_element_not_visible_when_parent_element_has_hidden_attribute(self): self.driver.get(self.webserver.where_is("element_state/res/hidden.html")) child = self.driver.find_element_by_css('#child') self.assertFalse(child.is_displayed()) class VisibilityInteractionTest(base_test.WebDriverBaseTest): def test_input_hidden_is_unclickable(self): self.driver.get(self.webserver.where_is("element_state/res/input-type-hidden-unclickable.html")) input = self.driver.find_element_by_css("input") with self.assertRaises(exceptions.ElementNotVisibleException): input.click() def test_hidden_input_checkbox_is_untogglable(self): self.driver.get(self.webserver.where_is("element_state/res/hidden-input-type-checkbox-untogglable.html")) checkbox = self.driver.find_element_by_css("input") with self.assertRaises(exceptions.ElementNotVisibleException): checkbox.click() def test_typing_in_hidden_input_is_impossible(self): self.driver.get(self.webserver.where_is("element_state/res/hidden-input-type-text-writing.html")) textfield = self.driver.find_element_by_css("input") with self.assertRaises(exceptions.ElementNotVisibleException): textfield.send_keys("Koha is a popular Indian cheese") class 
OpacityTest(base_test.WebDriverBaseTest): pass if __name__ == "__main__": unittest.main()
mpl-2.0
Ernesto99/odoo
addons/account_bank_statement_extensions/report/bank_statement_balance_report.py
378
2723
# -*- encoding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # # Copyright (c) 2011 Noviat nv/sa (www.noviat.be). All rights reserved. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import time from openerp.osv import osv from openerp.report import report_sxw class bank_statement_balance_report(report_sxw.rml_parse): def set_context(self, objects, data, ids, report_type=None): cr = self.cr cr.execute('SELECT s.name as s_name, s.date AS s_date, j.code as j_code, s.balance_end_real as s_balance ' \ 'FROM account_bank_statement s ' \ 'INNER JOIN account_journal j on s.journal_id = j.id ' \ 'INNER JOIN ' \ '(SELECT journal_id, max(date) as max_date FROM account_bank_statement ' \ 'GROUP BY journal_id) d ' \ 'ON (s.journal_id = d.journal_id AND s.date = d.max_date) ' \ 'ORDER BY j.code') lines = cr.dictfetchall() self.localcontext.update( { 'lines': lines, }) super(bank_statement_balance_report, self).set_context(objects, data, ids, report_type=report_type) def __init__(self, cr, uid, name, context): if context is None: context = {} super(bank_statement_balance_report, self).__init__(cr, uid, name, context=context) self.localcontext.update( { 'time': time, }) self.context = context class 
report_bankstatementbalance(osv.AbstractModel): _name = 'report.account_bank_statement_extensions.report_bankstatementbalance' _inherit = 'report.abstract_report' _template = 'account_bank_statement_extensions.report_bankstatementbalance' _wrapped_report_class = bank_statement_balance_report # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
google/clif
clif/pybind11/staging/variables_test.py
1
1785
# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for clif.pybind11.staging.variables. This file is a copy of clif/testing/python/variables_test.py. """ import unittest from clif.pybind11.staging import variables class VariableTest(unittest.TestCase): def test_const_int(self): self.assertEqual(variables.kMyConstInt, 42) def test_const_int_renamed(self): self.assertEqual(variables.const_int, 123) def test_const_float(self): self.assertEqual(variables.kMyConstFloat, 15.0) def test_const_bool(self): self.assertEqual(variables.kMyConstBool, True) def test_const_complex(self): self.assertEqual(variables.kMyConstComplex, complex(1)) def test_const_array(self): expected_array = [0, 10, 20, 30, 40] self. assertSequenceEqual(expected_array, variables.kMyConstIntArray) def test_const_pair(self): expected_tuple = [0, 10] self.assertSequenceEqual(expected_tuple, variables.kMyConstPair) def test_const_dict(self): expected_dict = {1: 10, 2: 20, 3: 30} self.assertDictEqual(expected_dict, variables.kMyConstMap) def test_const_set(self): expected_set = {1, 2, 3} self.assertSetEqual(expected_set, variables.kMyConstSet) if __name__ == '__main__': unittest.main()
apache-2.0
ghan02/roster
node_modules/laravel-elixir/node_modules/gulp-sass/node_modules/node-sass/node_modules/node-gyp/gyp/pylib/gyp/generator/make.py
240
90755
# Copyright (c) 2013 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. # Notes: # # This is all roughly based on the Makefile system used by the Linux # kernel, but is a non-recursive make -- we put the entire dependency # graph in front of make and let it figure it out. # # The code below generates a separate .mk file for each target, but # all are sourced by the top-level Makefile. This means that all # variables in .mk-files clobber one another. Be careful to use := # where appropriate for immediate evaluation, and similarly to watch # that you're not relying on a variable value to last beween different # .mk files. # # TODOs: # # Global settings and utility functions are currently stuffed in the # toplevel Makefile. It may make sense to generate some .mk files on # the side to keep the the files readable. import os import re import sys import subprocess import gyp import gyp.common import gyp.xcode_emulation from gyp.common import GetEnvironFallback from gyp.common import GypError generator_default_variables = { 'EXECUTABLE_PREFIX': '', 'EXECUTABLE_SUFFIX': '', 'STATIC_LIB_PREFIX': 'lib', 'SHARED_LIB_PREFIX': 'lib', 'STATIC_LIB_SUFFIX': '.a', 'INTERMEDIATE_DIR': '$(obj).$(TOOLSET)/$(TARGET)/geni', 'SHARED_INTERMEDIATE_DIR': '$(obj)/gen', 'PRODUCT_DIR': '$(builddir)', 'RULE_INPUT_ROOT': '%(INPUT_ROOT)s', # This gets expanded by Python. 'RULE_INPUT_DIRNAME': '%(INPUT_DIRNAME)s', # This gets expanded by Python. 'RULE_INPUT_PATH': '$(abspath $<)', 'RULE_INPUT_EXT': '$(suffix $<)', 'RULE_INPUT_NAME': '$(notdir $<)', 'CONFIGURATION_NAME': '$(BUILDTYPE)', } # Make supports multiple toolsets generator_supports_multiple_toolsets = True # Request sorted dependencies in the order from dependents to dependencies. generator_wants_sorted_dependencies = False # Placates pylint. 
generator_additional_non_configuration_keys = [] generator_additional_path_sections = [] generator_extra_sources_for_rules = [] generator_filelist_paths = None def CalculateVariables(default_variables, params): """Calculate additional variables for use in the build (called by gyp).""" flavor = gyp.common.GetFlavor(params) if flavor == 'mac': default_variables.setdefault('OS', 'mac') default_variables.setdefault('SHARED_LIB_SUFFIX', '.dylib') default_variables.setdefault('SHARED_LIB_DIR', generator_default_variables['PRODUCT_DIR']) default_variables.setdefault('LIB_DIR', generator_default_variables['PRODUCT_DIR']) # Copy additional generator configuration data from Xcode, which is shared # by the Mac Make generator. import gyp.generator.xcode as xcode_generator global generator_additional_non_configuration_keys generator_additional_non_configuration_keys = getattr(xcode_generator, 'generator_additional_non_configuration_keys', []) global generator_additional_path_sections generator_additional_path_sections = getattr(xcode_generator, 'generator_additional_path_sections', []) global generator_extra_sources_for_rules generator_extra_sources_for_rules = getattr(xcode_generator, 'generator_extra_sources_for_rules', []) COMPILABLE_EXTENSIONS.update({'.m': 'objc', '.mm' : 'objcxx'}) else: operating_system = flavor if flavor == 'android': operating_system = 'linux' # Keep this legacy behavior for now. default_variables.setdefault('OS', operating_system) default_variables.setdefault('SHARED_LIB_SUFFIX', '.so') default_variables.setdefault('SHARED_LIB_DIR','$(builddir)/lib.$(TOOLSET)') default_variables.setdefault('LIB_DIR', '$(obj).$(TOOLSET)') def CalculateGeneratorInputInfo(params): """Calculate the generator specific info that gets fed to input (called by gyp).""" generator_flags = params.get('generator_flags', {}) android_ndk_version = generator_flags.get('android_ndk_version', None) # Android NDK requires a strict link order. 
if android_ndk_version: global generator_wants_sorted_dependencies generator_wants_sorted_dependencies = True output_dir = params['options'].generator_output or \ params['options'].toplevel_dir builddir_name = generator_flags.get('output_dir', 'out') qualified_out_dir = os.path.normpath(os.path.join( output_dir, builddir_name, 'gypfiles')) global generator_filelist_paths generator_filelist_paths = { 'toplevel': params['options'].toplevel_dir, 'qualified_out_dir': qualified_out_dir, } # The .d checking code below uses these functions: # wildcard, sort, foreach, shell, wordlist # wildcard can handle spaces, the rest can't. # Since I could find no way to make foreach work with spaces in filenames # correctly, the .d files have spaces replaced with another character. The .d # file for # Chromium\ Framework.framework/foo # is for example # out/Release/.deps/out/Release/Chromium?Framework.framework/foo # This is the replacement character. SPACE_REPLACEMENT = '?' LINK_COMMANDS_LINUX = """\ quiet_cmd_alink = AR($(TOOLSET)) $@ cmd_alink = rm -f $@ && $(AR.$(TOOLSET)) crs $@ $(filter %.o,$^) quiet_cmd_alink_thin = AR($(TOOLSET)) $@ cmd_alink_thin = rm -f $@ && $(AR.$(TOOLSET)) crsT $@ $(filter %.o,$^) # Due to circular dependencies between libraries :(, we wrap the # special "figure out circular dependencies" flags around the entire # input list during linking. quiet_cmd_link = LINK($(TOOLSET)) $@ cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ -Wl,--start-group $(LD_INPUTS) -Wl,--end-group $(LIBS) # We support two kinds of shared objects (.so): # 1) shared_library, which is just bundling together many dependent libraries # into a link line. # 2) loadable_module, which is generating a module intended for dlopen(). # # They differ only slightly: # In the former case, we want to package all dependent code into the .so. # In the latter case, we want to package just the API exposed by the # outermost module. 
# This means shared_library uses --whole-archive, while loadable_module doesn't. # (Note that --whole-archive is incompatible with the --start-group used in # normal linking.) # Other shared-object link notes: # - Set SONAME to the library filename so our binaries don't reference # the local, absolute paths used on the link command-line. quiet_cmd_solink = SOLINK($(TOOLSET)) $@ cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--whole-archive $(LD_INPUTS) -Wl,--no-whole-archive $(LIBS) quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@ cmd_solink_module = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--start-group $(filter-out FORCE_DO_CMD, $^) -Wl,--end-group $(LIBS) """ LINK_COMMANDS_MAC = """\ quiet_cmd_alink = LIBTOOL-STATIC $@ cmd_alink = rm -f $@ && ./gyp-mac-tool filter-libtool libtool $(GYP_LIBTOOLFLAGS) -static -o $@ $(filter %.o,$^) quiet_cmd_link = LINK($(TOOLSET)) $@ cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o "$@" $(LD_INPUTS) $(LIBS) quiet_cmd_solink = SOLINK($(TOOLSET)) $@ cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o "$@" $(LD_INPUTS) $(LIBS) quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@ cmd_solink_module = $(LINK.$(TOOLSET)) -bundle $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(filter-out FORCE_DO_CMD, $^) $(LIBS) """ LINK_COMMANDS_ANDROID = """\ quiet_cmd_alink = AR($(TOOLSET)) $@ cmd_alink = rm -f $@ && $(AR.$(TOOLSET)) crs $@ $(filter %.o,$^) quiet_cmd_alink_thin = AR($(TOOLSET)) $@ cmd_alink_thin = rm -f $@ && $(AR.$(TOOLSET)) crsT $@ $(filter %.o,$^) # Due to circular dependencies between libraries :(, we wrap the # special "figure out circular dependencies" flags around the entire # input list during linking. 
quiet_cmd_link = LINK($(TOOLSET)) $@ quiet_cmd_link_host = LINK($(TOOLSET)) $@ cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ -Wl,--start-group $(LD_INPUTS) -Wl,--end-group $(LIBS) cmd_link_host = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(LD_INPUTS) $(LIBS) # Other shared-object link notes: # - Set SONAME to the library filename so our binaries don't reference # the local, absolute paths used on the link command-line. quiet_cmd_solink = SOLINK($(TOOLSET)) $@ cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--whole-archive $(LD_INPUTS) -Wl,--no-whole-archive $(LIBS) quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@ cmd_solink_module = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--start-group $(filter-out FORCE_DO_CMD, $^) -Wl,--end-group $(LIBS) quiet_cmd_solink_module_host = SOLINK_MODULE($(TOOLSET)) $@ cmd_solink_module_host = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ $(filter-out FORCE_DO_CMD, $^) $(LIBS) """ LINK_COMMANDS_AIX = """\ quiet_cmd_alink = AR($(TOOLSET)) $@ cmd_alink = rm -f $@ && $(AR.$(TOOLSET)) crs $@ $(filter %.o,$^) quiet_cmd_alink_thin = AR($(TOOLSET)) $@ cmd_alink_thin = rm -f $@ && $(AR.$(TOOLSET)) crs $@ $(filter %.o,$^) quiet_cmd_link = LINK($(TOOLSET)) $@ cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(LD_INPUTS) $(LIBS) quiet_cmd_solink = SOLINK($(TOOLSET)) $@ cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(LD_INPUTS) $(LIBS) quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@ cmd_solink_module = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(filter-out FORCE_DO_CMD, $^) $(LIBS) """ # Header of toplevel Makefile. # This should go into the build tree, but it's easier to keep it here for now. 
SHARED_HEADER = ("""\ # We borrow heavily from the kernel build setup, though we are simpler since # we don't have Kconfig tweaking settings on us. # The implicit make rules have it looking for RCS files, among other things. # We instead explicitly write all the rules we care about. # It's even quicker (saves ~200ms) to pass -r on the command line. MAKEFLAGS=-r # The source directory tree. srcdir := %(srcdir)s abs_srcdir := $(abspath $(srcdir)) # The name of the builddir. builddir_name ?= %(builddir)s # The V=1 flag on command line makes us verbosely print command lines. ifdef V quiet= else quiet=quiet_ endif # Specify BUILDTYPE=Release on the command line for a release build. BUILDTYPE ?= %(default_configuration)s # Directory all our build output goes into. # Note that this must be two directories beneath src/ for unit tests to pass, # as they reach into the src/ directory for data with relative paths. builddir ?= $(builddir_name)/$(BUILDTYPE) abs_builddir := $(abspath $(builddir)) depsdir := $(builddir)/.deps # Object output directory. obj := $(builddir)/obj abs_obj := $(abspath $(obj)) # We build up a list of every single one of the targets so we can slurp in the # generated dependency rule Makefiles in one pass. all_deps := %(make_global_settings)s CC.target ?= %(CC.target)s CFLAGS.target ?= $(CFLAGS) CXX.target ?= %(CXX.target)s CXXFLAGS.target ?= $(CXXFLAGS) LINK.target ?= %(LINK.target)s LDFLAGS.target ?= $(LDFLAGS) AR.target ?= $(AR) # C++ apps need to be linked with g++. LINK ?= $(CXX.target) # TODO(evan): move all cross-compilation logic to gyp-time so we don't need # to replicate this environment fallback in make as well. CC.host ?= %(CC.host)s CFLAGS.host ?= CXX.host ?= %(CXX.host)s CXXFLAGS.host ?= LINK.host ?= %(LINK.host)s LDFLAGS.host ?= AR.host ?= %(AR.host)s # Define a dir function that can handle spaces. 
# http://www.gnu.org/software/make/manual/make.html#Syntax-of-Functions # "leading spaces cannot appear in the text of the first argument as written. # These characters can be put into the argument value by variable substitution." empty := space := $(empty) $(empty) # http://stackoverflow.com/questions/1189781/using-make-dir-or-notdir-on-a-path-with-spaces replace_spaces = $(subst $(space),""" + SPACE_REPLACEMENT + """,$1) unreplace_spaces = $(subst """ + SPACE_REPLACEMENT + """,$(space),$1) dirx = $(call unreplace_spaces,$(dir $(call replace_spaces,$1))) # Flags to make gcc output dependency info. Note that you need to be # careful here to use the flags that ccache and distcc can understand. # We write to a dep file on the side first and then rename at the end # so we can't end up with a broken dep file. depfile = $(depsdir)/$(call replace_spaces,$@).d DEPFLAGS = -MMD -MF $(depfile).raw # We have to fixup the deps output in a few ways. # (1) the file output should mention the proper .o file. # ccache or distcc lose the path to the target, so we convert a rule of # the form: # foobar.o: DEP1 DEP2 # into # path/to/foobar.o: DEP1 DEP2 # (2) we want missing files not to cause us to fail to build. # We want to rewrite # foobar.o: DEP1 DEP2 \\ # DEP3 # to # DEP1: # DEP2: # DEP3: # so if the files are missing, they're just considered phony rules. # We have to do some pretty insane escaping to get those backslashes # and dollar signs past make, the shell, and sed at the same time. # Doesn't work with spaces, but that's fine: .d files have spaces in # their names replaced with other characters.""" r""" define fixup_dep # The depfile may not exist if the input file didn't have any #includes. touch $(depfile).raw # Fixup path as in (1). sed -e "s|^$(notdir $@)|$@|" $(depfile).raw >> $(depfile) # Add extra rules as in (2). # We remove slashes and replace spaces with new lines; # remove blank lines; # delete the first line and append a colon to the remaining lines. 
sed -e 's|\\||' -e 'y| |\n|' $(depfile).raw |\ grep -v '^$$' |\ sed -e 1d -e 's|$$|:|' \ >> $(depfile) rm $(depfile).raw endef """ """ # Command definitions: # - cmd_foo is the actual command to run; # - quiet_cmd_foo is the brief-output summary of the command. quiet_cmd_cc = CC($(TOOLSET)) $@ cmd_cc = $(CC.$(TOOLSET)) $(GYP_CFLAGS) $(DEPFLAGS) $(CFLAGS.$(TOOLSET)) -c -o $@ $< quiet_cmd_cxx = CXX($(TOOLSET)) $@ cmd_cxx = $(CXX.$(TOOLSET)) $(GYP_CXXFLAGS) $(DEPFLAGS) $(CXXFLAGS.$(TOOLSET)) -c -o $@ $< %(extra_commands)s quiet_cmd_touch = TOUCH $@ cmd_touch = touch $@ quiet_cmd_copy = COPY $@ # send stderr to /dev/null to ignore messages when linking directories. cmd_copy = rm -rf "$@" && cp -af "$<" "$@" %(link_commands)s """ r""" # Define an escape_quotes function to escape single quotes. # This allows us to handle quotes properly as long as we always use # use single quotes and escape_quotes. escape_quotes = $(subst ','\'',$(1)) # This comment is here just to include a ' to unconfuse syntax highlighting. # Define an escape_vars function to escape '$' variable syntax. # This allows us to read/write command lines with shell variables (e.g. # $LD_LIBRARY_PATH), without triggering make substitution. escape_vars = $(subst $$,$$$$,$(1)) # Helper that expands to a shell command to echo a string exactly as it is in # make. This uses printf instead of echo because printf's behaviour with respect # to escape sequences is more portable than echo's across different shells # (e.g., dash, bash). exact_echo = printf '%%s\n' '$(call escape_quotes,$(1))' """ """ # Helper to compare the command we're about to run against the command # we logged the last time we ran the command. Produces an empty # string (false) when the commands match. # Tricky point: Make has no string-equality test function. # The kernel uses the following, but it seems like it would have false # positives, where one string reordered its arguments. 
# arg_check = $(strip $(filter-out $(cmd_$(1)), $(cmd_$@)) \\ # $(filter-out $(cmd_$@), $(cmd_$(1)))) # We instead substitute each for the empty string into the other, and # say they're equal if both substitutions produce the empty string. # .d files contain """ + SPACE_REPLACEMENT + \ """ instead of spaces, take that into account. command_changed = $(or $(subst $(cmd_$(1)),,$(cmd_$(call replace_spaces,$@))),\\ $(subst $(cmd_$(call replace_spaces,$@)),,$(cmd_$(1)))) # Helper that is non-empty when a prerequisite changes. # Normally make does this implicitly, but we force rules to always run # so we can check their command lines. # $? -- new prerequisites # $| -- order-only dependencies prereq_changed = $(filter-out FORCE_DO_CMD,$(filter-out $|,$?)) # Helper that executes all postbuilds until one fails. define do_postbuilds @E=0;\\ for p in $(POSTBUILDS); do\\ eval $$p;\\ E=$$?;\\ if [ $$E -ne 0 ]; then\\ break;\\ fi;\\ done;\\ if [ $$E -ne 0 ]; then\\ rm -rf "$@";\\ exit $$E;\\ fi endef # do_cmd: run a command via the above cmd_foo names, if necessary. # Should always run for a given target to handle command-line changes. # Second argument, if non-zero, makes it do asm/C/C++ dependency munging. # Third argument, if non-zero, makes it do POSTBUILDS processing. # Note: We intentionally do NOT call dirx for depfile, since it contains """ + \ SPACE_REPLACEMENT + """ for # spaces already and dirx strips the """ + SPACE_REPLACEMENT + \ """ characters. 
define do_cmd $(if $(or $(command_changed),$(prereq_changed)), @$(call exact_echo, $($(quiet)cmd_$(1))) @mkdir -p "$(call dirx,$@)" "$(dir $(depfile))" $(if $(findstring flock,$(word %(flock_index)d,$(cmd_$1))), @$(cmd_$(1)) @echo " $(quiet_cmd_$(1)): Finished", @$(cmd_$(1)) ) @$(call exact_echo,$(call escape_vars,cmd_$(call replace_spaces,$@) := $(cmd_$(1)))) > $(depfile) @$(if $(2),$(fixup_dep)) $(if $(and $(3), $(POSTBUILDS)), $(call do_postbuilds) ) ) endef # Declare the "%(default_target)s" target first so it is the default, # even though we don't have the deps yet. .PHONY: %(default_target)s %(default_target)s: # make looks for ways to re-generate included makefiles, but in our case, we # don't have a direct way. Explicitly telling make that it has nothing to do # for them makes it go faster. %%.d: ; # Use FORCE_DO_CMD to force a target to run. Should be coupled with # do_cmd. .PHONY: FORCE_DO_CMD FORCE_DO_CMD: """) SHARED_HEADER_MAC_COMMANDS = """ quiet_cmd_objc = CXX($(TOOLSET)) $@ cmd_objc = $(CC.$(TOOLSET)) $(GYP_OBJCFLAGS) $(DEPFLAGS) -c -o $@ $< quiet_cmd_objcxx = CXX($(TOOLSET)) $@ cmd_objcxx = $(CXX.$(TOOLSET)) $(GYP_OBJCXXFLAGS) $(DEPFLAGS) -c -o $@ $< # Commands for precompiled header files. quiet_cmd_pch_c = CXX($(TOOLSET)) $@ cmd_pch_c = $(CC.$(TOOLSET)) $(GYP_PCH_CFLAGS) $(DEPFLAGS) $(CXXFLAGS.$(TOOLSET)) -c -o $@ $< quiet_cmd_pch_cc = CXX($(TOOLSET)) $@ cmd_pch_cc = $(CC.$(TOOLSET)) $(GYP_PCH_CXXFLAGS) $(DEPFLAGS) $(CXXFLAGS.$(TOOLSET)) -c -o $@ $< quiet_cmd_pch_m = CXX($(TOOLSET)) $@ cmd_pch_m = $(CC.$(TOOLSET)) $(GYP_PCH_OBJCFLAGS) $(DEPFLAGS) -c -o $@ $< quiet_cmd_pch_mm = CXX($(TOOLSET)) $@ cmd_pch_mm = $(CC.$(TOOLSET)) $(GYP_PCH_OBJCXXFLAGS) $(DEPFLAGS) -c -o $@ $< # gyp-mac-tool is written next to the root Makefile by gyp. # Use $(4) for the command, since $(2) and $(3) are used as flag by do_cmd # already. 
quiet_cmd_mac_tool = MACTOOL $(4) $<
cmd_mac_tool = ./gyp-mac-tool $(4) $< "$@"

quiet_cmd_mac_package_framework = PACKAGE FRAMEWORK $@
cmd_mac_package_framework = ./gyp-mac-tool package-framework "$@" $(4)

quiet_cmd_infoplist = INFOPLIST $@
cmd_infoplist = $(CC.$(TOOLSET)) -E -P -Wno-trigraphs -x c $(INFOPLIST_DEFINES) "$<" -o "$@"
"""


def WriteRootHeaderSuffixRules(writer):
  """Write the root Makefile's suffix rules for every compilable extension.

  Emits three groups of pattern rules, one per possible source location:
  sources in $(srcdir), generated sources in the per-toolset $(obj).$(TOOLSET)
  directory, and generated sources in the shared $(obj) directory.  Each rule
  dispatches to the matching do_cmd (cc/cxx) from COMPILABLE_EXTENSIONS.

  Arguments:
    writer: a file-like object with a write() method (the root Makefile).
  """
  # Sort case-insensitively so '.s' and '.S' rules stay adjacent and output
  # is deterministic.
  extensions = sorted(COMPILABLE_EXTENSIONS.keys(), key=str.lower)

  writer.write('# Suffix rules, putting all outputs into $(obj).\n')
  for ext in extensions:
    writer.write('$(obj).$(TOOLSET)/%%.o: $(srcdir)/%%%s FORCE_DO_CMD\n' % ext)
    writer.write('\t@$(call do_cmd,%s,1)\n' % COMPILABLE_EXTENSIONS[ext])

  writer.write('\n# Try building from generated source, too.\n')
  for ext in extensions:
    writer.write(
        '$(obj).$(TOOLSET)/%%.o: $(obj).$(TOOLSET)/%%%s FORCE_DO_CMD\n' % ext)
    writer.write('\t@$(call do_cmd,%s,1)\n' % COMPILABLE_EXTENSIONS[ext])
  writer.write('\n')
  for ext in extensions:
    writer.write('$(obj).$(TOOLSET)/%%.o: $(obj)/%%%s FORCE_DO_CMD\n' % ext)
    writer.write('\t@$(call do_cmd,%s,1)\n' % COMPILABLE_EXTENSIONS[ext])
  writer.write('\n')


# Per-target .mk files repeat these two banner comments before their own
# suffix rules (see MakefileWriter.Write).
SHARED_HEADER_SUFFIX_RULES_COMMENT1 = ("""\
# Suffix rules, putting all outputs into $(obj).
""")


SHARED_HEADER_SUFFIX_RULES_COMMENT2 = ("""\
# Try building from generated source, too.
""")


SHARED_FOOTER = """\
# "all" is a concatenation of the "all" targets from all the included
# sub-makefiles. This is just here to clarify.
all:

# Add in dependency-tracking rules. $(all_deps) is the list of every single
# target in our tree. Only consider the ones with .d (dependency) info:
d_files := $(wildcard $(foreach f,$(all_deps),$(depsdir)/$(f).d))
ifneq ($(d_files),)
  include $(d_files)
endif
"""

# Banner written at the top of every generated Makefile.
header = """\
# This file is generated by gyp; do not edit.

"""

# Maps every compilable file extension to the do_cmd that compiles it.
COMPILABLE_EXTENSIONS = {
  '.c': 'cc',
  '.cc': 'cxx',
  '.cpp': 'cxx',
  '.cxx': 'cxx',
  '.s': 'cc',
  '.S': 'cc',
}

def Compilable(filename):
  """Return true if the file is compilable (i.e. belongs in OBJS)."""
  return any(filename.endswith(ext) for ext in COMPILABLE_EXTENSIONS)


def Linkable(filename):
  """Return true if the file can go directly on the link line."""
  return filename.endswith('.o')


def Target(filename):
  """Map a compilable source filename to the .o file it produces."""
  root = os.path.splitext(filename)[0]
  return root + '.o'


def EscapeShellArgument(s):
  """Quotes an argument so that it will be interpreted literally by a POSIX
  shell. Taken from
  http://stackoverflow.com/questions/35817/whats-the-best-way-to-escape-ossystem-calls-in-python
  """
  # Each embedded single quote becomes close-quote, escaped quote, re-open.
  quoted = s.replace("'", "'\\''")
  return "'" + quoted + "'"


def EscapeMakeVariableExpansion(s):
  """Escape make's $-based variable expansion so the string is taken
  literally."""
  return s.replace('$', '$$')


def EscapeCppDefine(s):
  """Escapes a CPP define so that it will reach the compiler unaltered."""
  # Shell-quote first, then neutralize make's $ expansion.
  escaped = EscapeMakeVariableExpansion(EscapeShellArgument(s))
  # '#' characters must be escaped even embedded in a string, else Make will
  # treat it as the start of a comment.
  return escaped.replace('#', r'\#')


def QuoteIfNecessary(string):
  """Double-quote the string (escaping embedded double quotes) only when it
  contains one.

  TODO: Should this ideally be replaced with one or more of the above
  functions?
  """
  if '"' not in string:
    return string
  return '"' + string.replace('"', '\\"') + '"'


def StringToMakefileVariable(string):
  """Convert a string to a value that is acceptable as a make variable name."""
  return re.sub('[^a-zA-Z0-9_]', '_', string)


srcdir_prefix = ''
def Sourceify(path):
  """Convert a path to its source directory form."""
  # Paths containing make variables or already absolute are left untouched.
  if '$(' in path or os.path.isabs(path):
    return path
  return srcdir_prefix + path


def QuoteSpaces(s, quote=r'\ '):
  """Escape spaces in s with the given quote sequence (default backslash)."""
  return s.replace(' ', quote)

# TODO: Avoid code duplication with _ValidateSourcesForMSVSProject in msvs.py.
def _ValidateSourcesForOSX(spec, all_sources):
  """Ensures that no duplicate basenames appear in a static library's sources.

  libtool on OS X emits warnings (and can mislink) when two objects in the
  same archive share a basename, so this fails fast at gyp time instead.

  Arguments:
    spec: The target dictionary containing the properties of the target.
    all_sources: list of source paths for the target.

  Raises:
    GypError: if two compiled sources share a basename.
  """
  if spec.get('type', None) != 'static_library':
    return

  basenames = {}
  for source in all_sources:
    name, ext = os.path.splitext(source)
    is_compiled_file = ext in [
        '.c', '.cc', '.cpp', '.cxx', '.m', '.mm', '.s', '.S']
    if not is_compiled_file:
      continue
    basename = os.path.basename(name)  # Don't include extension.
    basenames.setdefault(basename, []).append(source)

  error = ''
  # NOTE: iteritems() is Python 2 only, consistent with the rest of this file.
  for basename, files in basenames.iteritems():
    if len(files) > 1:
      error += '  %s: %s\n' % (basename, ' '.join(files))

  if error:
    print('static library %s has several files with the same basename:\n' %
          spec['target_name'] + error + 'libtool on OS X will generate' +
          ' warnings for them.')
    raise GypError('Duplicate basenames in sources section, see list above')


# Map from qualified target to path to output.
target_outputs = {}
# Map from qualified target to any linkable output. A subset
# of target_outputs. E.g. when mybinary depends on liba, we want to
# include liba in the linker line; when otherbinary depends on
# mybinary, we just want to build mybinary first.
target_link_deps = {}


class MakefileWriter(object):
  """MakefileWriter packages up the writing of one target-specific foobar.mk.

  Its only real entry point is Write(), and is mostly used for namespacing.
  """

  def __init__(self, generator_flags, flavor):
    # generator_flags: dict of generator options (e.g. android_ndk_version).
    # flavor: platform flavor string; 'mac' enables xcode emulation paths.
    self.generator_flags = generator_flags
    self.flavor = flavor

    self.suffix_rules_srcdir = {}
    self.suffix_rules_objdir1 = {}
    self.suffix_rules_objdir2 = {}

    # Generate suffix rules for all compilable extensions.
    # Each template's recipe line must start with a literal tab for make.
    for ext in COMPILABLE_EXTENSIONS.keys():
      # Suffix rules for source folder.
      self.suffix_rules_srcdir.update({ext: ("""\
$(obj).$(TOOLSET)/$(TARGET)/%%.o: $(srcdir)/%%%s FORCE_DO_CMD
	@$(call do_cmd,%s,1)
""" % (ext, COMPILABLE_EXTENSIONS[ext]))})

      # Suffix rules for generated source files.
      self.suffix_rules_objdir1.update({ext: ("""\
$(obj).$(TOOLSET)/$(TARGET)/%%.o: $(obj).$(TOOLSET)/%%%s FORCE_DO_CMD
	@$(call do_cmd,%s,1)
""" % (ext, COMPILABLE_EXTENSIONS[ext]))})
      self.suffix_rules_objdir2.update({ext: ("""\
$(obj).$(TOOLSET)/$(TARGET)/%%.o: $(obj)/%%%s FORCE_DO_CMD
	@$(call do_cmd,%s,1)
""" % (ext, COMPILABLE_EXTENSIONS[ext]))})

  def Write(self, qualified_target, base_path, output_filename, spec, configs,
            part_of_all):
    """The main entry point: writes a .mk file for a single target.

    Arguments:
      qualified_target: target we're generating
      base_path: path relative to source root we're building in, used to
                 resolve target-relative paths
      output_filename: output .mk file name to write
      spec, configs: gyp info
      part_of_all: flag indicating this target is part of 'all'
    """
    gyp.common.EnsureDirExists(output_filename)
    self.fp = open(output_filename, 'w')

    self.fp.write(header)

    self.qualified_target = qualified_target
    self.path = base_path
    self.target = spec['target_name']
    self.type = spec['type']
    self.toolset = spec['toolset']

    self.is_mac_bundle = gyp.xcode_emulation.IsMacBundle(self.flavor, spec)
    if self.flavor == 'mac':
      self.xcode_settings = gyp.xcode_emulation.XcodeSettings(spec)
    else:
      self.xcode_settings = None

    deps, link_deps = self.ComputeDeps(spec)

    # Some of the generation below can add extra output, sources, or
    # link dependencies. All of the out params of the functions that
    # follow use names like extra_foo.
    extra_outputs = []
    extra_sources = []
    extra_link_deps = []
    extra_mac_bundle_resources = []
    mac_bundle_deps = []

    if self.is_mac_bundle:
      self.output = self.ComputeMacBundleOutput(spec)
      self.output_binary = self.ComputeMacBundleBinaryOutput(spec)
    else:
      self.output = self.output_binary = self.ComputeOutput(spec)

    self.is_standalone_static_library = bool(
        spec.get('standalone_static_library', 0))
    self._INSTALLABLE_TARGETS = ('executable', 'loadable_module',
                                 'shared_library')
    # Installable targets (and standalone static libraries) get copied to an
    # install location and are aliased by their basename.
    if (self.is_standalone_static_library or
        self.type in self._INSTALLABLE_TARGETS):
      self.alias = os.path.basename(self.output)
      install_path = self._InstallableTargetInstallPath()
    else:
      self.alias = self.output
      install_path = self.output

    self.WriteLn("TOOLSET := " + self.toolset)
    self.WriteLn("TARGET := " + self.target)

    # Actions must come first, since they can generate more OBJs for use below.
    if 'actions' in spec:
      self.WriteActions(spec['actions'], extra_sources, extra_outputs,
                        extra_mac_bundle_resources, part_of_all)

    # Rules must be early like actions.
    if 'rules' in spec:
      self.WriteRules(spec['rules'], extra_sources, extra_outputs,
                      extra_mac_bundle_resources, part_of_all)

    if 'copies' in spec:
      self.WriteCopies(spec['copies'], extra_outputs, part_of_all)

    # Bundle resources.
    if self.is_mac_bundle:
      all_mac_bundle_resources = (
          spec.get('mac_bundle_resources', []) + extra_mac_bundle_resources)
      self.WriteMacBundleResources(all_mac_bundle_resources, mac_bundle_deps)
      self.WriteMacInfoPlist(mac_bundle_deps)

    # Sources.
    all_sources = spec.get('sources', []) + extra_sources
    if all_sources:
      if self.flavor == 'mac':
        # libtool on OS X generates warnings for duplicate basenames in the same
        # target.
        _ValidateSourcesForOSX(spec, all_sources)
      self.WriteSources(
          configs, deps, all_sources, extra_outputs,
          extra_link_deps, part_of_all,
          gyp.xcode_emulation.MacPrefixHeader(
              self.xcode_settings, lambda p: Sourceify(self.Absolutify(p)),
              self.Pchify))
      # Emit the suffix rules needed by this target's source extensions
      # (see __init__ for the rule templates).
      sources = filter(Compilable, all_sources)
      if sources:
        self.WriteLn(SHARED_HEADER_SUFFIX_RULES_COMMENT1)
        extensions = set([os.path.splitext(s)[1] for s in sources])
        for ext in extensions:
          if ext in self.suffix_rules_srcdir:
            self.WriteLn(self.suffix_rules_srcdir[ext])
        self.WriteLn(SHARED_HEADER_SUFFIX_RULES_COMMENT2)
        for ext in extensions:
          if ext in self.suffix_rules_objdir1:
            self.WriteLn(self.suffix_rules_objdir1[ext])
        for ext in extensions:
          if ext in self.suffix_rules_objdir2:
            self.WriteLn(self.suffix_rules_objdir2[ext])
        self.WriteLn('# End of this set of suffix rules')

    # Add dependency from bundle to bundle binary.
    if self.is_mac_bundle:
      mac_bundle_deps.append(self.output_binary)

    self.WriteTarget(spec, configs, deps, extra_link_deps + link_deps,
                     mac_bundle_deps, extra_outputs, part_of_all)

    # Update global list of target outputs, used in dependency tracking.
    target_outputs[qualified_target] = install_path

    # Update global list of link dependencies.
    if self.type in ('static_library', 'shared_library'):
      target_link_deps[qualified_target] = self.output_binary

    # Currently any versions have the same effect, but in future the behavior
    # could be different.
    if self.generator_flags.get('android_ndk_version', None):
      self.WriteAndroidNdkModuleRule(self.target, all_sources, link_deps)

    self.fp.close()

  def WriteSubMake(self, output_filename, makefile_path, targets, build_dir):
    """Write a "sub-project" Makefile.

    This is a small, wrapper Makefile that calls the top-level Makefile to build
    the targets from a single gyp file (i.e. a sub-project).
    Arguments:
      output_filename: sub-project Makefile name to write
      makefile_path: path to the top-level Makefile
      targets: list of "all" targets for this sub-project
      build_dir: build output directory, relative to the sub-project
    """
    gyp.common.EnsureDirExists(output_filename)
    self.fp = open(output_filename, 'w')
    self.fp.write(header)
    # For consistency with other builders, put sub-project build output in the
    # sub-project dir (see test/subdirectory/gyptest-subdir-all.py).
    self.WriteLn('export builddir_name ?= %s' %
                 os.path.join(os.path.dirname(output_filename), build_dir))
    self.WriteLn('.PHONY: all')
    self.WriteLn('all:')
    if makefile_path:
      makefile_path = ' -C ' + makefile_path
    self.WriteLn('\t$(MAKE)%s %s' % (makefile_path, ' '.join(targets)))
    self.fp.close()

  def WriteActions(self, actions, extra_sources, extra_outputs,
                   extra_mac_bundle_resources, part_of_all):
    """Write Makefile code for any 'actions' from the gyp input.

    extra_sources: a list that will be filled in with newly generated source
                   files, if any
    extra_outputs: a list that will be filled in with any outputs of these
                   actions (used to make other pieces dependent on these
                   actions)
    part_of_all: flag indicating this target is part of 'all'
    """
    env = self.GetSortedXcodeEnv()
    for action in actions:
      name = StringToMakefileVariable('%s_%s' % (self.qualified_target,
                                                 action['action_name']))
      self.WriteLn('### Rules for action "%s":' % action['action_name'])
      inputs = action['inputs']
      outputs = action['outputs']

      # Build up a list of outputs.
      # Collect the output dirs we'll need.
      # NOTE(review): 'dir' below shadows the builtin of the same name.
      dirs = set()
      for out in outputs:
        dir = os.path.split(out)[0]
        if dir:
          dirs.add(dir)
      if int(action.get('process_outputs_as_sources', False)):
        extra_sources += outputs
      if int(action.get('process_outputs_as_mac_bundle_resources', False)):
        extra_mac_bundle_resources += outputs

      # Write the actual command.
      action_commands = action['action']
      if self.flavor == 'mac':
        action_commands = [gyp.xcode_emulation.ExpandEnvVars(command, env)
                          for command in action_commands]
      command = gyp.common.EncodePOSIXShellList(action_commands)
      if 'message' in action:
        self.WriteLn('quiet_cmd_%s = ACTION %s $@' % (name, action['message']))
      else:
        self.WriteLn('quiet_cmd_%s = ACTION %s $@' % (name, name))
      if len(dirs) > 0:
        command = 'mkdir -p %s' % ' '.join(dirs) + '; ' + command

      cd_action = 'cd %s; ' % Sourceify(self.path or '.')

      # command and cd_action get written to a toplevel variable called
      # cmd_foo. Toplevel variables can't handle things that change per
      # makefile like $(TARGET), so hardcode the target.
      command = command.replace('$(TARGET)', self.target)
      cd_action = cd_action.replace('$(TARGET)', self.target)

      # Set LD_LIBRARY_PATH in case the action runs an executable from this
      # build which links to shared libs from this build.
      # actions run on the host, so they should in theory only use host
      # libraries, but until everything is made cross-compile safe, also use
      # target libraries.
      # TODO(piman): when everything is cross-compile safe, remove lib.target
      self.WriteLn('cmd_%s = LD_LIBRARY_PATH=$(builddir)/lib.host:'
                   '$(builddir)/lib.target:$$LD_LIBRARY_PATH; '
                   'export LD_LIBRARY_PATH; '
                   '%s%s'
                   % (name, cd_action, command))
      self.WriteLn()
      outputs = map(self.Absolutify, outputs)

      # The makefile rules are all relative to the top dir, but the gyp actions
      # are defined relative to their containing dir. This replaces the obj
      # variable for the action rule with an absolute version so that the output
      # goes in the right place.
      # Only write the 'obj' and 'builddir' rules for the "primary" output (:1);
      # it's superfluous for the "extra outputs", and this avoids accidentally
      # writing duplicate dummy rules for those outputs.
      # Same for environment.
      self.WriteLn("%s: obj := $(abs_obj)" % QuoteSpaces(outputs[0]))
      self.WriteLn("%s: builddir := $(abs_builddir)" % QuoteSpaces(outputs[0]))
      self.WriteSortedXcodeEnv(outputs[0], self.GetSortedXcodeEnv())

      # NOTE(review): 'input' below shadows the builtin of the same name.
      for input in inputs:
        assert ' ' not in input, (
            "Spaces in action input filenames not supported (%s)" % input)
      for output in outputs:
        assert ' ' not in output, (
            "Spaces in action output filenames not supported (%s)" % output)

      # See the comment in WriteCopies about expanding env vars.
      outputs = [gyp.xcode_emulation.ExpandEnvVars(o, env) for o in outputs]
      inputs = [gyp.xcode_emulation.ExpandEnvVars(i, env) for i in inputs]

      self.WriteDoCmd(outputs, map(Sourceify, map(self.Absolutify, inputs)),
                      part_of_all=part_of_all, command=name)

      # Stuff the outputs in a variable so we can refer to them later.
      outputs_variable = 'action_%s_outputs' % name
      self.WriteLn('%s := %s' % (outputs_variable, ' '.join(outputs)))
      extra_outputs.append('$(%s)' % outputs_variable)
      self.WriteLn()

    self.WriteLn()

  def WriteRules(self, rules, extra_sources, extra_outputs,
                 extra_mac_bundle_resources, part_of_all):
    """Write Makefile code for any 'rules' from the gyp input.
    extra_sources: a list that will be filled in with newly generated source
                   files, if any
    extra_outputs: a list that will be filled in with any outputs of these
                   rules (used to make other pieces dependent on these rules)
    part_of_all: flag indicating this target is part of 'all'
    """
    env = self.GetSortedXcodeEnv()
    for rule in rules:
      name = StringToMakefileVariable('%s_%s' % (self.qualified_target,
                                                 rule['rule_name']))
      # count numbers the per-source command variables: cmd_<name>_0, _1, ...
      count = 0
      self.WriteLn('### Generated for rule %s:' % name)

      all_outputs = []

      for rule_source in rule.get('rule_sources', []):
        dirs = set()
        (rule_source_dirname, rule_source_basename) = os.path.split(rule_source)
        (rule_source_root, rule_source_ext) = \
            os.path.splitext(rule_source_basename)

        outputs = [self.ExpandInputRoot(out, rule_source_root,
                                        rule_source_dirname)
                   for out in rule['outputs']]

        for out in outputs:
          dir = os.path.dirname(out)
          if dir:
            dirs.add(dir)
        if int(rule.get('process_outputs_as_sources', False)):
          extra_sources += outputs
        if int(rule.get('process_outputs_as_mac_bundle_resources', False)):
          extra_mac_bundle_resources += outputs
        inputs = map(Sourceify, map(self.Absolutify, [rule_source] +
                                    rule.get('inputs', [])))
        actions = ['$(call do_cmd,%s_%d)' % (name, count)]

        if name == 'resources_grit':
          # HACK: This is ugly. Grit intentionally doesn't touch the
          # timestamp of its output file when the file doesn't change,
          # which is fine in hash-based dependency systems like scons
          # and forge, but not kosher in the make world. After some
          # discussion, hacking around it here seems like the least
          # amount of pain.
          actions += ['@touch --no-create $@']

        # See the comment in WriteCopies about expanding env vars.
        outputs = [gyp.xcode_emulation.ExpandEnvVars(o, env) for o in outputs]
        inputs = [gyp.xcode_emulation.ExpandEnvVars(i, env) for i in inputs]

        outputs = map(self.Absolutify, outputs)
        all_outputs += outputs
        # Only write the 'obj' and 'builddir' rules for the "primary" output
        # (:1); it's superfluous for the "extra outputs", and this avoids
        # accidentally writing duplicate dummy rules for those outputs.
        self.WriteLn('%s: obj := $(abs_obj)' % outputs[0])
        self.WriteLn('%s: builddir := $(abs_builddir)' % outputs[0])
        self.WriteMakeRule(outputs, inputs + ['FORCE_DO_CMD'], actions)
        # Spaces in rule filenames are not supported, but rule variables have
        # spaces in them (e.g. RULE_INPUT_PATH expands to '$(abspath $<)').
        # The spaces within the variables are valid, so remove the variables
        # before checking.
        variables_with_spaces = re.compile(r'\$\([^ ]* \$<\)')
        for output in outputs:
          output = re.sub(variables_with_spaces, '', output)
          assert ' ' not in output, (
              "Spaces in rule filenames not yet supported (%s)" % output)
        self.WriteLn('all_deps += %s' % ' '.join(outputs))

        action = [self.ExpandInputRoot(ac, rule_source_root,
                                       rule_source_dirname)
                  for ac in rule['action']]
        mkdirs = ''
        if len(dirs) > 0:
          mkdirs = 'mkdir -p %s; ' % ' '.join(dirs)
        cd_action = 'cd %s; ' % Sourceify(self.path or '.')

        # action, cd_action, and mkdirs get written to a toplevel variable
        # called cmd_foo. Toplevel variables can't handle things that change
        # per makefile like $(TARGET), so hardcode the target.
        if self.flavor == 'mac':
          action = [gyp.xcode_emulation.ExpandEnvVars(command, env)
                    for command in action]
        action = gyp.common.EncodePOSIXShellList(action)
        action = action.replace('$(TARGET)', self.target)
        cd_action = cd_action.replace('$(TARGET)', self.target)
        mkdirs = mkdirs.replace('$(TARGET)', self.target)

        # Set LD_LIBRARY_PATH in case the rule runs an executable from this
        # build which links to shared libs from this build.
        # rules run on the host, so they should in theory only use host
        # libraries, but until everything is made cross-compile safe, also use
        # target libraries.
        # TODO(piman): when everything is cross-compile safe, remove lib.target
        self.WriteLn(
            "cmd_%(name)s_%(count)d = LD_LIBRARY_PATH="
            "$(builddir)/lib.host:$(builddir)/lib.target:$$LD_LIBRARY_PATH; "
            "export LD_LIBRARY_PATH; "
            "%(cd_action)s%(mkdirs)s%(action)s" % {
          'action': action,
          'cd_action': cd_action,
          'count': count,
          'mkdirs': mkdirs,
          'name': name,
        })
        self.WriteLn(
            'quiet_cmd_%(name)s_%(count)d = RULE %(name)s_%(count)d $@' % {
          'count': count,
          'name': name,
        })
        self.WriteLn()
        count += 1

      outputs_variable = 'rule_%s_outputs' % name
      self.WriteList(all_outputs, outputs_variable)
      extra_outputs.append('$(%s)' % outputs_variable)

      self.WriteLn('### Finished generating for rule: %s' % name)
      self.WriteLn()
    self.WriteLn('### Finished generating for all rules')
    self.WriteLn('')

  def WriteCopies(self, copies, extra_outputs, part_of_all):
    """Write Makefile code for any 'copies' from the gyp input.

    extra_outputs: a list that will be filled in with any outputs of this action
                   (used to make other pieces dependent on this action)
    part_of_all: flag indicating this target is part of 'all'
    """
    self.WriteLn('### Generated for copy rule.')

    variable = StringToMakefileVariable(self.qualified_target + '_copies')
    outputs = []
    for copy in copies:
      for path in copy['files']:
        # Absolutify() may call normpath, and will strip trailing slashes.
        path = Sourceify(self.Absolutify(path))
        filename = os.path.split(path)[1]
        output = Sourceify(self.Absolutify(os.path.join(copy['destination'],
                                                        filename)))

        # If the output path has variables in it, which happens in practice for
        # 'copies', writing the environment as target-local doesn't work,
        # because the variables are already needed for the target name.
        # Copying the environment variables into global make variables doesn't
        # work either, because then the .d files will potentially contain spaces
        # after variable expansion, and .d file handling cannot handle spaces.
        # As a workaround, manually expand variables at gyp time. Since 'copies'
        # can't run scripts, there's no need to write the env then.
        # WriteDoCmd() will escape spaces for .d files.
        env = self.GetSortedXcodeEnv()
        output = gyp.xcode_emulation.ExpandEnvVars(output, env)
        path = gyp.xcode_emulation.ExpandEnvVars(path, env)
        self.WriteDoCmd([output], [path], 'copy', part_of_all)
        outputs.append(output)
    self.WriteLn('%s = %s' % (variable, ' '.join(map(QuoteSpaces, outputs))))
    extra_outputs.append('$(%s)' % variable)
    self.WriteLn()

  def WriteMacBundleResources(self, resources, bundle_deps):
    """Writes Makefile code for 'mac_bundle_resources'."""
    self.WriteLn('### Generated for mac_bundle_resources')

    for output, res in gyp.xcode_emulation.GetMacBundleResources(
        generator_default_variables['PRODUCT_DIR'], self.xcode_settings,
        map(Sourceify, map(self.Absolutify, resources))):
      _, ext = os.path.splitext(output)
      if ext != '.xcassets':
        # Make does not support '.xcassets' emulation.
        self.WriteDoCmd([output], [res], 'mac_tool,,,copy-bundle-resource',
                        part_of_all=True)
        bundle_deps.append(output)

  def WriteMacInfoPlist(self, bundle_deps):
    """Write Makefile code for bundle Info.plist files."""
    info_plist, out, defines, extra_env = gyp.xcode_emulation.GetMacInfoPlist(
        generator_default_variables['PRODUCT_DIR'], self.xcode_settings,
        lambda p: Sourceify(self.Absolutify(p)))
    if not info_plist:
      return
    if defines:
      # Create an intermediate file to store preprocessed results.
      intermediate_plist = ('$(obj).$(TOOLSET)/$(TARGET)/' +
          os.path.basename(info_plist))
      self.WriteList(defines, intermediate_plist + ': INFOPLIST_DEFINES', '-D',
          quoter=EscapeCppDefine)
      self.WriteMakeRule([intermediate_plist], [info_plist],
          ['$(call do_cmd,infoplist)',
           # "Convert" the plist so that any weird whitespace changes from the
           # preprocessor do not affect the XML parser in mac_tool.
           '@plutil -convert xml1 $@ $@'])
      info_plist = intermediate_plist
    # plists can contain envvars and substitute them into the file.
    self.WriteSortedXcodeEnv(
        out, self.GetSortedXcodeEnv(additional_settings=extra_env))
    self.WriteDoCmd([out], [info_plist], 'mac_tool,,,copy-info-plist',
                    part_of_all=True)
    bundle_deps.append(out)

  def WriteSources(self, configs, deps, sources,
                   extra_outputs, extra_link_deps,
                   part_of_all, precompiled_header):
    """Write Makefile code for any 'sources' from the gyp input.
    These are source files necessary to build the current target.

    configs, deps, sources: input from gyp.
    extra_outputs: a list of extra outputs this action should be dependent on;
                   used to serialize action/rules before compilation
    extra_link_deps: a list that will be filled in with any outputs of
                     compilation (to be used in link lines)
    part_of_all: flag indicating this target is part of 'all'
    """
    # Write configuration-specific variables for CFLAGS, etc.
for configname in sorted(configs.keys()): config = configs[configname] self.WriteList(config.get('defines'), 'DEFS_%s' % configname, prefix='-D', quoter=EscapeCppDefine) if self.flavor == 'mac': cflags = self.xcode_settings.GetCflags(configname) cflags_c = self.xcode_settings.GetCflagsC(configname) cflags_cc = self.xcode_settings.GetCflagsCC(configname) cflags_objc = self.xcode_settings.GetCflagsObjC(configname) cflags_objcc = self.xcode_settings.GetCflagsObjCC(configname) else: cflags = config.get('cflags') cflags_c = config.get('cflags_c') cflags_cc = config.get('cflags_cc') self.WriteLn("# Flags passed to all source files."); self.WriteList(cflags, 'CFLAGS_%s' % configname) self.WriteLn("# Flags passed to only C files."); self.WriteList(cflags_c, 'CFLAGS_C_%s' % configname) self.WriteLn("# Flags passed to only C++ files."); self.WriteList(cflags_cc, 'CFLAGS_CC_%s' % configname) if self.flavor == 'mac': self.WriteLn("# Flags passed to only ObjC files."); self.WriteList(cflags_objc, 'CFLAGS_OBJC_%s' % configname) self.WriteLn("# Flags passed to only ObjC++ files."); self.WriteList(cflags_objcc, 'CFLAGS_OBJCC_%s' % configname) includes = config.get('include_dirs') if includes: includes = map(Sourceify, map(self.Absolutify, includes)) self.WriteList(includes, 'INCS_%s' % configname, prefix='-I') compilable = filter(Compilable, sources) objs = map(self.Objectify, map(self.Absolutify, map(Target, compilable))) self.WriteList(objs, 'OBJS') for obj in objs: assert ' ' not in obj, ( "Spaces in object filenames not supported (%s)" % obj) self.WriteLn('# Add to the list of files we specially track ' 'dependencies for.') self.WriteLn('all_deps += $(OBJS)') self.WriteLn() # Make sure our dependencies are built first. if deps: self.WriteMakeRule(['$(OBJS)'], deps, comment = 'Make sure our dependencies are built ' 'before any of us.', order_only = True) # Make sure the actions and rules run first. 
    # If they generate any extra headers etc., the per-.o file dep tracking
    # will catch the proper rebuilds, so order only is still ok here.
    if extra_outputs:
      self.WriteMakeRule(['$(OBJS)'], extra_outputs,
                         comment = 'Make sure our actions/rules run '
                                   'before any of us.',
                         order_only = True)

    # Emit obj -> gch dependencies so objects rebuild when their precompiled
    # header changes.
    pchdeps = precompiled_header.GetObjDependencies(compilable, objs )
    if pchdeps:
      self.WriteLn('# Dependencies from obj files to their precompiled headers')
      for source, obj, gch in pchdeps:
        self.WriteLn('%s: %s' % (obj, gch))
      self.WriteLn('# End precompiled header dependencies')

    if objs:
      extra_link_deps.append('$(OBJS)')
      self.WriteLn("""\
# CFLAGS et al overrides must be target-local.
# See "Target-specific Variable Values" in the GNU Make manual.""")
      self.WriteLn("$(OBJS): TOOLSET := $(TOOLSET)")
      self.WriteLn("$(OBJS): GYP_CFLAGS := "
                   "$(DEFS_$(BUILDTYPE)) "
                   "$(INCS_$(BUILDTYPE)) "
                   "%s " % precompiled_header.GetInclude('c') +
                   "$(CFLAGS_$(BUILDTYPE)) "
                   "$(CFLAGS_C_$(BUILDTYPE))")
      self.WriteLn("$(OBJS): GYP_CXXFLAGS := "
                   "$(DEFS_$(BUILDTYPE)) "
                   "$(INCS_$(BUILDTYPE)) "
                   "%s " % precompiled_header.GetInclude('cc') +
                   "$(CFLAGS_$(BUILDTYPE)) "
                   "$(CFLAGS_CC_$(BUILDTYPE))")
      if self.flavor == 'mac':
        self.WriteLn("$(OBJS): GYP_OBJCFLAGS := "
                     "$(DEFS_$(BUILDTYPE)) "
                     "$(INCS_$(BUILDTYPE)) "
                     "%s " % precompiled_header.GetInclude('m') +
                     "$(CFLAGS_$(BUILDTYPE)) "
                     "$(CFLAGS_C_$(BUILDTYPE)) "
                     "$(CFLAGS_OBJC_$(BUILDTYPE))")
        self.WriteLn("$(OBJS): GYP_OBJCXXFLAGS := "
                     "$(DEFS_$(BUILDTYPE)) "
                     "$(INCS_$(BUILDTYPE)) "
                     "%s " % precompiled_header.GetInclude('mm') +
                     "$(CFLAGS_$(BUILDTYPE)) "
                     "$(CFLAGS_CC_$(BUILDTYPE)) "
                     "$(CFLAGS_OBJCC_$(BUILDTYPE))")

    self.WritePchTargets(precompiled_header.GetPchBuildCommands())

    # If there are any object files in our input file list, link them into our
    # output.
    extra_link_deps += filter(Linkable, sources)

    self.WriteLn()

  def WritePchTargets(self, pch_commands):
    """Writes make rules to compile prefix headers."""
    if not pch_commands:
      return

    for gch, lang_flag, lang, input in pch_commands:
      # Per-language compile flags and the make variable that carries them.
      extra_flags = {
        'c': '$(CFLAGS_C_$(BUILDTYPE))',
        'cc': '$(CFLAGS_CC_$(BUILDTYPE))',
        'm': '$(CFLAGS_C_$(BUILDTYPE)) $(CFLAGS_OBJC_$(BUILDTYPE))',
        'mm': '$(CFLAGS_CC_$(BUILDTYPE)) $(CFLAGS_OBJCC_$(BUILDTYPE))',
      }[lang]
      var_name = {
        'c': 'GYP_PCH_CFLAGS',
        'cc': 'GYP_PCH_CXXFLAGS',
        'm': 'GYP_PCH_OBJCFLAGS',
        'mm': 'GYP_PCH_OBJCXXFLAGS',
      }[lang]
      self.WriteLn("%s: %s := %s " % (gch, var_name, lang_flag) +
                   "$(DEFS_$(BUILDTYPE)) "
                   "$(INCS_$(BUILDTYPE)) "
                   "$(CFLAGS_$(BUILDTYPE)) " +
                   extra_flags)

      self.WriteLn('%s: %s FORCE_DO_CMD' % (gch, input))
      self.WriteLn('\t@$(call do_cmd,pch_%s,1)' % lang)
      self.WriteLn('')
      assert ' ' not in gch, (
          "Spaces in gch filenames not supported (%s)" % gch)
      self.WriteLn('all_deps += %s' % gch)
      self.WriteLn('')

  def ComputeOutputBasename(self, spec):
    """Return the 'output basename' of a gyp spec.

    E.g., the loadable module 'foobar' in directory 'baz' will produce
      'libfoobar.so'
    """
    assert not self.is_mac_bundle

    if self.flavor == 'mac' and self.type in (
        'static_library', 'executable', 'shared_library', 'loadable_module'):
      return self.xcode_settings.GetExecutablePath()

    target = spec['target_name']
    target_prefix = ''
    target_ext = ''
    if self.type == 'static_library':
      if target[:3] == 'lib':
        target = target[3:]
      target_prefix = 'lib'
      target_ext = '.a'
    elif self.type in ('loadable_module', 'shared_library'):
      if target[:3] == 'lib':
        target = target[3:]
      target_prefix = 'lib'
      target_ext = '.so'
    elif self.type == 'none':
      target = '%s.stamp' % target
    elif self.type != 'executable':
      print ("ERROR: What output file should be generated?",
             "type", self.type, "target", target)

    # product_prefix/product_name/product_extension from the spec override
    # the defaults computed above.
    target_prefix = spec.get('product_prefix', target_prefix)
    target = spec.get('product_name', target)
    product_ext = spec.get('product_extension')
    if product_ext:
      target_ext = '.' + product_ext

    return target_prefix + target + target_ext

  def _InstallImmediately(self):
    # True if this target's output is written straight to its install
    # location; ComputeOutput then uses $(builddir) instead of $(obj).
    return self.toolset == 'target' and self.flavor == 'mac' and self.type in (
          'static_library', 'executable', 'shared_library', 'loadable_module')

  def ComputeOutput(self, spec):
    """Return the 'output' (full output path) of a gyp spec.

    E.g., the loadable module 'foobar' in directory 'baz' will produce
      '$(obj)/baz/libfoobar.so'
    """
    assert not self.is_mac_bundle

    path = os.path.join('$(obj).' + self.toolset, self.path)
    if self.type == 'executable' or self._InstallImmediately():
      path = '$(builddir)'
    # An explicit product_dir in the spec wins over both defaults.
    path = spec.get('product_dir', path)

    return os.path.join(path, self.ComputeOutputBasename(spec))

  def ComputeMacBundleOutput(self, spec):
    """Return the 'output' (full output path) to a bundle output directory."""
    assert self.is_mac_bundle
    path = generator_default_variables['PRODUCT_DIR']
    return os.path.join(path, self.xcode_settings.GetWrapperName())

  def ComputeMacBundleBinaryOutput(self, spec):
    """Return the 'output' (full output path) to the binary in a bundle."""
    path = generator_default_variables['PRODUCT_DIR']
    return os.path.join(path, self.xcode_settings.GetExecutablePath())

  def ComputeDeps(self, spec):
    """Compute the dependencies of a gyp spec.

    Returns a tuple (deps, link_deps), where each is a list of
    filenames that will need to be put in front of make for either
    building (deps) or linking (link_deps).
    """
    deps = []
    link_deps = []
    if 'dependencies' in spec:
      deps.extend([target_outputs[dep] for dep in spec['dependencies']
                   if target_outputs[dep]])
      for dep in spec['dependencies']:
        if dep in target_link_deps:
          link_deps.append(target_link_deps[dep])
      deps.extend(link_deps)
      # TODO: It seems we need to transitively link in libraries (e.g. -lfoo)?
      # This hack makes it work:
      # link_deps.extend(spec.get('libraries', []))
    return (gyp.common.uniquer(deps), gyp.common.uniquer(link_deps))

  def WriteDependencyOnExtraOutputs(self, target, extra_outputs):
    """Make self.output_binary order-only depend on extra_outputs.

    NOTE(review): the 'target' parameter is unused; the rule is always
    written against self.output_binary.
    """
    self.WriteMakeRule([self.output_binary], extra_outputs,
                       comment = 'Build our special outputs first.',
                       order_only = True)

  def WriteTarget(self, spec, configs, deps, link_deps, bundle_deps,
                  extra_outputs, part_of_all):
    """Write Makefile code to produce the final target of the gyp spec.

    spec, configs: input from gyp.
    deps, link_deps: dependency lists; see ComputeDeps()
    extra_outputs: any extra outputs that our target should depend on
    part_of_all: flag indicating this target is part of 'all'
    """
    self.WriteLn('### Rules for final target.')

    if extra_outputs:
      self.WriteDependencyOnExtraOutputs(self.output_binary, extra_outputs)
      self.WriteMakeRule(extra_outputs, deps,
                         comment=('Preserve order dependency of '
                                  'special output on deps.'),
                         order_only = True)

    # Per-configuration LDFLAGS_<config> / LIBTOOLFLAGS_<config> variables.
    target_postbuilds = {}
    if self.type != 'none':
      for configname in sorted(configs.keys()):
        config = configs[configname]
        if self.flavor == 'mac':
          ldflags = self.xcode_settings.GetLdflags(configname,
              generator_default_variables['PRODUCT_DIR'],
              lambda p: Sourceify(self.Absolutify(p)))

          # TARGET_POSTBUILDS_$(BUILDTYPE) is added to postbuilds later on.
          gyp_to_build = gyp.common.InvertRelativePath(self.path)
          target_postbuild = self.xcode_settings.AddImplicitPostbuilds(
              configname,
              QuoteSpaces(os.path.normpath(os.path.join(gyp_to_build,
                                                        self.output))),
              QuoteSpaces(os.path.normpath(os.path.join(gyp_to_build,
                                                        self.output_binary))))
          if target_postbuild:
            target_postbuilds[configname] = target_postbuild
        else:
          ldflags = config.get('ldflags', [])
          # Compute an rpath for this output if needed.
          if any(dep.endswith('.so') or '.so.' in dep for dep in deps):
            # We want to get the literal string "$ORIGIN" into the link command,
            # so we need lots of escaping.
            ldflags.append(r'-Wl,-rpath=\$$ORIGIN/lib.%s/' % self.toolset)
            ldflags.append(r'-Wl,-rpath-link=\$(builddir)/lib.%s/' %
                           self.toolset)
        library_dirs = config.get('library_dirs', [])
        ldflags += [('-L%s' % library_dir) for library_dir in library_dirs]
        self.WriteList(ldflags, 'LDFLAGS_%s' % configname)
        if self.flavor == 'mac':
          self.WriteList(self.xcode_settings.GetLibtoolflags(configname),
                         'LIBTOOLFLAGS_%s' % configname)
      libraries = spec.get('libraries')
      if libraries:
        # Remove duplicate entries
        libraries = gyp.common.uniquer(libraries)
        if self.flavor == 'mac':
          libraries = self.xcode_settings.AdjustLibraries(libraries)
      self.WriteList(libraries, 'LIBS')
      self.WriteLn('%s: GYP_LDFLAGS := $(LDFLAGS_$(BUILDTYPE))' %
                   QuoteSpaces(self.output_binary))
      self.WriteLn('%s: LIBS := $(LIBS)' % QuoteSpaces(self.output_binary))

      if self.flavor == 'mac':
        self.WriteLn('%s: GYP_LIBTOOLFLAGS := $(LIBTOOLFLAGS_$(BUILDTYPE))' %
                     QuoteSpaces(self.output_binary))

    # Postbuild actions.  Like actions, but implicitly depend on the target's
    # output.
    postbuilds = []
    if self.flavor == 'mac':
      if target_postbuilds:
        postbuilds.append('$(TARGET_POSTBUILDS_$(BUILDTYPE))')
      postbuilds.extend(
          gyp.xcode_emulation.GetSpecPostbuildCommands(spec))

    if postbuilds:
      # Envvars may be referenced by TARGET_POSTBUILDS_$(BUILDTYPE),
      # so we must output its definition first, since we declare variables
      # using ":=".
      self.WriteSortedXcodeEnv(self.output, self.GetSortedXcodePostbuildEnv())

      for configname in target_postbuilds:
        self.WriteLn('%s: TARGET_POSTBUILDS_%s := %s' %
            (QuoteSpaces(self.output),
             configname,
             gyp.common.EncodePOSIXShellList(target_postbuilds[configname])))

      # Postbuilds expect to be run in the gyp file's directory, so insert an
      # implicit postbuild to cd to there.
      postbuilds.insert(0, gyp.common.EncodePOSIXShellList(['cd', self.path]))
      for i in xrange(len(postbuilds)):
        if not postbuilds[i].startswith('$'):
          postbuilds[i] = EscapeShellArgument(postbuilds[i])
      self.WriteLn('%s: builddir := $(abs_builddir)' % QuoteSpaces(self.output))
      self.WriteLn('%s: POSTBUILDS := %s' % (
          QuoteSpaces(self.output), ' '.join(postbuilds)))

    # A bundle directory depends on its dependencies such as bundle resources
    # and bundle binary.  When all dependencies have been built, the bundle
    # needs to be packaged.
    if self.is_mac_bundle:
      # If the framework doesn't contain a binary, then nothing depends
      # on the actions -- make the framework depend on them directly too.
      self.WriteDependencyOnExtraOutputs(self.output, extra_outputs)

      # Bundle dependencies. Note that the code below adds actions to this
      # target, so if you move these two lines, move the lines below as well.
      self.WriteList(map(QuoteSpaces, bundle_deps), 'BUNDLE_DEPS')
      self.WriteLn('%s: $(BUNDLE_DEPS)' % QuoteSpaces(self.output))

      # After the framework is built, package it. Needs to happen before
      # postbuilds, since postbuilds depend on this.
      if self.type in ('shared_library', 'loadable_module'):
        self.WriteLn('\t@$(call do_cmd,mac_package_framework,,,%s)' %
            self.xcode_settings.GetFrameworkVersion())

      # Bundle postbuilds can depend on the whole bundle, so run them after
      # the bundle is packaged, not already after the bundle binary is done.
      if postbuilds:
        self.WriteLn('\t@$(call do_postbuilds)')
      postbuilds = []  # Don't write postbuilds for target's output.

      # Needed by test/mac/gyptest-rebuild.py.
      self.WriteLn('\t@true  # No-op, used by tests')

      # Since this target depends on binary and resources which are in
      # nested subfolders, the framework directory will be older than
      # its dependencies usually. To prevent this rule from executing
      # on every build (expensive, especially with postbuilds), expliclity
      # update the time on the framework directory.
      self.WriteLn('\t@touch -c %s' % QuoteSpaces(self.output))

    if postbuilds:
      assert not self.is_mac_bundle, ('Postbuilds for bundles should be done '
                                      'on the bundle, not the binary (target '
                                      '\'%s\')' % self.target)
      assert 'product_dir' not in spec, ('Postbuilds do not work with '
          'custom product_dir')

    # Emit the link/archive/stamp rule appropriate for this target type.
    if self.type == 'executable':
      self.WriteLn('%s: LD_INPUTS := %s' % (
          QuoteSpaces(self.output_binary),
          ' '.join(map(QuoteSpaces, link_deps))))
      if self.toolset == 'host' and self.flavor == 'android':
        self.WriteDoCmd([self.output_binary], link_deps, 'link_host',
                        part_of_all, postbuilds=postbuilds)
      else:
        self.WriteDoCmd([self.output_binary], link_deps, 'link', part_of_all,
                        postbuilds=postbuilds)
    elif self.type == 'static_library':
      for link_dep in link_deps:
        assert ' ' not in link_dep, (
            "Spaces in alink input filenames not supported (%s)" % link_dep)
      if (self.flavor not in ('mac', 'openbsd', 'win') and not
          self.is_standalone_static_library):
        self.WriteDoCmd([self.output_binary], link_deps, 'alink_thin',
                        part_of_all, postbuilds=postbuilds)
      else:
        self.WriteDoCmd([self.output_binary], link_deps, 'alink', part_of_all,
                        postbuilds=postbuilds)
    elif self.type == 'shared_library':
      self.WriteLn('%s: LD_INPUTS := %s' % (
            QuoteSpaces(self.output_binary),
            ' '.join(map(QuoteSpaces, link_deps))))
      self.WriteDoCmd([self.output_binary], link_deps, 'solink', part_of_all,
                      postbuilds=postbuilds)
    elif self.type == 'loadable_module':
      for link_dep in link_deps:
        assert ' ' not in link_dep, (
            "Spaces in module input filenames not supported (%s)" % link_dep)
      if self.toolset == 'host' and self.flavor == 'android':
        self.WriteDoCmd([self.output_binary], link_deps, 'solink_module_host',
                        part_of_all, postbuilds=postbuilds)
      else:
        self.WriteDoCmd(
            [self.output_binary], link_deps, 'solink_module', part_of_all,
            postbuilds=postbuilds)
    elif self.type == 'none':
      # Write a stamp line.
      self.WriteDoCmd([self.output_binary], deps, 'touch', part_of_all,
                      postbuilds=postbuilds)
    else:
      print "WARNING: no output for", self.type, target

    # Add an alias for each target (if there are any outputs).
    # Installable target aliases are created below.
    if ((self.output and self.output != self.target) and
        (self.type not in self._INSTALLABLE_TARGETS)):
      self.WriteMakeRule([self.target], [self.output],
                         comment='Add target alias', phony = True)
      if part_of_all:
        self.WriteMakeRule(['all'], [self.target],
                           comment = 'Add target alias to "all" target.',
                           phony = True)

    # Add special-case rules for our installable targets.
    # 1) They need to install to the build dir or "product" dir.
    # 2) They get shortcuts for building (e.g. "make chrome").
    # 3) They are part of "make all".
    if (self.type in self._INSTALLABLE_TARGETS or
        self.is_standalone_static_library):
      if self.type == 'shared_library':
        file_desc = 'shared library'
      elif self.type == 'static_library':
        file_desc = 'static library'
      else:
        file_desc = 'executable'
      install_path = self._InstallableTargetInstallPath()
      installable_deps = [self.output]
      if (self.flavor == 'mac' and not 'product_dir' in spec and
          self.toolset == 'target'):
        # On mac, products are created in install_path immediately.
        assert install_path == self.output, '%s != %s' % (
            install_path, self.output)

      # Point the target alias to the final binary output.
      self.WriteMakeRule([self.target], [install_path],
                         comment='Add target alias', phony = True)
      if install_path != self.output:
        assert not self.is_mac_bundle  # See comment a few lines above.
        self.WriteDoCmd([install_path], [self.output], 'copy',
                        comment = 'Copy this to the %s output path.' %
                        file_desc, part_of_all=part_of_all)
        installable_deps.append(install_path)
      if self.output != self.alias and self.alias != self.target:
        self.WriteMakeRule([self.alias], installable_deps,
                           comment = 'Short alias for building this %s.' %
                           file_desc, phony = True)
      if part_of_all:
        self.WriteMakeRule(['all'], [install_path],
                           comment = 'Add %s to "all" target.' % file_desc,
                           phony = True)

  def WriteList(self, value_list, variable=None, prefix='',
                quoter=QuoteIfNecessary):
    """Write a variable definition that is a list of values.

    E.g. WriteList(['a','b'], 'foo', prefix='blah') writes out
         foo = blaha blahb
    but in a pretty-printed style.
    """
    values = ''
    if value_list:
      value_list = [quoter(prefix + l) for l in value_list]
      values = ' \\\n\t' + ' \\\n\t'.join(value_list)
    self.fp.write('%s :=%s\n\n' % (variable, values))

  def WriteDoCmd(self, outputs, inputs, command, part_of_all, comment=None,
                 postbuilds=False):
    """Write a Makefile rule that uses do_cmd.

    This makes the outputs dependent on the command line that was run,
    as well as support the V= make command line flag.
    """
    suffix = ''
    if postbuilds:
      assert ',' not in command
      suffix = ',,1'  # Tell do_cmd to honor $POSTBUILDS
    self.WriteMakeRule(outputs, inputs,
                       actions = ['$(call do_cmd,%s%s)' % (command, suffix)],
                       comment = comment,
                       force = True)
    # Add our outputs to the list of targets we read depfiles from.
    # all_deps is only used for deps file reading, and for deps files we replace
    # spaces with ? because escaping doesn't work with make's $(sort) and
    # other functions.
    outputs = [QuoteSpaces(o, SPACE_REPLACEMENT) for o in outputs]
    self.WriteLn('all_deps += %s' % ' '.join(outputs))

  def WriteMakeRule(self, outputs, inputs, actions=None, comment=None,
                    order_only=False, force=False, phony=False):
    """Write a Makefile rule, with some extra tricks.
    outputs: a list of outputs for the rule (note: this is not directly
             supported by make; see comments below)
    inputs: a list of inputs for the rule
    actions: a list of shell commands to run for the rule
    comment: a comment to put in the Makefile above the rule (also useful
             for making this Python script's code self-documenting)
    order_only: if true, makes the dependency order-only
    force: if true, include FORCE_DO_CMD as an order-only dep
    phony: if true, the rule does not actually generate the named output, the
           output is just a name to run the rule
    """
    outputs = map(QuoteSpaces, outputs)
    inputs = map(QuoteSpaces, inputs)

    if comment:
      self.WriteLn('# ' + comment)
    if phony:
      self.WriteLn('.PHONY: ' + ' '.join(outputs))
    # TODO(evanm): just make order_only a list of deps instead of these hacks.
    if order_only:
      order_insert = '| '
      pick_output = ' '.join(outputs)
    else:
      order_insert = ''
      pick_output = outputs[0]
    if force:
      force_append = ' FORCE_DO_CMD'
    else:
      force_append = ''
    if actions:
      self.WriteLn("%s: TOOLSET := $(TOOLSET)" % outputs[0])
    self.WriteLn('%s: %s%s%s' % (pick_output, order_insert, ' '.join(inputs),
                                 force_append))
    if actions:
      for action in actions:
        self.WriteLn('\t%s' % action)
    if not order_only and len(outputs) > 1:
      # If we have more than one output, a rule like
      #   foo bar: baz
      # that for *each* output we must run the action, potentially
      # in parallel.  That is not what we're trying to write -- what
      # we want is that we run the action once and it generates all
      # the files.
      # http://www.gnu.org/software/hello/manual/automake/Multiple-Outputs.html
      # discusses this problem and has this solution:
      # 1) Write the naive rule that would produce parallel runs of
      # the action.
      # 2) Make the outputs seralized on each other, so we won't start
      # a parallel run until the first run finishes, at which point
      # we'll have generated all the outputs and we're done.
      self.WriteLn('%s: %s' % (' '.join(outputs[1:]), outputs[0]))
      # Add a dummy command to the "extra outputs" rule, otherwise make seems to
      # think these outputs haven't (couldn't have?) changed, and thus doesn't
      # flag them as changed (i.e. include in '$?') when evaluating dependent
      # rules, which in turn causes do_cmd() to skip running dependent commands.
      self.WriteLn('%s: ;' % (' '.join(outputs[1:])))
    self.WriteLn()

  def WriteAndroidNdkModuleRule(self, module_name, all_sources, link_deps):
    """Write a set of LOCAL_XXX definitions for Android NDK.

    These variable definitions will be used by Android NDK but do nothing for
    non-Android applications.

    Arguments:
      module_name: Android NDK module name, which must be unique among all
          module names.
      all_sources: A list of source files (will be filtered by Compilable).
      link_deps: A list of link dependencies, which must be sorted in
          the order from dependencies to dependents.
    """
    if self.type not in ('executable', 'shared_library', 'static_library'):
      return

    self.WriteLn('# Variable definitions for Android applications')
    self.WriteLn('include $(CLEAR_VARS)')
    self.WriteLn('LOCAL_MODULE := ' + module_name)
    self.WriteLn('LOCAL_CFLAGS := $(CFLAGS_$(BUILDTYPE)) '
                 '$(DEFS_$(BUILDTYPE)) '
                 # LOCAL_CFLAGS is applied to both of C and C++.  There is
                 # no way to specify $(CFLAGS_C_$(BUILDTYPE)) only for C
                 # sources.
                 '$(CFLAGS_C_$(BUILDTYPE)) '
                 # $(INCS_$(BUILDTYPE)) includes the prefix '-I' while
                 # LOCAL_C_INCLUDES does not expect it.  So put it in
                 # LOCAL_CFLAGS.
                 '$(INCS_$(BUILDTYPE))')
    # LOCAL_CXXFLAGS is obsolete and LOCAL_CPPFLAGS is preferred.
    self.WriteLn('LOCAL_CPPFLAGS := $(CFLAGS_CC_$(BUILDTYPE))')
    self.WriteLn('LOCAL_C_INCLUDES :=')
    self.WriteLn('LOCAL_LDLIBS := $(LDFLAGS_$(BUILDTYPE)) $(LIBS)')

    # Detect the C++ extension: pick the extension used by the most sources.
    cpp_ext = {'.cc': 0, '.cpp': 0, '.cxx': 0}
    default_cpp_ext = '.cpp'
    for filename in all_sources:
      ext = os.path.splitext(filename)[1]
      if ext in cpp_ext:
        cpp_ext[ext] += 1
        if cpp_ext[ext] > cpp_ext[default_cpp_ext]:
          default_cpp_ext = ext
    self.WriteLn('LOCAL_CPP_EXTENSION := ' + default_cpp_ext)

    self.WriteList(map(self.Absolutify, filter(Compilable, all_sources)),
                   'LOCAL_SRC_FILES')

    # Filter out those which do not match prefix and suffix and produce
    # the resulting list without prefix and suffix.
    def DepsToModules(deps, prefix, suffix):
      modules = []
      for filepath in deps:
        filename = os.path.basename(filepath)
        if filename.startswith(prefix) and filename.endswith(suffix):
          modules.append(filename[len(prefix):-len(suffix)])
      return modules

    # Retrieve the default value of 'SHARED_LIB_SUFFIX'
    params = {'flavor': 'linux'}
    default_variables = {}
    CalculateVariables(default_variables, params)

    self.WriteList(
        DepsToModules(link_deps,
                      generator_default_variables['SHARED_LIB_PREFIX'],
                      default_variables['SHARED_LIB_SUFFIX']),
        'LOCAL_SHARED_LIBRARIES')
    self.WriteList(
        DepsToModules(link_deps,
                      generator_default_variables['STATIC_LIB_PREFIX'],
                      generator_default_variables['STATIC_LIB_SUFFIX']),
        'LOCAL_STATIC_LIBRARIES')

    if self.type == 'executable':
      self.WriteLn('include $(BUILD_EXECUTABLE)')
    elif self.type == 'shared_library':
      self.WriteLn('include $(BUILD_SHARED_LIBRARY)')
    elif self.type == 'static_library':
      self.WriteLn('include $(BUILD_STATIC_LIBRARY)')
    self.WriteLn()

  def WriteLn(self, text=''):
    # Write one line (plus newline) to the generated makefile.
    self.fp.write(text + '\n')

  def GetSortedXcodeEnv(self, additional_settings=None):
    return gyp.xcode_emulation.GetSortedXcodeEnv(
        self.xcode_settings, "$(abs_builddir)",
        os.path.join("$(abs_srcdir)", self.path), "$(BUILDTYPE)",
        additional_settings)

  def GetSortedXcodePostbuildEnv(self):
    # CHROMIUM_STRIP_SAVE_FILE is a chromium-specific hack.
    # TODO(thakis): It would be nice to have some general mechanism instead.
    strip_save_file = self.xcode_settings.GetPerTargetSetting(
        'CHROMIUM_STRIP_SAVE_FILE', '')
    # Even if strip_save_file is empty, explicitly write it. Else a postbuild
    # might pick up an export from an earlier target.
    return self.GetSortedXcodeEnv(
        additional_settings={'CHROMIUM_STRIP_SAVE_FILE': strip_save_file})

  def WriteSortedXcodeEnv(self, target, env):
    # Emit target-local exported variables for the given (key, value) pairs.
    for k, v in env:
      # For
      #   foo := a\ b
      # the escaped space does the right thing. For
      #   export foo := a\ b
      # it does not -- the backslash is written to the env as literal character.
      # So don't escape spaces in |env[k]|.
      self.WriteLn('%s: export %s := %s' % (QuoteSpaces(target), k, v))

  def Objectify(self, path):
    """Convert a path to its output directory form."""
    if '$(' in path:
      path = path.replace('$(obj)/', '$(obj).%s/$(TARGET)/' % self.toolset)
    if not '$(obj)' in path:
      path = '$(obj).%s/$(TARGET)/%s' % (self.toolset, path)
    return path

  def Pchify(self, path, lang):
    """Convert a prefix header path to its output directory form."""
    path = self.Absolutify(path)
    if '$(' in path:
      path = path.replace('$(obj)/', '$(obj).%s/$(TARGET)/pch-%s' %
                          (self.toolset, lang))
      return path
    return '$(obj).%s/$(TARGET)/pch-%s/%s' % (self.toolset, lang, path)

  def Absolutify(self, path):
    """Convert a subdirectory-relative path into a base-relative path.
    Skips over paths that contain variables."""
    if '$(' in path:
      # Don't call normpath in this case, as it might collapse the
      # path too aggressively if it features '..'. However it's still
      # important to strip trailing slashes.
      return path.rstrip('/')
    return os.path.normpath(os.path.join(self.path, path))

  def ExpandInputRoot(self, template, expansion, dirname):
    """Substitute %(INPUT_ROOT)s / %(INPUT_DIRNAME)s placeholders in
    template; return the template untouched if it has neither."""
    if '%(INPUT_ROOT)s' not in template and '%(INPUT_DIRNAME)s' not in template:
      return template
    path = template % {
        'INPUT_ROOT': expansion,
        'INPUT_DIRNAME': dirname,
        }
    return path

  def _InstallableTargetInstallPath(self):
    """Returns the location of the final output for an installable target."""
    # Xcode puts shared_library results into PRODUCT_DIR, and some gyp files
    # rely on this. Emulate this behavior for mac.

    # XXX(TooTallNate): disabling this code since we don't want this behavior...
    #if (self.type == 'shared_library' and
    #    (self.flavor != 'mac' or self.toolset != 'target')):
    #  # Install all shared libs into a common directory (per toolset) for
    #  # convenient access with LD_LIBRARY_PATH.
    #  return '$(builddir)/lib.%s/%s' % (self.toolset, self.alias)
    return '$(builddir)/' + self.alias


def WriteAutoRegenerationRule(params, root_makefile, makefile_name,
                              build_files):
  """Write the target to regenerate the Makefile."""
  options = params['options']
  build_files_args = [gyp.common.RelativePath(filename, options.toplevel_dir)
                      for filename in params['build_files_arg']]

  gyp_binary = gyp.common.FixIfRelativePath(params['gyp_binary'],
                                            options.toplevel_dir)
  if not gyp_binary.startswith(os.sep):
    gyp_binary = os.path.join('.', gyp_binary)

  root_makefile.write(
      "quiet_cmd_regen_makefile = ACTION Regenerating $@\n"
      "cmd_regen_makefile = cd $(srcdir); %(cmd)s\n"
      "%(makefile_name)s: %(deps)s\n"
      "\t$(call do_cmd,regen_makefile)\n\n" % {
          'makefile_name': makefile_name,
          'deps': ' '.join(map(Sourceify, build_files)),
          'cmd': gyp.common.EncodePOSIXShellList(
                     [gyp_binary, '-fmake'] +
                     gyp.RegenerateFlags(options) +
                     build_files_args)})


def PerformBuild(data, configurations, params):
  """Run 'make BUILDTYPE=<config>' once per configuration."""
  options = params['options']
  for config in configurations:
    arguments = ['make']
    if options.toplevel_dir and options.toplevel_dir != '.':
      arguments += '-C', options.toplevel_dir
    arguments.append('BUILDTYPE=' + config)
    print 'Building [%s]: %s' % (config, arguments)
    subprocess.check_call(arguments)


def GenerateOutput(target_list, target_dicts, data, params):
  """Write the root Makefile plus one included .mk per target/toolset."""
  options = params['options']
  flavor = gyp.common.GetFlavor(params)
  generator_flags = params.get('generator_flags', {})
  builddir_name = generator_flags.get('output_dir', 'out')
  android_ndk_version = generator_flags.get('android_ndk_version', None)
  default_target = generator_flags.get('default_target', 'all')

  def CalculateMakefilePath(build_file, base_name):
    """Determine where to write a Makefile for a given gyp file."""
    # Paths in gyp files are relative to the .gyp file, but we want
    # paths relative to the source root for the master makefile.  Grab
    # the path of the .gyp file as the base to relativize against.
    # E.g. "foo/bar" when we're constructing targets for "foo/bar/baz.gyp".
    base_path = gyp.common.RelativePath(os.path.dirname(build_file),
                                        options.depth)
    # We write the file in the base_path directory.
    output_file = os.path.join(options.depth, base_path, base_name)
    if options.generator_output:
      output_file = os.path.join(
          options.depth, options.generator_output, base_path, base_name)
    base_path = gyp.common.RelativePath(os.path.dirname(build_file),
                                        options.toplevel_dir)
    return base_path, output_file

  # TODO:  search for the first non-'Default' target.  This can go
  # away when we add verification that all targets have the
  # necessary configurations.
  default_configuration = None
  toolsets = set([target_dicts[target]['toolset'] for target in target_list])
  for target in target_list:
    spec = target_dicts[target]
    if spec['default_configuration'] != 'Default':
      default_configuration = spec['default_configuration']
      break
  if not default_configuration:
    default_configuration = 'Default'

  srcdir = '.'
  makefile_name = 'Makefile' + options.suffix
  makefile_path = os.path.join(options.toplevel_dir, makefile_name)
  if options.generator_output:
    global srcdir_prefix
    makefile_path = os.path.join(
        options.toplevel_dir, options.generator_output, makefile_name)
    srcdir = gyp.common.RelativePath(srcdir, options.generator_output)
    srcdir_prefix = '$(srcdir)/'

  # Per-flavor header parameters; the Linux defaults are overridden below.
  flock_command= 'flock'
  header_params = {
      'default_target': default_target,
      'builddir': builddir_name,
      'default_configuration': default_configuration,
      'flock': flock_command,
      'flock_index': 1,
      'link_commands': LINK_COMMANDS_LINUX,
      'extra_commands': '',
      'srcdir': srcdir,
    }
  if flavor == 'mac':
    flock_command = './gyp-mac-tool flock'
    header_params.update({
        'flock': flock_command,
        'flock_index': 2,
        'link_commands': LINK_COMMANDS_MAC,
        'extra_commands': SHARED_HEADER_MAC_COMMANDS,
    })
  elif flavor == 'android':
    header_params.update({
        'link_commands': LINK_COMMANDS_ANDROID,
    })
  elif flavor == 'solaris':
    header_params.update({
        'flock': './gyp-flock-tool flock',
        'flock_index': 2,
    })
  elif flavor == 'freebsd':
    # Note: OpenBSD has sysutils/flock. lockf seems to be FreeBSD specific.
    header_params.update({
        'flock': 'lockf',
    })
  elif flavor == 'aix':
    header_params.update({
        'link_commands': LINK_COMMANDS_AIX,
        'flock': './gyp-flock-tool flock',
        'flock_index': 2,
    })

  header_params.update({
      'CC.target':   GetEnvironFallback(('CC_target', 'CC'), '$(CC)'),
      'AR.target':   GetEnvironFallback(('AR_target', 'AR'), '$(AR)'),
      'CXX.target':  GetEnvironFallback(('CXX_target', 'CXX'), '$(CXX)'),
      'LINK.target': GetEnvironFallback(('LINK_target', 'LINK'), '$(LINK)'),
      'CC.host':     GetEnvironFallback(('CC_host',), 'gcc'),
      'AR.host':     GetEnvironFallback(('AR_host',), 'ar'),
      'CXX.host':    GetEnvironFallback(('CXX_host',), 'g++'),
      'LINK.host':   GetEnvironFallback(('LINK_host',), '$(CXX.host)'),
  })

  build_file, _, _ = gyp.common.ParseQualifiedTarget(target_list[0])
  make_global_settings_array = data[build_file].get('make_global_settings', [])
  wrappers = {}
  for key, value in make_global_settings_array:
    if key.endswith('_wrapper'):
      wrappers[key[:-len('_wrapper')]] = '$(abspath %s)' % value
  make_global_settings = ''
  for key, value in make_global_settings_array:
    if re.match('.*_wrapper', key):
      continue
    if value[0] != '$':
      value = '$(abspath %s)' % value
    wrapper = wrappers.get(key)
    if wrapper:
      value = '%s %s' % (wrapper, value)
      del wrappers[key]
    if key in ('CC', 'CC.host', 'CXX', 'CXX.host'):
      make_global_settings += (
          'ifneq (,$(filter $(origin %s), undefined default))\n' % key)
      # Let gyp-time envvars win over global settings.
      env_key = key.replace('.', '_')  # CC.host -> CC_host
      if env_key in os.environ:
        value = os.environ[env_key]
      make_global_settings += '  %s = %s\n' % (key, value)
      make_global_settings += 'endif\n'
    else:
      make_global_settings += '%s ?= %s\n' % (key, value)
  # TODO(ukai): define cmd when only wrapper is specified in
  # make_global_settings.
  header_params['make_global_settings'] = make_global_settings

  gyp.common.EnsureDirExists(makefile_path)
  root_makefile = open(makefile_path, 'w')
  root_makefile.write(SHARED_HEADER % header_params)
  # Currently any versions have the same effect, but in future the behavior
  # could be different.
  if android_ndk_version:
    root_makefile.write(
        '# Define LOCAL_PATH for build of Android applications.\n'
        'LOCAL_PATH := $(call my-dir)\n'
        '\n')
  for toolset in toolsets:
    root_makefile.write('TOOLSET := %s\n' % toolset)
  WriteRootHeaderSuffixRules(root_makefile)

  # Put build-time support tools next to the root Makefile.
  dest_path = os.path.dirname(makefile_path)
  gyp.common.CopyTool(flavor, dest_path)

  # Find the list of targets that derive from the gyp file(s) being built.
  needed_targets = set()
  for build_file in params['build_files']:
    for target in gyp.common.AllTargets(target_list, target_dicts, build_file):
      needed_targets.add(target)

  build_files = set()
  include_list = set()
  for qualified_target in target_list:
    build_file, target, toolset = gyp.common.ParseQualifiedTarget(
        qualified_target)

    this_make_global_settings = data[build_file].get('make_global_settings', [])
    assert make_global_settings_array == this_make_global_settings, (
        "make_global_settings needs to be the same for all targets. %s vs. %s" %
        (this_make_global_settings, make_global_settings))

    build_files.add(gyp.common.RelativePath(build_file, options.toplevel_dir))
    included_files = data[build_file]['included_files']
    for included_file in included_files:
      # The included_files entries are relative to the dir of the build file
      # that included them, so we have to undo that and then make them relative
      # to the root dir.
      relative_include_file = gyp.common.RelativePath(
          gyp.common.UnrelativePath(included_file, build_file),
          options.toplevel_dir)
      abs_include_file = os.path.abspath(relative_include_file)
      # If the include file is from the ~/.gyp dir, we should use absolute path
      # so that relocating the src dir doesn't break the path.
      if (params['home_dot_gyp'] and
          abs_include_file.startswith(params['home_dot_gyp'])):
        build_files.add(abs_include_file)
      else:
        build_files.add(relative_include_file)

    base_path, output_file = CalculateMakefilePath(build_file,
        target + '.' + toolset + options.suffix + '.mk')

    spec = target_dicts[qualified_target]
    configs = spec['configurations']

    if flavor == 'mac':
      gyp.xcode_emulation.MergeGlobalXcodeSettingsToSpec(data[build_file], spec)

    writer = MakefileWriter(generator_flags, flavor)
    writer.Write(qualified_target, base_path, output_file, spec, configs,
                 part_of_all=qualified_target in needed_targets)

    # Our root_makefile lives at the source root.  Compute the relative path
    # from there to the output_file for including.
    mkfile_rel_path = gyp.common.RelativePath(output_file,
                                              os.path.dirname(makefile_path))
    include_list.add(mkfile_rel_path)

  # Write out per-gyp (sub-project) Makefiles.
  depth_rel_path = gyp.common.RelativePath(options.depth, os.getcwd())
  for build_file in build_files:
    # The paths in build_files were relativized above, so undo that before
    # testing against the non-relativized items in target_list and before
    # calculating the Makefile path.
    build_file = os.path.join(depth_rel_path, build_file)
    gyp_targets = [target_dicts[target]['target_name']
                   for target in target_list
                   if target.startswith(build_file) and
                   target in needed_targets]
    # Only generate Makefiles for gyp files with targets.
    if not gyp_targets:
      continue
    base_path, output_file = CalculateMakefilePath(build_file,
        os.path.splitext(os.path.basename(build_file))[0] + '.Makefile')
    makefile_rel_path = gyp.common.RelativePath(os.path.dirname(makefile_path),
                                                os.path.dirname(output_file))
    writer.WriteSubMake(output_file, makefile_rel_path, gyp_targets,
                        builddir_name)

  # Write out the sorted list of includes.
  root_makefile.write('\n')
  for include_file in sorted(include_list):
    # We wrap each .mk include in an if statement so users can tell make to
    # not load a file by setting NO_LOAD.  The below make code says, only
    # load the .mk file if the .mk filename doesn't start with a token in
    # NO_LOAD.
    root_makefile.write(
        "ifeq ($(strip $(foreach prefix,$(NO_LOAD),\\\n"
        "    $(findstring $(join ^,$(prefix)),\\\n"
        "    $(join ^," + include_file + ")))),)\n")
    root_makefile.write("  include " + include_file + "\n")
    root_makefile.write("endif\n")
  root_makefile.write('\n')

  if (not generator_flags.get('standalone')
      and generator_flags.get('auto_regeneration', True)):
    WriteAutoRegenerationRule(params, root_makefile, makefile_name, build_files)

  root_makefile.write(SHARED_FOOTER)

  root_makefile.close()
mit
graehl/yaml-cpp.new-api
test/gmock-1.7.0/gtest/test/gtest_color_test.py
3259
4911
#!/usr/bin/env python # # Copyright 2008, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
"""Verifies that Google Test correctly determines whether to use colors.""" __author__ = 'wan@google.com (Zhanyong Wan)' import os import gtest_test_utils IS_WINDOWS = os.name = 'nt' COLOR_ENV_VAR = 'GTEST_COLOR' COLOR_FLAG = 'gtest_color' COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_color_test_') def SetEnvVar(env_var, value): """Sets the env variable to 'value'; unsets it when 'value' is None.""" if value is not None: os.environ[env_var] = value elif env_var in os.environ: del os.environ[env_var] def UsesColor(term, color_env_var, color_flag): """Runs gtest_color_test_ and returns its exit code.""" SetEnvVar('TERM', term) SetEnvVar(COLOR_ENV_VAR, color_env_var) if color_flag is None: args = [] else: args = ['--%s=%s' % (COLOR_FLAG, color_flag)] p = gtest_test_utils.Subprocess([COMMAND] + args) return not p.exited or p.exit_code class GTestColorTest(gtest_test_utils.TestCase): def testNoEnvVarNoFlag(self): """Tests the case when there's neither GTEST_COLOR nor --gtest_color.""" if not IS_WINDOWS: self.assert_(not UsesColor('dumb', None, None)) self.assert_(not UsesColor('emacs', None, None)) self.assert_(not UsesColor('xterm-mono', None, None)) self.assert_(not UsesColor('unknown', None, None)) self.assert_(not UsesColor(None, None, None)) self.assert_(UsesColor('linux', None, None)) self.assert_(UsesColor('cygwin', None, None)) self.assert_(UsesColor('xterm', None, None)) self.assert_(UsesColor('xterm-color', None, None)) self.assert_(UsesColor('xterm-256color', None, None)) def testFlagOnly(self): """Tests the case when there's --gtest_color but not GTEST_COLOR.""" self.assert_(not UsesColor('dumb', None, 'no')) self.assert_(not UsesColor('xterm-color', None, 'no')) if not IS_WINDOWS: self.assert_(not UsesColor('emacs', None, 'auto')) self.assert_(UsesColor('xterm', None, 'auto')) self.assert_(UsesColor('dumb', None, 'yes')) self.assert_(UsesColor('xterm', None, 'yes')) def testEnvVarOnly(self): """Tests the case when there's GTEST_COLOR but not 
--gtest_color.""" self.assert_(not UsesColor('dumb', 'no', None)) self.assert_(not UsesColor('xterm-color', 'no', None)) if not IS_WINDOWS: self.assert_(not UsesColor('dumb', 'auto', None)) self.assert_(UsesColor('xterm-color', 'auto', None)) self.assert_(UsesColor('dumb', 'yes', None)) self.assert_(UsesColor('xterm-color', 'yes', None)) def testEnvVarAndFlag(self): """Tests the case when there are both GTEST_COLOR and --gtest_color.""" self.assert_(not UsesColor('xterm-color', 'no', 'no')) self.assert_(UsesColor('dumb', 'no', 'yes')) self.assert_(UsesColor('xterm-color', 'no', 'auto')) def testAliasesOfYesAndNo(self): """Tests using aliases in specifying --gtest_color.""" self.assert_(UsesColor('dumb', None, 'true')) self.assert_(UsesColor('dumb', None, 'YES')) self.assert_(UsesColor('dumb', None, 'T')) self.assert_(UsesColor('dumb', None, '1')) self.assert_(not UsesColor('xterm', None, 'f')) self.assert_(not UsesColor('xterm', None, 'false')) self.assert_(not UsesColor('xterm', None, '0')) self.assert_(not UsesColor('xterm', None, 'unknown')) if __name__ == '__main__': gtest_test_utils.Main()
mit
brentdax/swift
utils/swift_build_support/swift_build_support/xcrun.py
47
1843
# swift_build_support/xcrun.py - Invoke xcrun from Python -*- python -*-
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ----------------------------------------------------------------------------
#
# Python wrappers for invoking `xcrun` on the command-line.
#
# ----------------------------------------------------------------------------

from __future__ import absolute_import

from . import cache_util
from . import shell


@cache_util.cached
def find(tool, sdk=None, toolchain=None):
    """Look up a tool's path via `xcrun --find`.

    Returns the path reported by `xcrun --find`, optionally scoped to the
    given sdk and/or toolchain, or None when xcrun cannot find the tool.
    """
    args = ['xcrun', '--find', tool]
    # Append the optional scoping flags in the same order as before:
    # --sdk first, then --toolchain.
    for flag, value in (('--sdk', sdk), ('--toolchain', toolchain)):
        if value is not None:
            args += [flag, value]

    # When the tool is missing, `xcrun --find` complains on stderr; swallow
    # that with a pipe so callers only ever see the path (or None).
    output = shell.capture(
        args, stderr=shell.DEVNULL, dry_run=False, echo=False,
        optional=True)
    return None if output is None else output.rstrip()


@cache_util.cached
def sdk_path(sdk):
    """Return the filesystem path of the named SDK.

    Queries `xcrun --show-sdk-path`; returns None when the SDK cannot be
    found.
    """
    output = shell.capture(
        ['xcrun', '--sdk', sdk, '--show-sdk-path'],
        dry_run=False, echo=False, optional=True)
    return None if output is None else output.rstrip()
apache-2.0
Vagab0nd/SiCKRAGE
lib3/oauthlib/oauth1/rfc5849/utils.py
6
2770
# -*- coding: utf-8 -*- """ oauthlib.utils ~~~~~~~~~~~~~~ This module contains utility methods used by various parts of the OAuth spec. """ from __future__ import absolute_import, unicode_literals from oauthlib.common import quote, unicode_type, unquote try: import urllib2 except ImportError: import urllib.request as urllib2 UNICODE_ASCII_CHARACTER_SET = ('abcdefghijklmnopqrstuvwxyz' 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' '0123456789') def filter_params(target): """Decorator which filters params to remove non-oauth_* parameters Assumes the decorated method takes a params dict or list of tuples as its first argument. """ def wrapper(params, *args, **kwargs): params = filter_oauth_params(params) return target(params, *args, **kwargs) wrapper.__doc__ = target.__doc__ return wrapper def filter_oauth_params(params): """Removes all non oauth parameters from a dict or a list of params.""" is_oauth = lambda kv: kv[0].startswith("oauth_") if isinstance(params, dict): return list(filter(is_oauth, list(params.items()))) else: return list(filter(is_oauth, params)) def escape(u): """Escape a unicode string in an OAuth-compatible fashion. Per `section 3.6`_ of the spec. .. _`section 3.6`: https://tools.ietf.org/html/rfc5849#section-3.6 """ if not isinstance(u, unicode_type): raise ValueError('Only unicode objects are escapable. ' + 'Got %r of type %s.' % (u, type(u))) # Letters, digits, and the characters '_.-' are already treated as safe # by urllib.quote(). We need to add '~' to fully support rfc5849. 
return quote(u, safe=b'~') def unescape(u): if not isinstance(u, unicode_type): raise ValueError('Only unicode objects are unescapable.') return unquote(u) def parse_keqv_list(l): """A unicode-safe version of urllib2.parse_keqv_list""" # With Python 2.6, parse_http_list handles unicode fine return urllib2.parse_keqv_list(l) def parse_http_list(u): """A unicode-safe version of urllib2.parse_http_list""" # With Python 2.6, parse_http_list handles unicode fine return urllib2.parse_http_list(u) def parse_authorization_header(authorization_header): """Parse an OAuth authorization header into a list of 2-tuples""" auth_scheme = 'OAuth '.lower() if authorization_header[:len(auth_scheme)].lower().startswith(auth_scheme): items = parse_http_list(authorization_header[len(auth_scheme):]) try: return list(parse_keqv_list(items).items()) except (IndexError, ValueError): pass raise ValueError('Malformed authorization header')
gpl-3.0
Sorsly/subtle
google-cloud-sdk/lib/third_party/google/protobuf/timestamp_pb2.py
44
2766
# Generated by the protocol buffer compiler. DO NOT EDIT! # source: google/protobuf/timestamp.proto import sys _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database from google.protobuf import descriptor_pb2 # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor( name='google/protobuf/timestamp.proto', package='google.protobuf', syntax='proto3', serialized_pb=_b('\n\x1fgoogle/protobuf/timestamp.proto\x12\x0fgoogle.protobuf\"+\n\tTimestamp\x12\x0f\n\x07seconds\x18\x01 \x01(\x03\x12\r\n\x05nanos\x18\x02 \x01(\x05\x42~\n\x13\x63om.google.protobufB\x0eTimestampProtoP\x01Z+github.com/golang/protobuf/ptypes/timestamp\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3') ) _sym_db.RegisterFileDescriptor(DESCRIPTOR) _TIMESTAMP = _descriptor.Descriptor( name='Timestamp', full_name='google.protobuf.Timestamp', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='seconds', full_name='google.protobuf.Timestamp.seconds', index=0, number=1, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='nanos', full_name='google.protobuf.Timestamp.nanos', index=1, number=2, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=52, serialized_end=95, ) 
DESCRIPTOR.message_types_by_name['Timestamp'] = _TIMESTAMP Timestamp = _reflection.GeneratedProtocolMessageType('Timestamp', (_message.Message,), dict( DESCRIPTOR = _TIMESTAMP, __module__ = 'google.protobuf.timestamp_pb2' # @@protoc_insertion_point(class_scope:google.protobuf.Timestamp) )) _sym_db.RegisterMessage(Timestamp) DESCRIPTOR.has_options = True DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\023com.google.protobufB\016TimestampProtoP\001Z+github.com/golang/protobuf/ptypes/timestamp\370\001\001\242\002\003GPB\252\002\036Google.Protobuf.WellKnownTypes')) # @@protoc_insertion_point(module_scope)
mit
sarakha63/persomov
couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/restudy.py
146
1155
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor


class RestudyIE(InfoExtractor):
    """Extractor for videos hosted on restudy.dk."""

    _VALID_URL = r'https?://(?:www\.)?restudy\.dk/video/play/id/(?P<id>[0-9]+)'
    _TEST = {
        'url': 'https://www.restudy.dk/video/play/id/1637',
        'info_dict': {
            'id': '1637',
            'ext': 'flv',
            'title': 'Leiden-frosteffekt',
            'description': 'Denne video er et eksperiment med flydende kvælstof.',
        },
        'params': {
            # rtmp download
            'skip_download': True,
        }
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        page = self._download_webpage(url, video_id)

        # Formats come from a per-video SMIL manifest keyed on the id.
        smil_url = 'https://www.restudy.dk/awsmedia/SmilDirectory/video_%s.xml' % video_id
        formats = self._extract_smil_formats(smil_url, video_id)

        return {
            'id': video_id,
            'title': self._og_search_title(page).strip(),
            'description': self._og_search_description(page).strip(),
            'formats': formats,
        }
gpl-3.0
sopier/django
django/conf/locale/pt_BR/formats.py
504
1434
# -*- encoding: utf-8 -*- # This file is distributed under the same license as the Django package. # from __future__ import unicode_literals # The *_FORMAT strings use the Django date format syntax, # see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date DATE_FORMAT = r'j \d\e F \d\e Y' TIME_FORMAT = 'H:i' DATETIME_FORMAT = r'j \d\e F \d\e Y à\s H:i' YEAR_MONTH_FORMAT = r'F \d\e Y' MONTH_DAY_FORMAT = r'j \d\e F' SHORT_DATE_FORMAT = 'd/m/Y' SHORT_DATETIME_FORMAT = 'd/m/Y H:i' FIRST_DAY_OF_WEEK = 0 # Sunday # The *_INPUT_FORMATS strings use the Python strftime format syntax, # see http://docs.python.org/library/datetime.html#strftime-strptime-behavior DATE_INPUT_FORMATS = [ '%d/%m/%Y', '%d/%m/%y', # '25/10/2006', '25/10/06' # '%d de %b de %Y', '%d de %b, %Y', # '25 de Out de 2006', '25 Out, 2006' # '%d de %B de %Y', '%d de %B, %Y', # '25 de Outubro de 2006', '25 de Outubro, 2006' ] DATETIME_INPUT_FORMATS = [ '%d/%m/%Y %H:%M:%S', # '25/10/2006 14:30:59' '%d/%m/%Y %H:%M:%S.%f', # '25/10/2006 14:30:59.000200' '%d/%m/%Y %H:%M', # '25/10/2006 14:30' '%d/%m/%Y', # '25/10/2006' '%d/%m/%y %H:%M:%S', # '25/10/06 14:30:59' '%d/%m/%y %H:%M:%S.%f', # '25/10/06 14:30:59.000200' '%d/%m/%y %H:%M', # '25/10/06 14:30' '%d/%m/%y', # '25/10/06' ] DECIMAL_SEPARATOR = ',' THOUSAND_SEPARATOR = '.' NUMBER_GROUPING = 3
bsd-3-clause
lowitty/server
libsLinux/twisted/trial/test/detests.py
8
5368
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.

"""
Tests for Deferred handling by L{twisted.trial.unittest.TestCase}.
"""

# NOTE(review): these classes are *fixtures*, not tests to be run directly --
# many of them fail, skip, or time out on purpose, and trial's own test suite
# asserts on those outcomes.  Do not "fix" the deliberate failures.

from __future__ import division, absolute_import

from twisted.trial import unittest
from twisted.internet import defer, threads, reactor
from twisted.trial.util import suppress as SUPPRESS
from twisted.python.util import runWithWarningsSuppressed


class DeferredSetUpOK(unittest.TestCase):
    # setUp returns an already-fired Deferred; the test then checks that the
    # callback ran before the test method did.
    def setUp(self):
        d = defer.succeed('value')
        d.addCallback(self._cb_setUpCalled)
        return d

    def _cb_setUpCalled(self, ignored):
        self._setUpCalled = True

    def test_ok(self):
        self.failUnless(self._setUpCalled)


class DeferredSetUpFail(unittest.TestCase):
    # setUp fails with FailTest; test_ok must therefore never run.
    testCalled = False

    def setUp(self):
        return defer.fail(unittest.FailTest('i fail'))

    def test_ok(self):
        DeferredSetUpFail.testCalled = True
        self.fail("I should not get called")


class DeferredSetUpCallbackFail(unittest.TestCase):
    # setUp's Deferred *callback* fails; test_ok must never run.
    testCalled = False

    def setUp(self):
        d = defer.succeed('value')
        d.addCallback(self._cb_setUpCalled)
        return d

    def _cb_setUpCalled(self, ignored):
        self.fail('deliberate failure')

    def test_ok(self):
        DeferredSetUpCallbackFail.testCalled = True


class DeferredSetUpError(unittest.TestCase):
    # setUp errors (RuntimeError rather than a test failure).
    testCalled = False

    def setUp(self):
        return defer.fail(RuntimeError('deliberate error'))

    def test_ok(self):
        DeferredSetUpError.testCalled = True


class DeferredSetUpNeverFire(unittest.TestCase):
    # setUp returns a Deferred that never fires -> trial should time out.
    testCalled = False

    def setUp(self):
        return defer.Deferred()

    def test_ok(self):
        DeferredSetUpNeverFire.testCalled = True


class DeferredSetUpSkip(unittest.TestCase):
    # setUp's callback raises SkipTest; the test body must not run.
    testCalled = False

    def setUp(self):
        d = defer.succeed('value')
        d.addCallback(self._cb1)
        return d

    def _cb1(self, ignored):
        raise unittest.SkipTest("skip me")

    def test_ok(self):
        DeferredSetUpSkip.testCalled = True


class DeferredTests(unittest.TestCase):
    # Covers all terminal outcomes of a Deferred returned from a test
    # method: success, failure, error, skip, thread result, expected failure.
    touched = False

    def _cb_fail(self, reason):
        self.fail(reason)

    def _cb_error(self, reason):
        raise RuntimeError(reason)

    def _cb_skip(self, reason):
        raise unittest.SkipTest(reason)

    def _touchClass(self, ignored):
        # Class-level flag so sibling tests can observe that a callback ran.
        self.__class__.touched = True

    def setUp(self):
        self.__class__.touched = False

    def test_pass(self):
        return defer.succeed('success')

    def test_passGenerated(self):
        self._touchClass(None)
        yield None
    # deferredGenerator is deprecated; suppress its warning while wrapping.
    test_passGenerated = runWithWarningsSuppressed(
        [ SUPPRESS(message="twisted.internet.defer.deferredGenerator was "
                           "deprecated") ],
        defer.deferredGenerator, test_passGenerated)

    @defer.inlineCallbacks
    def test_passInlineCallbacks(self):
        """
        Test case that is decorated with L{defer.inlineCallbacks}.
        """
        self._touchClass(None)
        yield None

    def test_fail(self):
        return defer.fail(self.failureException('I fail'))

    def test_failureInCallback(self):
        d = defer.succeed('fail')
        d.addCallback(self._cb_fail)
        return d

    def test_errorInCallback(self):
        d = defer.succeed('error')
        d.addCallback(self._cb_error)
        return d

    def test_skip(self):
        d = defer.succeed('skip')
        d.addCallback(self._cb_skip)
        d.addCallback(self._touchClass)
        return d

    def test_thread(self):
        return threads.deferToThread(lambda : None)

    def test_expectedFailure(self):
        d = defer.succeed('todo')
        d.addCallback(self._cb_error)
        return d
    test_expectedFailure.todo = "Expected failure"


class TimeoutTests(unittest.TestCase):
    # Fixtures for trial's per-method `timeout` attribute handling; several
    # of these intentionally hang so the timeout machinery fires.
    timedOut = None

    def test_pass(self):
        d = defer.Deferred()
        reactor.callLater(0, d.callback, 'hoorj!')
        return d
    test_pass.timeout = 2

    def test_passDefault(self):
        # test default timeout
        d = defer.Deferred()
        reactor.callLater(0, d.callback, 'hoorj!')
        return d

    def test_timeout(self):
        return defer.Deferred()
    test_timeout.timeout = 0.1

    def test_timeoutZero(self):
        return defer.Deferred()
    test_timeoutZero.timeout = 0

    def test_expectedFailure(self):
        return defer.Deferred()
    test_expectedFailure.timeout = 0.1
    test_expectedFailure.todo = "i will get it right, eventually"

    def test_skip(self):
        return defer.Deferred()
    test_skip.timeout = 0.1
    test_skip.skip = "i will get it right, eventually"

    def test_errorPropagation(self):
        # Record the timeout failure on the class so it can be inspected.
        def timedOut(err):
            self.__class__.timedOut = err
            return err
        d = defer.Deferred()
        d.addErrback(timedOut)
        return d
    test_errorPropagation.timeout = 0.1

    def test_calledButNeverCallback(self):
        # The returned Deferred has fired, but its callback chain is stalled
        # on an inner Deferred that never fires.
        d = defer.Deferred()
        def neverFire(r):
            return defer.Deferred()
        d.addCallback(neverFire)
        d.callback(1)
        return d
    test_calledButNeverCallback.timeout = 0.1


class TestClassTimeoutAttribute(unittest.TestCase):
    # `timeout` set at class level applies to every method.
    timeout = 0.2

    def setUp(self):
        self.d = defer.Deferred()

    def testMethod(self):
        self.methodCalled = True
        return self.d
mit
eppye-bots/bots
bots/xml2botsgrammar.py
1
12637
from __future__ import print_function from __future__ import unicode_literals import os import sys import atexit import copy import logging try: from xml.etree import cElementTree as ET except ImportError: from xml.etree import ElementTree as ET try: from collections import OrderedDict except: from .bots_ordereddict import OrderedDict #bots-modules from . import botslib from . import botsinit from . import botsglobal from . import inmessage from . import outmessage from . import node from .botsconfig import * ''' converts xml file to a bots grammar. Usage: c:\python27\python bots-xml2botsgrammar.py botssys/infile/test.xml botssys/infile/resultgrammar.py -cconfig Try to have a 'completely filled' xml file. ''' #************************************************************************************** #***classes used in inmessage for xml2botsgrammar. #***These classes are dynamically added to inmessage #************************************************************************************** class xmlforgrammar(inmessage.Inmessage): ''' class for ediobjects in XML. 
Uses ElementTree''' def initfromfile(self): filename = botslib.abspathdata(self.ta_info['filename']) self.ta_info['attributemarker'] = '__' parser = ET.XMLParser() etree = ET.ElementTree() #ElementTree: lexes, parses, makes etree; etree is quite similar to bots-node trees but conversion is needed etreeroot = etree.parse(filename, parser) self.root = self._etree2botstree(etreeroot) #convert etree to bots-nodes-tree def _use_botscontent(self,xmlnode): if self._is_record(xmlnode): if xmlnode.text is None: return False else: return not xmlnode.text.isspace() else: return True def _etree2botstree(self,xmlnode): newnode = node.Node(record=self._etreenode2botstreenode(xmlnode)) for xmlchildnode in xmlnode: #for every node in mpathtree if self._is_record(xmlchildnode): newnode.append(self._etree2botstree(xmlchildnode)) #add as a node/record else: ## remark for generating grammars: empty strings should generate a field here if self._use_botscontent(xmlchildnode): newnode.record[xmlchildnode.tag] = '1' #add as a field #convert the xml-attributes of this 'xml-field' to fields in dict with attributemarker. newnode.record.update((xmlchildnode.tag + self.ta_info['attributemarker'] + key, value) for key,value in xmlchildnode.items()) return newnode def _etreenode2botstreenode(self,xmlnode): ''' build a OrderedDict from xml-node. Add BOTSID, xml-attributes (of 'record'), xmlnode.text as BOTSCONTENT.''' build = OrderedDict((xmlnode.tag + self.ta_info['attributemarker'] + key,value) for key,value in xmlnode.items()) #convert xml attributes to fields. build['BOTSID'] = xmlnode.tag if self._use_botscontent(xmlnode): build['BOTSCONTENT'] = '1' return build def _is_record(self,xmlchildnode): return bool(len(xmlchildnode)) class xmlforgrammar_allrecords(inmessage.Inmessage): ''' class for ediobjects in XML. 
Uses ElementTree''' def initfromfile(self): filename = botslib.abspathdata(self.ta_info['filename']) self.ta_info['attributemarker'] = '__' parser = ET.XMLParser() etree = ET.ElementTree() #ElementTree: lexes, parses, makes etree; etree is quite similar to bots-node trees but conversion is needed etreeroot = etree.parse(filename, parser) self.root = self._etree2botstree(etreeroot) #convert etree to bots-nodes-tree def _etree2botstree(self,xmlnode): newnode = node.Node(record=self._etreenode2botstreenode(xmlnode)) for xmlchildnode in xmlnode: #for every node in mpathtree newnode.append(self._etree2botstree(xmlchildnode)) #add as a node/record return newnode def _etreenode2botstreenode(self,xmlnode): ''' build a OrderedDict from xml-node. Add BOTSID, xml-attributes (of 'record'), xmlnode.text as BOTSCONTENT.''' build = OrderedDict((xmlnode.tag + self.ta_info['attributemarker'] + key,value) for key,value in xmlnode.items()) #convert xml attributes to fields. build['BOTSID'] = xmlnode.tag if not self._is_record(xmlnode): build['BOTSCONTENT'] = '1' return build def _is_record(self,xmlchildnode): return bool(len(xmlchildnode)) #****************************************************************** #***functions for mapping****************************************** def map_treewalker(node_instance,mpath): ''' Generator function. ''' mpath.append(OrderedDict({'BOTSID':node_instance.record['BOTSID']})) for childnode in node_instance.children: yield childnode,mpath[:] for terugnode,terugmpath in map_treewalker(childnode,mpath): yield terugnode,terugmpath mpath.pop() def map_writefields(node_out,node_in,mpath): ''' als fields of this level are written to node_out. ''' mpath_with_all_fields = copy.deepcopy(mpath) #use a copy of mpath (do not want to change it) for key in node_in.record.keys(): if key in ['BOTSID','BOTSIDnr']: #skip these continue mpath_with_all_fields[-1][key] = 'dummy' #add key to the mpath node_out.put(*mpath_with_all_fields) #write all fields. 
#****************************************************************** #***functions to convert out-tree to grammar********************* def tree2grammar(node_instance,structure,recorddefs): structure.append({ID:node_instance.record['BOTSID'],MIN:0,MAX:99999,LEVEL:[]}) recordlist = [] for key in node_instance.record.keys(): recordlist.append([key, 'C', 256, 'AN']) if node_instance.record['BOTSID'] in recorddefs: recorddefs[node_instance.record['BOTSID']] = removedoublesfromlist(recorddefs[node_instance.record['BOTSID']] + recordlist) else: recorddefs[node_instance.record['BOTSID']] = recordlist for childnode in node_instance.children: tree2grammar(childnode,structure[-1][LEVEL],recorddefs) def removedoublesfromlist(orglist): list2return = [] for member in orglist: if member not in list2return: list2return.append(member) return list2return #****************************************************************** #***functions to write grammar to file***************************** def recorddefs2string(recorddefs,targetNamespace): result = '' for tag in sorted(recorddefs): result += "'%s%s':\n [\n"%(targetNamespace,tag) for field in recorddefs[tag]: if field[0] in ['BOTSID','BOTSCONTENT']: field[1] = 'M' result += " ['%s', '%s', %s, '%s'],\n"%(field[0],field[1],field[2],field[3]) for field in recorddefs[tag]: if field[0].startswith(tag + '__'): result += " ['%s', '%s', %s, '%s'],\n"%(field[0],field[1],field[2],field[3]) for field in recorddefs[tag]: if field[0] not in ['BOTSID','BOTSIDnr','BOTSCONTENT'] and not field[0].startswith(tag + '__'): result += " ['%s%s', '%s', %s, '%s'],\n"%(targetNamespace,field[0],field[1],field[2],field[3]) result += " ],\n" return result def structure2string(structure,targetNamespace,level=0): result = "" for segment in structure: if LEVEL in segment and segment[LEVEL]: result += level*' ' + "{ID:'%s%s',MIN:%s,MAX:%s,LEVEL:[\n"%(targetNamespace,segment[ID],segment[MIN],segment[MAX]) result += 
structure2string(segment[LEVEL],targetNamespace,level+1) result += level*' ' + "]},\n" else: result += level*' ' + "{ID:'%s%s',MIN:%s,MAX:%s},\n"%(targetNamespace,segment[ID],segment[MIN],segment[MAX]) return result def grammar2file(botsgrammarfilename,structure,recorddefs,targetNamespace): if targetNamespace: targetNamespace = '{' + targetNamespace + '}' result = 'structure = [\n' result += structure2string(structure,targetNamespace) result += ']\n\n' result += 'recorddefs = {\n' result += recorddefs2string(recorddefs,targetNamespace) result += "}\n" result2 = '#Generated by bots open source edi translator.\nfrom bots.botsconfig import *\n' if targetNamespace: shortNamespace = 'NS' result2 += shortNamespace + " = '" + targetNamespace + "'\n\n" result = result.replace("'" + targetNamespace,shortNamespace + "+'") result2 += result f = open(botsgrammarfilename,'wb') f.write(result2) f.close() print('grammar file is written:',botsgrammarfilename) def start(): #********command line arguments************************** usage = ''' This is "%(name)s" version %(version)s, part of Bots open source edi translator (http://bots.sourceforge.net). Creates a grammar from an xml file.' Usage:' %(name)s -c<directory> <xml_file> <xml_grammar_file> Options: -c<directory> directory for configuration files (default: config). 
-a all xml elements as records <xml_file> name of the xml file to read <xml_grammar_file> name of the grammar file to write '''%{'name':os.path.basename(sys.argv[0]),'version':botsglobal.version} configdir = 'config' edifile ='' botsgrammarfilename = '' allrecords = False for arg in sys.argv[1:]: if arg.startswith('-c'): configdir = arg[2:] if not configdir: print('Error: configuration directory indicated, but no directory name.') sys.exit(1) elif arg.startswith('-a'): allrecords = True elif arg in ['?', '/?','-h', '--help'] or arg.startswith('-'): print(usage) sys.exit(0) else: if not edifile: edifile = arg else: botsgrammarfilename = arg if not edifile or not botsgrammarfilename: print('Error: both edifile and grammarfile are required.') sys.exit(0) #***end handling command line arguments************************** botsinit.generalinit(configdir) #find locating of bots, configfiles, init paths etc. process_name = 'xml2botsgrammar' botsglobal.logger = botsinit.initenginelogging(process_name) atexit.register(logging.shutdown) targetNamespace = '' #******************************************************************* #***add classes for handling editype xml to inmessage #******************************************************************* if allrecords: #~ editype = 'xmlforgrammar_allrecords' inmessage.xmlforgrammar = xmlforgrammar_allrecords else: #~ editype = 'xmlforgrammar' inmessage.xmlforgrammar = xmlforgrammar #make inmessage object: read the xml file inn = inmessage.parse_edi_file(editype='xmlforgrammar',messagetype='',filename=edifile) inn.checkforerrorlist() #no exception if infile has been lexed and parsed OK else raises an error #make outmessage object; nothing is 'filled' yet. In mapping tree is filled; nothing is written to file. 
out = outmessage.outmessage_init(editype='xmlnocheck',messagetype='',filename='',divtext='',topartner='') #***mapping: make 'normalised' out-tree suited for writing as a grammar*************************************************** mpath_root = [OrderedDict({'BOTSID':inn.root.record['BOTSID'],'BOTSIDnr':'1'})] #handle root out.put(*mpath_root) map_writefields(out,inn.root,mpath_root) #walk tree; write results to out-tree mpath_start = [] for node_instance,mpath in map_treewalker(inn.root,mpath_start): mpath.append(OrderedDict({'BOTSID':node_instance.record['BOTSID']})) if out.get(*mpath) is None: #if node does not exist: write it. out.put(*mpath) map_writefields(out,node_instance,mpath) #***mapping is done #***convert out-tree to grammar structure = [] recorddefs = {} tree2grammar(out.root,structure,recorddefs) #***write grammar to file grammar2file(botsgrammarfilename,structure,recorddefs,targetNamespace) if __name__ == '__main__': start()
gpl-3.0
jbzdak/edx-platform
openedx/core/lib/block_cache/tests/test_block_structure_factory.py
33
4070
""" Tests for block_structure_factory.py """ # pylint: disable=protected-access from mock import patch from unittest import TestCase from ..block_structure_factory import BlockStructureFactory from .test_utils import ( MockCache, MockModulestoreFactory, MockTransformer, ChildrenMapTestMixin ) class TestBlockStructureFactory(TestCase, ChildrenMapTestMixin): """ Tests for BlockStructureFactory """ def setUp(self): super(TestBlockStructureFactory, self).setUp() self.children_map = self.SIMPLE_CHILDREN_MAP self.modulestore = MockModulestoreFactory.create(self.children_map) self.block_structure = BlockStructureFactory.create_from_modulestore( root_block_usage_key=0, modulestore=self.modulestore ) self.transformers = [MockTransformer] mock_registry = patch( 'openedx.core.lib.block_cache.transformer_registry.TransformerRegistry.get_available_plugins' ) mock_registry.return_value = {transformer.name(): transformer for transformer in self.transformers} self.addCleanup(mock_registry.stop) mock_registry.start() def add_transformers(self): """ Add each registered transformer to the block structure. Mimic collection by setting test transformer block data. 
""" for transformer in self.transformers: self.block_structure._add_transformer(transformer) self.block_structure.set_transformer_block_field( usage_key=0, transformer=transformer, key='test', value='{} val'.format(transformer.name()) ) def test_create_from_modulestore(self): self.assert_block_structure(self.block_structure, self.children_map) def test_not_in_cache(self): cache = MockCache() self.assertIsNone( BlockStructureFactory.create_from_cache( root_block_usage_key=0, cache=cache, transformers=self.transformers, ) ) def test_uncollected_transformers(self): cache = MockCache() # serialize the structure to cache, but without collecting any transformer data BlockStructureFactory.serialize_to_cache(self.block_structure, cache) with patch('openedx.core.lib.block_cache.block_structure_factory.logger.info') as mock_logger: # cached data does not have collected information for all registered transformers self.assertIsNone( BlockStructureFactory.create_from_cache( root_block_usage_key=0, cache=cache, transformers=self.transformers, ) ) self.assertTrue(mock_logger.called) def test_cache(self): cache = MockCache() # collect transformer data self.add_transformers() # serialize to cache BlockStructureFactory.serialize_to_cache(self.block_structure, cache) # test re-create from cache self.modulestore.get_items_call_count = 0 from_cache_block_structure = BlockStructureFactory.create_from_cache( root_block_usage_key=0, cache=cache, transformers=self.transformers, ) self.assertIsNotNone(from_cache_block_structure) self.assert_block_structure(from_cache_block_structure, self.children_map) self.assertEquals(self.modulestore.get_items_call_count, 0) def test_remove_from_cache(self): cache = MockCache() # collect transformer data self.add_transformers() # serialize to cache BlockStructureFactory.serialize_to_cache(self.block_structure, cache) # remove from cache BlockStructureFactory.remove_from_cache(root_block_usage_key=0, cache=cache) self.assertIsNone( 
BlockStructureFactory.create_from_cache( root_block_usage_key=0, cache=cache, transformers=self.transformers ) )
agpl-3.0
haeusser/tensorflow
tensorflow/python/kernel_tests/array_ops_test.py
18
37359
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for array_ops.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import time import numpy as np from tensorflow.python.client import session from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import errors_impl from tensorflow.python.framework import sparse_tensor from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import test_ops from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import gradients_impl from tensorflow.python.ops import math_ops from tensorflow.python.ops import variables from tensorflow.python.platform import test as test_lib class BatchMatrixTransposeTest(test_util.TensorFlowTestCase): def testNonBatchMatrix(self): matrix = [[1, 2, 3], [4, 5, 6]] # Shape (2, 3) expected_transposed = [[1, 4], [2, 5], [3, 6]] # Shape (3, 2) with self.test_session(): transposed = array_ops.matrix_transpose(matrix) self.assertEqual((3, 2), transposed.get_shape()) self.assertAllEqual(expected_transposed, transposed.eval()) def testBatchMatrix(self): matrix_0 = [[1, 2, 3], [4, 5, 6]] matrix_0_t = [[1, 
4], [2, 5], [3, 6]] matrix_1 = [[11, 22, 33], [44, 55, 66]] matrix_1_t = [[11, 44], [22, 55], [33, 66]] batch_matrix = [matrix_0, matrix_1] # Shape (2, 2, 3) expected_transposed = [matrix_0_t, matrix_1_t] # Shape (2, 3, 2) with self.test_session(): transposed = array_ops.matrix_transpose(batch_matrix) self.assertEqual((2, 3, 2), transposed.get_shape()) self.assertAllEqual(expected_transposed, transposed.eval()) def testNonBatchMatrixDynamicallyDefined(self): matrix = [[1, 2, 3], [4, 5, 6]] # Shape (2, 3) expected_transposed = [[1, 4], [2, 5], [3, 6]] # Shape (3, 2) with self.test_session(): matrix_ph = array_ops.placeholder(dtypes.int32) transposed = array_ops.matrix_transpose(matrix_ph) self.assertAllEqual( expected_transposed, transposed.eval(feed_dict={matrix_ph: matrix})) def testBatchMatrixDynamicallyDefined(self): matrix_0 = [[1, 2, 3], [4, 5, 6]] matrix_0_t = [[1, 4], [2, 5], [3, 6]] matrix_1 = [[11, 22, 33], [44, 55, 66]] matrix_1_t = [[11, 44], [22, 55], [33, 66]] batch_matrix = [matrix_0, matrix_1] # Shape (2, 2, 3) expected_transposed = [matrix_0_t, matrix_1_t] # Shape (2, 3, 2) with self.test_session(): batch_matrix_ph = array_ops.placeholder(dtypes.int32) transposed = array_ops.matrix_transpose(batch_matrix_ph) self.assertAllEqual( expected_transposed, transposed.eval(feed_dict={batch_matrix_ph: batch_matrix})) def testTensorWithStaticRankLessThanTwoRaisesBecauseNotAMatrix(self): vector = [1, 2, 3] with self.test_session(): with self.assertRaisesRegexp(ValueError, "should be a "): array_ops.matrix_transpose(vector) class BooleanMaskTest(test_util.TensorFlowTestCase): def setUp(self): self.rng = np.random.RandomState(42) def CheckVersusNumpy(self, ndims_mask, arr_shape, make_mask=None): """Check equivalence between boolean_mask and numpy masking.""" if make_mask is None: make_mask = lambda shape: self.rng.randint(0, 2, size=shape).astype(bool) arr = np.random.rand(*arr_shape) mask = make_mask(arr_shape[:ndims_mask]) masked_arr = arr[mask] with 
self.test_session(): masked_tensor = array_ops.boolean_mask(arr, mask) # Leading dimension size of masked_tensor is always unknown until runtime # since we don't how many elements will be kept. self.assertAllEqual(masked_tensor.get_shape()[1:], masked_arr.shape[1:]) self.assertAllClose(masked_arr, masked_tensor.eval()) def testMaskDim1ArrDim1(self): ndims_mask = 1 for arr_shape in [(1,), (2,), (3,), (10,)]: self.CheckVersusNumpy(ndims_mask, arr_shape) def testMaskDim1ArrDim2(self): ndims_mask = 1 for arr_shape in [(1, 1), (2, 2), (2, 5)]: self.CheckVersusNumpy(ndims_mask, arr_shape) def testMaskDim2ArrDim2(self): ndims_mask = 2 for arr_shape in [(1, 1), (2, 2), (2, 5)]: self.CheckVersusNumpy(ndims_mask, arr_shape) def testMaskDim2ArrDim3(self): ndims_mask = 2 for arr_shape in [(1, 1, 1), (1, 2, 2), (2, 2, 1)]: self.CheckVersusNumpy(ndims_mask, arr_shape) def testEmptyInput2D(self): mask = np.array([True, False]) arr = np.array([[], []]).astype(np.float32) numpy_result = arr[mask] tf_result = array_ops.boolean_mask(arr, mask) self.assertAllEqual(numpy_result.shape[1:], tf_result.get_shape()[1:]) with self.test_session(): self.assertAllClose(numpy_result, tf_result.eval()) def testEmptyInput1D(self): mask = np.array([]).astype(bool) arr = np.array([]).astype(np.float32) numpy_result = arr[mask] tf_result = array_ops.boolean_mask(arr, mask) self.assertAllEqual(numpy_result.shape[1:], tf_result.get_shape()[1:]) with self.test_session(): self.assertAllClose(numpy_result, tf_result.eval()) def testEmptyOutput(self): make_mask = lambda shape: np.zeros(shape, dtype=bool) for ndims_mask in range(1, 4): for ndims_arr in range(ndims_mask, ndims_mask + 3): for _ in range(3): arr_shape = np.random.randint(1, 5, size=ndims_arr) self.CheckVersusNumpy(ndims_mask, arr_shape, make_mask=make_mask) def testWorksWithDimensionsEqualToNoneDuringGraphBuild(self): # The rank of the mask tensor must be specified. This is explained # in the docstring as well. 
with self.test_session() as sess: ph_tensor = array_ops.placeholder(dtypes.int32, shape=None) ph_mask = array_ops.placeholder(dtypes.bool, shape=[None]) arr = np.array([[1, 2], [3, 4]]) mask = np.array([False, True]) masked_tensor = sess.run(array_ops.boolean_mask(ph_tensor, ph_mask), feed_dict={ph_tensor: arr, ph_mask: mask}) np.testing.assert_allclose(masked_tensor, arr[mask]) def testMaskDimensionsSetToNoneRaises(self): # The rank of the mask tensor must be specified. This is explained # in the docstring as well. with self.test_session(): tensor = array_ops.placeholder(dtypes.int32, shape=[None, 2]) mask = array_ops.placeholder(dtypes.bool, shape=None) with self.assertRaisesRegexp(ValueError, "dimensions must be specified"): array_ops.boolean_mask(tensor, mask) def testMaskHasMoreDimsThanTensorRaises(self): mask = [[True, True], [False, False]] tensor = [1, 2, 3, 4] with self.test_session(): with self.assertRaisesRegexp(ValueError, "incompatible"): array_ops.boolean_mask(tensor, mask).eval() def testMaskIsScalarRaises(self): mask = True tensor = 1 with self.test_session(): with self.assertRaisesRegexp(ValueError, "mask.*scalar"): array_ops.boolean_mask(tensor, mask).eval() def testMaskShapeDifferentThanFirstPartOfTensorShapeRaises(self): mask = [True, True, True] tensor = [[1, 2], [3, 4]] with self.test_session(): with self.assertRaisesRegexp(ValueError, "incompatible"): array_ops.boolean_mask(tensor, mask).eval() class OperatorShapeTest(test_util.TensorFlowTestCase): def testExpandScalar(self): scalar = "hello" scalar_expanded = array_ops.expand_dims(scalar, [0]) self.assertEqual(scalar_expanded.get_shape(), (1,)) def testSqueezeScalar(self): scalar = "hello" scalar_squeezed = array_ops.squeeze(scalar, ()) self.assertEqual(scalar_squeezed.get_shape(), ()) def testSqueezeMatrix(self): matrix = [[1, 2, 3]] matrix_squeezed = array_ops.squeeze(matrix, [0]) self.assertEqual(matrix_squeezed.get_shape(), (3)) with self.assertRaises(ValueError): matrix_squeezed = 
array_ops.squeeze(matrix, [1]) def testSqueezeScalarDim(self): matrix = [[1, 2, 3]] matrix_squeezed = array_ops.squeeze(matrix, 0) self.assertEqual(matrix_squeezed.get_shape(), (3)) class ReverseV2Test(test_util.TensorFlowTestCase): def testReverse0DimAuto(self): x_np = 4 for use_gpu in [False, True]: with self.test_session(use_gpu=use_gpu): x_tf = array_ops.reverse_v2(x_np, []).eval() self.assertAllEqual(x_tf, x_np) def _reverse1DimAuto(self, np_dtype): x_np = np.array([1, 2, 3, 4, 5], dtype=np_dtype) for use_gpu in [False, True]: with self.test_session(use_gpu=use_gpu): x_tf = array_ops.reverse_v2(x_np, [0]).eval() self.assertAllEqual(x_tf, np.asarray(x_np)[::-1]) def _reverse2DimAuto(self, np_dtype): x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np_dtype) for reverse_f in [array_ops.reverse_v2, array_ops.reverse]: for use_gpu in [False, True]: with self.test_session(use_gpu=use_gpu): x_tf_1 = reverse_f(x_np, [0]).eval() x_tf_2 = reverse_f(x_np, [-2]).eval() x_tf_3 = reverse_f(x_np, [1]).eval() x_tf_4 = reverse_f(x_np, [-1]).eval() x_tf_5 = reverse_f(x_np, [1, 0]).eval() self.assertAllEqual(x_tf_1, np.asarray(x_np)[::-1, :]) self.assertAllEqual(x_tf_2, np.asarray(x_np)[::-1, :]) self.assertAllEqual(x_tf_3, np.asarray(x_np)[:, ::-1]) self.assertAllEqual(x_tf_4, np.asarray(x_np)[:, ::-1]) self.assertAllEqual(x_tf_5, np.asarray(x_np)[::-1, ::-1]) # This is the version of reverse that uses axis indices rather than # bool tensors # TODO(b/32254538): Change this test to use array_ops.reverse def testInvalid(self): x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) with self.test_session(): with self.assertRaisesRegexp(errors_impl.InvalidArgumentError, "is out of valid range"): array_ops.reverse_v2(x_np, [-30]).eval() with self.assertRaisesRegexp(errors_impl.InvalidArgumentError, "is out of valid range"): array_ops.reverse_v2(x_np, [2]).eval() with self.assertRaisesRegexp(errors_impl.InvalidArgumentError, "axis 0 specified more than once"): 
array_ops.reverse_v2(x_np, [0, -2]).eval() def testReverse1DimAuto(self): for dtype in [ np.uint8, np.int8, np.int32, np.int64, np.bool, np.float16, np.float32, np.float64, np.complex64, np.complex128 ]: self._reverse1DimAuto(dtype) def testReverse2DimAuto(self): for dtype in [ np.uint8, np.int8, np.int32, np.int64, np.bool, np.float16, np.float32, np.float64, np.complex64, np.complex128 ]: self._reverse2DimAuto(dtype) def testUnknownDims(self): reverse_v2 = array_ops.reverse_v2 data_t = array_ops.placeholder(dtypes.float32) axis_known_t = array_ops.placeholder(dtypes.int32, shape=[3]) reverse_known_t = reverse_v2(data_t, axis_known_t) # Unlike V1 we cannot know this anymore self.assertEqual(None, reverse_known_t.get_shape().ndims) axis_unknown_t = array_ops.placeholder(dtypes.int32) reverse_unknown_t = reverse_v2(data_t, axis_unknown_t) self.assertIs(None, reverse_unknown_t.get_shape().ndims) data_2d_t = array_ops.placeholder(dtypes.float32, shape=[None, None]) axis_2d_t = array_ops.placeholder(dtypes.int32, shape=[3]) reverse_2d_t = reverse_v2(data_2d_t, axis_2d_t) self.assertEqual(2, reverse_2d_t.get_shape().ndims) def testReverseRowsOf3Channels(self): """Tests optimized code for reversing rows with last dim size = 3.""" with self.test_session(use_gpu=True): for reverse_f in [array_ops.reverse_v2, array_ops.reverse]: for outer_size in (1, 2): for middle_size in list(range(50)) + [100000]: x_np = np.reshape( np.arange( outer_size * middle_size * 3, dtype=np.float32), newshape=(outer_size, middle_size, 3)) x_tf = reverse_f(x_np, [1]).eval() np_answer = x_np[:, ::-1, :] self.assertAllEqual(x_tf, np_answer) def testReverseRowsOf4Channels(self): with self.test_session(use_gpu=True): for reverse_f in [array_ops.reverse_v2, array_ops.reverse]: for outer_size in (1, 2): for middle_size in list(range(50)) + [100000]: x_np = np.reshape( np.arange( outer_size * middle_size * 4, dtype=np.float32), newshape=(outer_size, middle_size, 4)) x_tf = reverse_f(x_np, [1]).eval() 
np_answer = x_np[:, ::-1, :] self.assertAllEqual(x_tf, np_answer) def testReverseColumnsOf3Channels(self): with self.test_session(use_gpu=True): for reverse_f in [array_ops.reverse_v2, array_ops.reverse]: for outer_size in list(range(50)) + [100000]: for middle_size in (1, 2): x_np = np.reshape( np.arange( outer_size * middle_size * 3, dtype=np.float32), newshape=(outer_size, middle_size, 3)) x_tf = reverse_f(x_np, [0]).eval() np_answer = x_np[::-1, :, :] self.assertAllEqual(x_tf, np_answer) class MeshgridTest(test_util.TensorFlowTestCase): def _compareDiff(self, x, y, use_gpu): for index in ("ij", "xy"): numpy_out = np.meshgrid(x, y, indexing=index) tf_out = array_ops.meshgrid(x, y, indexing=index) with self.test_session(use_gpu=use_gpu): for xx, yy in zip(numpy_out, tf_out): self.assertAllEqual(xx, yy.eval()) def _compareDiffType(self, n, np_dtype, use_gpu): inputs = [] for index in ("ij", "xy"): for i in range(n): x = np.linspace(-10, 10, 5).astype(np_dtype) if np_dtype in (np.complex64, np.complex128): x += 1j inputs.append(x) numpy_out = np.meshgrid(*inputs, indexing=index) with self.test_session(use_gpu=use_gpu): tf_out = array_ops.meshgrid(*inputs, indexing=index) for X, _X in zip(numpy_out, tf_out): self.assertAllEqual(X, _X.eval()) def testCompare(self): for t in (np.float16, np.float32, np.float64, np.int32, np.int64, np.complex64, np.complex128): self._compareDiffType(2, t, False) self._compareDiffType(3, t, False) x = [1, 2, 3] y = [4, 5] a = [[1, 1], [1, 1]] self._compareDiff(x, y, False) self._compareDiff(x, a, False) class StridedSliceChecker(object): """Check a given tensor against the numpy result.""" REF_TENSOR = np.arange(1, 19, dtype=np.float32).reshape(3, 2, 3) REF_TENSOR_ALIGNED = np.arange(1, 97, dtype=np.float32).reshape(3, 4, 8) def __init__(self, test, x, tensor_type=dtypes.int32, check_type_infer=True): self.test = test self.x = math_ops.cast( constant_op.constant( x, dtype=dtypes.float32), dtype=tensor_type) self.x_np = np.array(x) 
self.check_type_infer = check_type_infer def __getitem__(self, spec): op = self.x.__getitem__(spec) if not isinstance(spec, (list, tuple)): spec = [spec] tensor = op.eval() # Make a numpy spec that pre-evals the tensors np_specs = [] def eval_if_tensor(x): try: return x.eval() except AttributeError: return x for s in spec: if isinstance(s, slice): start = eval_if_tensor(s.start) stop = eval_if_tensor(s.stop) step = eval_if_tensor(s.step) np_specs.append(slice(start, stop, step)) else: np_specs.append(eval_if_tensor(s)) self.test.assertAllEqual(self.x_np[tuple(np_specs)], tensor) if self.check_type_infer: self.test.assertAllEqual(tensor.shape, op.get_shape()) return tensor STRIDED_SLICE_TYPES = [dtypes.int32, dtypes.int64, dtypes.int16, dtypes.int8, dtypes.float32, dtypes.float64, dtypes.complex64] class StridedSliceTest(test_util.TensorFlowTestCase): """Test the strided slice operation with variants of slices.""" def test_basic_slice(self): for tensor_type in STRIDED_SLICE_TYPES: for use_gpu in [False, True]: with self.test_session(use_gpu=use_gpu): checker = StridedSliceChecker( self, StridedSliceChecker.REF_TENSOR, tensor_type=tensor_type) _ = checker[:, :, :] # Various ways of representing identity slice _ = checker[:, :, :] _ = checker[::, ::, ::] _ = checker[::1, ::1, ::1] # Not zero slice _ = checker[::1, ::5, ::2] # Reverse in each dimension independently _ = checker[::-1, :, :] _ = checker[:, ::-1, :] _ = checker[:, :, ::-1] ## negative index tests i.e. n-2 in first component _ = checker[-2::-1, :, ::1] # negative index tests i.e. n-2 in first component, non-unit stride _ = checker[-2::-1, :, ::2] # Check rank-0 examples checker2 = StridedSliceChecker(self, 5, tensor_type=tensor_type) _ = checker2[None] _ = checker2[...] 
_ = checker2[tuple()] def testDegenerateSlices(self): for use_gpu in [False, True]: with self.test_session(use_gpu=use_gpu): checker = StridedSliceChecker(self, StridedSliceChecker.REF_TENSOR) # degenerate by offering a forward interval with a negative stride _ = checker[0:-1:-1, :, :] # degenerate with a reverse interval with a positive stride _ = checker[-1:0, :, :] # empty interval in every dimension _ = checker[-1:0, 2:2, 2:3:-1] def testEllipsis(self): for use_gpu in [False, True]: with self.test_session(use_gpu=use_gpu): raw = [[[[[1, 2], [3, 4], [5, 6]]], [[[7, 8], [9, 10], [11, 12]]]]] checker = StridedSliceChecker(self, raw) _ = checker[0:] # implicit ellipsis _ = checker[0:, ...] # ellipsis alone _ = checker[...] # ellipsis at end _ = checker[0:1, ...] # ellipsis at begin _ = checker[..., 0:1] # ellipsis at middle _ = checker[0:1, ..., 0:1] # multiple ellipses not allowed with self.assertRaisesRegexp(ValueError, "Multiple ellipses"): _ = checker[..., :, ...].eval() def testShrink(self): for use_gpu in [False, True]: with self.test_session(use_gpu=use_gpu): raw = [[[[[1, 2, 4, 5], [5, 6, 7, 8], [9, 10, 11, 12]]], [[[13, 14, 15, 16], [17, 18, 19, 20], [21, 22, 23, 24]]]]] checker = StridedSliceChecker(self, raw) _ = checker[:, :, :, :, 3] _ = checker[..., 3] _ = checker[:, 0] _ = checker[:, :, 0] def testTensorIndexing(self): for use_gpu in [False, True]: with self.test_session(use_gpu=use_gpu): raw = [[[[[1, 2, 4, 5], [5, 6, 7, 8], [9, 10, 11, 12]]], [[[13, 14, 15, 16], [17, 18, 19, 20], [21, 22, 23, 24]]]]] checker = StridedSliceChecker(self, raw, check_type_infer=False) bar = constant_op.constant(2) bar2 = constant_op.constant(3) _ = checker[..., bar:bar2] _ = checker[..., bar] with self.assertRaisesRegexp( TypeError, "Value passed to parameter 'begin' has DataType float32 not in " "list of allowed values"): _ = checker[..., 3.0] _ = checker[..., 3] def testExpand(self): for use_gpu in [False, True]: with self.test_session(use_gpu=use_gpu): raw = [[[[[1, 
2, 4, 5], [5, 6, 7, 8], [9, 10, 11, 12]]], [[[13, 14, 15, 16], [17, 18, 19, 20], [21, 22, 23, 24]]]]] checker = StridedSliceChecker(self, raw) # new axis (followed by implicit ellipsis) _ = checker[np.newaxis] # newaxis after ellipsis _ = checker[..., np.newaxis] # newaxis in between ellipsis and explicit range _ = checker[..., np.newaxis, :] _ = checker[:, ..., np.newaxis, :, :] # Reverse final dimension with new axis _ = checker[:, :, np.newaxis, :, 2::-1] # Ellipsis in middle of two newaxis _ = checker[np.newaxis, ..., np.newaxis] def testExpandVariable(self): for use_gpu in False, True: with self.test_session(use_gpu=use_gpu): x = variables.Variable(7, dtype=dtypes.int32) x.initializer.run() y = x[None].eval() self.assertEqual(y.shape, (1,)) self.assertAllEqual(y, (7,)) def testOptimizedCases(self): for use_gpu in [False, True]: with self.test_session(use_gpu=use_gpu): checker = StridedSliceChecker(self, StridedSliceChecker.REF_TENSOR_ALIGNED) # Identity _ = checker[:] # Identity _ = checker[...] # Identity _ = checker[np.newaxis, ..., np.newaxis] # First axis slice _ = checker[1:] # First axis slice _ = checker[np.newaxis, 1:] class StridedSliceShapeChecker(object): def __init__(self, x): self.x = x def __getitem__(self, spec): op = self.x.__getitem__(spec) return op.get_shape() class StridedSliceShapeTest(test_util.TensorFlowTestCase): """Test the shape inference of StridedSliceShapes.""" def testUnknown(self): with self.test_session(use_gpu=False): uncertain_tensor = array_ops.placeholder(dtypes.float32) a = StridedSliceShapeChecker(uncertain_tensor) a_slice_shape = a[...] 
self.assertAllEqual(a_slice_shape.ndims, None) def tensorShapeEqual(self, x, y): self.assertTrue(x is not None and y is not None or x is None and y is None) self.assertEqual(x.as_list(), y.as_list()) def testTensorShapeUncertain(self): for use_gpu in [False, True]: with self.test_session(use_gpu=use_gpu): uncertain_tensor = array_ops.placeholder( dtypes.float32, shape=(5, None, 7)) a = StridedSliceShapeChecker(uncertain_tensor) self.tensorShapeEqual(a[3:5], tensor_shape.TensorShape([2, None, 7])) self.tensorShapeEqual(a[3:5, :, 4], tensor_shape.TensorShape([2, None])) self.tensorShapeEqual(a[3:5, 3:4, 4], tensor_shape.TensorShape([2, None])) self.tensorShapeEqual(a[3:5, :, 5:10], tensor_shape.TensorShape([2, None, 2])) self.tensorShapeEqual(a[3:5, :, 50:3], tensor_shape.TensorShape([2, None, 0])) self.tensorShapeEqual(a[3:5, :, array_ops.newaxis, 50:3,], tensor_shape.TensorShape([2, None, 1, 0])) self.tensorShapeEqual(a[1:5:2, :, array_ops.newaxis, 50:3,], tensor_shape.TensorShape([2, None, 1, 0])) self.tensorShapeEqual(a[:5:3, :, array_ops.newaxis, 50:3,], tensor_shape.TensorShape([2, None, 1, 0])) self.tensorShapeEqual(a[:2:3, :, array_ops.newaxis, 50:3,], tensor_shape.TensorShape([1, None, 1, 0])) self.tensorShapeEqual(a[::-1, :, array_ops.newaxis, ::-2], tensor_shape.TensorShape([5, None, 1, 4])) def testTensorValuedIndexShape(self): for use_gpu in [False, True]: with self.test_session(use_gpu=use_gpu): defined_shape_tensor = array_ops.placeholder( dtypes.float32, shape=(5, 3, 7)) index_value = array_ops.placeholder(dtypes.int32, shape=()) a = StridedSliceShapeChecker(defined_shape_tensor) self.tensorShapeEqual(a[index_value], tensor_shape.TensorShape([3, 7])) self.tensorShapeEqual(a[index_value, ::-1], tensor_shape.TensorShape([3, 7])) self.tensorShapeEqual(a[index_value, ::-2], tensor_shape.TensorShape([2, 7])) other_scalar = array_ops.placeholder(dtypes.int32, shape=()) self.tensorShapeEqual(a[index_value, other_scalar:2], tensor_shape.TensorShape([None, 
7])) class GradSliceChecker(object): """Tests that we can compute a gradient for var^2.""" def __init__(self, test, sess, var, varnp): self.test = test self.sess = sess self.val = var * var self.var = var self.varnp = varnp def __getitem__(self, spec): slice_var = self.var[spec] slice_val = self.val[spec] # compute analytic 2nd derivative analytic_grad2 = 2 * slice_val dy = variables.Variable( array_ops.ones( shape=slice_var.get_shape(), dtype=dtypes.int32)) assign = dy.assign(slice_var) slice_val_grad, = gradients_impl.gradients(slice_val, self.var, grad_ys=dy) slice_val_grad2, = gradients_impl.gradients( slice_val_grad, dy, grad_ys=self.var) self.sess.run(assign) slice_val_grad_evaled, slice_val_grad2_evaled = ( self.sess.run([slice_val_grad, slice_val_grad2])) analytic_grad2_evaled = analytic_grad2.eval() self.test.assertAllEqual(slice_val_grad2_evaled, analytic_grad2_evaled) # compute analytic gradient for slice np_val_grad = (2 * self.varnp * self.varnp) np_sliceval_grad = np.zeros(self.var.get_shape()) np_sliceval_grad[spec] = np_val_grad[spec] # verify gradient self.test.assertAllEqual(slice_val_grad_evaled, np_sliceval_grad) class StridedSliceGradTest(test_util.TensorFlowTestCase): """Test that strided slice's custom gradient produces correct gradients.""" def testGradient(self): for use_gpu in [False, True]: with self.test_session(use_gpu=use_gpu) as sess: var = variables.Variable( array_ops.reshape( math_ops.range(1, 97, 1), shape=(6, 4, 4))) init = variables.global_variables_initializer() sess.run(init) grad = GradSliceChecker(self, sess, var, np.array(range(1, 97, 1)).reshape((6, 4, 4))) _ = grad[2:6:2, 1:3, 1:3] _ = grad[3:0:-2, 1:3, 1:3] _ = grad[3:0:-2, array_ops.newaxis, 1:3, 2, array_ops.newaxis] _ = grad[3:0:-2, 1:3, 2] _ = grad[:, -1, :] _ = grad[:, -2, :] with self.assertRaisesRegexp(ValueError, "out of bounds"): _ = grad[:, -200, :] with self.assertRaisesRegexp(ValueError, "out of bounds"): _ = grad[:, 200, :] def testGradientZero(self): for 
use_gpu in [False, True]: with self.test_session(use_gpu=use_gpu) as sess: var = variables.Variable(8) init = variables.global_variables_initializer() sess.run(init) grad = GradSliceChecker(self, sess, var, np.array(8)) _ = grad[tuple()] class StridedSliceGradTypeTest(test_util.TensorFlowTestCase): """Test varied index types and host located memory.""" def testHostVsDevice(self): with self.test_session(use_gpu=True) as sess: var2 = variables.Variable( array_ops.reshape( math_ops.cast(math_ops.range(1, 5, 1), dtypes.float32), shape=(4, 1, 1))) varshape = variables.Variable([6, 4, 4], dtype=dtypes.int32) sess.run(variables.global_variables_initializer()) begin = constant_op.constant([0, 0, 0]) end = constant_op.constant([4, 1, 1]) strides = constant_op.constant([1, 1, 1]) foo = array_ops.strided_slice_grad(varshape, begin, end, strides, var2) sess.run(foo) def testInt64Shape(self): with self.test_session(use_gpu=True) as sess: original_dy = array_ops.reshape( math_ops.cast(math_ops.range(1, 5, 1), dtypes.float32), shape=(4, 1, 1)) original_shape = constant_op.constant([6, 4, 4], dtype=dtypes.int64) sess.run(variables.global_variables_initializer()) begin = constant_op.constant([0, 0, 0], dtype=dtypes.int64) end = constant_op.constant([4, 1, 1], dtype=dtypes.int64) strides = constant_op.constant([1, 1, 1], dtype=dtypes.int64) dx = array_ops.strided_slice_grad(original_shape, begin, end, strides, original_dy) sess.run(dx) def testMixedIndexTypes(self): with self.test_session(use_gpu=True) as sess: original_dy = array_ops.reshape( math_ops.cast(math_ops.range(1, 5, 1), dtypes.float32), shape=(4, 1, 1)) original_shape = constant_op.constant([6, 4, 4], dtype=dtypes.int64) sess.run(variables.global_variables_initializer()) begin = constant_op.constant([0, 0, 0], dtype=dtypes.int32) end = constant_op.constant([4, 1, 1], dtype=dtypes.int64) strides = constant_op.constant([1, 1, 1], dtype=dtypes.int64) with self.assertRaisesRegexp( TypeError, "Input 'begin' of 
'StridedSliceGrad' Op has type int32" " that does not match type int64 of argument 'shape'"): dx = array_ops.strided_slice_grad(original_shape, begin, end, strides, original_dy) sess.run(dx) class BenchmarkSlice(object): def __init__(self, tensor): self.tensor = tensor def __getitem__(self, x): return self.tensor[x] class StridedSliceBenchmark(test_lib.Benchmark): """Benchmark new strided slice operation on non-trivial case.""" def run_and_time(self, slice_op): variables.global_variables_initializer().run() for _ in range(10): _ = slice_op.eval() iters = 1000 t0 = time.time() for _ in range(iters): slice_op.eval() t1 = time.time() self.report_benchmark(iters=iters, wall_time=(t1 - t0) / 1000.0) def make_variable(self): n = 256 shape = (n, n, n) items = n**3 var = variables.Variable( array_ops.reshape(math_ops.linspace(1., float(items), items), shape), dtype=dtypes.float32) return var def benchmark_strided_slice_skip(self): with session.Session(): var = self.make_variable() helper = BenchmarkSlice(var) slice_op = helper[::2, ::1, ::2] self.run_and_time(slice_op) def benchmark_strided_slice_easy(self): with session.Session(): var = self.make_variable() helper = BenchmarkSlice(var) slice_op = helper[3::1, 3::1, 3::1] self.run_and_time(slice_op) def benchmark_slice_easy(self): with session.Session(): var = self.make_variable() slice_op = var[3::1, 3::1, 3::1] self.run_and_time(slice_op) class StridedSliceAssignChecker(object): def __init__(self, test, x, tensor_type=dtypes.float32): self.tensor_type = tensor_type self.test = test self.x = math_ops.cast( constant_op.constant( x, dtype=dtypes.float32), dtype=tensor_type) self.x_np = np.array(x) def __setitem__(self, index, value): for use_gpu in [False, True]: with self.test.test_session(use_gpu=use_gpu) as sess: var = variables.Variable(self.x) sess.run(variables.initialize_variables([var])) val = sess.run(var[index].assign( constant_op.constant( value, dtype=self.tensor_type))) valnp = np.copy(self.x_np) valnp[index] = 
np.array(value) self.test.assertAllEqual(val, valnp) class SliceAssignTest(test_util.TensorFlowTestCase): def testInvalidSlice(self): with self.test_session() as sess: foo = constant_op.constant([1, 2, 3]) with self.assertRaisesRegexp(ValueError, "Sliced assignment" " is only supported for variables"): bar = foo[:2].assign(constant_op.constant([1, 2])) sess.run(bar) def testSliceAssign(self): for dtype in STRIDED_SLICE_TYPES: checker = StridedSliceAssignChecker(self, [[1, 2, 3], [4, 5, 6]], tensor_type=dtype) # Check if equal checker[:] = [[10, 20, 30], [40, 50, 60]] # Check trivial (1,1) shape tensor checker[1:2, 1:2] = [[66]] # shrinks shape changes checker[1:2, 1] = [66] checker[1, 1:2] = [66] checker[1, 1] = 66 # newaxis shape changes checker[:, None, :] = [[[10, 20, 30]], [[40, 50, 50]]] # shrink and newaxis checker[None, None, 0, 0:1] = [[[99]]] # Non unit strides checker[::1, ::-2] = [[3, 33], [4, 44]] # degenerate interval checker[8:10, 0] = [] checker[8:10, 8:10] = [[]] # Assign vector to scalar (rank-0) using newaxis checker2 = StridedSliceAssignChecker(self, 222) checker2[()] = 6 # no indices checker2[...] 
= 6 # ellipsis checker2[None] = [6] # new axis def testUninitialized(self): with self.assertRaisesRegexp( errors.FailedPreconditionError, "Attempting to use uninitialized value Variable"): with self.test_session() as sess: v = variables.Variable([1, 2]) sess.run(v[:].assign([1, 2])) class ShapeSizeRankTest(test_util.TensorFlowTestCase): def testDenseShape(self): with self.test_session(): t_value = [[0, 42], [24, 0]] self.assertAllEqual((2, 2), array_ops.shape(t_value).eval()) self.assertEqual(4, array_ops.size(t_value).eval()) self.assertEqual(2, array_ops.rank(t_value).eval()) t = constant_op.constant(t_value) self.assertAllEqual((2, 2), array_ops.shape(t).eval()) self.assertEqual(4, array_ops.size(t).eval()) self.assertEqual(2, array_ops.rank(t).eval()) def testSparseShape(self): with self.test_session(): sp_value = sparse_tensor.SparseTensorValue( indices=((0, 1), (1, 0)), values=(42, 24), dense_shape=(2, 2)) self.assertAllEqual((2, 2), array_ops.shape(sp_value).eval()) self.assertEqual(4, array_ops.size(sp_value).eval()) self.assertEqual(2, array_ops.rank(sp_value).eval()) sp = sparse_tensor.SparseTensor.from_value(sp_value) self.assertAllEqual((2, 2), array_ops.shape(sp).eval()) self.assertEqual(4, array_ops.size(sp).eval()) self.assertEqual(2, array_ops.rank(sp).eval()) class SequenceMaskTest(test_util.TensorFlowTestCase): def testExceptions(self): with self.test_session(): with self.assertRaisesRegexp(ValueError, "lengths must be 1D"): array_ops.sequence_mask([[10, 20]], [10, 20]) with self.assertRaisesRegexp(ValueError, "maxlen must be scalar"): array_ops.sequence_mask([10, 20], [10, 20]) def testNormal(self): with self.test_session(): res = array_ops.sequence_mask(constant_op.constant([1, 3, 2]), 5) self.assertAllEqual(res.get_shape(), [3, 5]) self.assertAllEqual(res.eval(), [[True, False, False, False, False], [True, True, True, False, False], [True, True, False, False, False]]) # test dtype and default maxlen: res = array_ops.sequence_mask( 
constant_op.constant([0, 1, 4]), dtype=dtypes.float32) self.assertAllEqual(res.get_shape().as_list(), [3, None]) self.assertAllEqual(res.eval(), [[0.0, 0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0], [1.0, 1.0, 1.0, 1.0]]) def testDtypes(self): def check_dtypes(lengths_dtype, maxlen_dtype): res = array_ops.sequence_mask( constant_op.constant( [1, 3, 2], dtype=lengths_dtype), constant_op.constant( 5, dtype=maxlen_dtype)) self.assertAllEqual(res.get_shape(), [3, 5]) self.assertAllEqual(res.eval(), [[True, False, False, False, False], [True, True, True, False, False], [True, True, False, False, False]]) with self.test_session(): check_dtypes(dtypes.int32, dtypes.int32) check_dtypes(dtypes.int32, dtypes.int64) check_dtypes(dtypes.int64, dtypes.int32) check_dtypes(dtypes.int64, dtypes.int64) class ConcatSliceResourceTest(test_util.TensorFlowTestCase): def testConcatSlice(self): with self.test_session(): r1 = test_ops.stub_resource_handle_op(container="a", shared_name="b") r2 = test_ops.stub_resource_handle_op(container="a", shared_name="c") c = array_ops.stack([r1, r2]) s = array_ops.strided_slice(c, [1], [2]) test_ops.resource_create_op(s).run() with self.assertRaises(errors.AlreadyExistsError): test_ops.resource_create_op(r2).run() if __name__ == "__main__": test_lib.main()
apache-2.0
luckielordie/conan
conans/server/service/service.py
1
10285
import re from fnmatch import translate from conans.errors import RequestErrorException, NotFoundException, ForbiddenException, \ ConanException import os import jwt from conans.util.files import mkdir, list_folder_subdirs from conans.model.ref import PackageReference, ConanFileReference from conans.util.log import logger from conans.search.search import search_packages, _partial_match class FileUploadDownloadService(object): """Handles authorization from token and upload and download files""" def __init__(self, updown_auth_manager, base_store_folder): self.updown_auth_manager = updown_auth_manager self.base_store_folder = base_store_folder def get_file_path(self, filepath, token): try: encoded_path, _, user = self.updown_auth_manager.get_resource_info(token) if not self._valid_path(filepath, encoded_path): logger.info("Invalid path file!! %s: %s" % (user, filepath)) raise NotFoundException("File not found") logger.debug("Get file: user=%s path=%s" % (user, filepath)) file_path = os.path.normpath(os.path.join(self.base_store_folder, encoded_path)) return file_path except (jwt.ExpiredSignature, jwt.DecodeError, AttributeError): raise NotFoundException("File not found") def put_file(self, file_saver, abs_filepath, token, upload_size): """ file_saver is an object with the save() method without parameters """ try: encoded_path, filesize, user = self.updown_auth_manager.get_resource_info(token) # Check size if upload_size != filesize: logger.debug("Invalid size file!!: %s: %s" % (user, abs_filepath)) raise RequestErrorException("Bad file size") abs_encoded_path = os.path.abspath(os.path.join(self.base_store_folder, encoded_path)) if not self._valid_path(abs_filepath, abs_encoded_path): raise NotFoundException("File not found") logger.debug("Put file: %s: %s" % (user, abs_filepath)) mkdir(os.path.dirname(abs_filepath)) if os.path.exists(abs_filepath): os.remove(abs_filepath) file_saver.save(os.path.dirname(abs_filepath)) except (jwt.ExpiredSignature, jwt.DecodeError, 
AttributeError): raise NotFoundException("File not found") def _valid_path(self, filepath, encoded_path): if encoded_path == filepath: path = os.path.join(self.base_store_folder, encoded_path) path = os.path.normpath(path) # Protect from path outside storage "../.." if not path.startswith(self.base_store_folder): return False return True else: return False class SearchService(object): def __init__(self, authorizer, server_store, auth_user): self._authorizer = authorizer self._server_store = server_store self._auth_user = auth_user def search_packages(self, reference, query): self._authorizer.check_read_conan(self._auth_user, reference) info = search_packages(self._server_store, reference, query) return info def search_recipes(self, pattern=None, ignorecase=True): def get_ref(_pattern): if not isinstance(_pattern, ConanFileReference): try: r = ConanFileReference.loads(_pattern) except (ConanException, TypeError): r = None else: r = _pattern return r def get_folders_levels(_pattern): """If a reference with revisions is detected compare with 5 levels of subdirs""" r = get_ref(_pattern) return 5 if r and r.revision else 4 # Check directly if it is a reference ref = get_ref(pattern) if ref: # Avoid resolve latest revision if a version range is passed or we are performing a # package remove (all revisions) path = self._server_store.conan(ref, resolve_latest=False) if self._server_store.path_exists(path): return [ref] # Conan references in main storage if pattern: pattern = str(pattern) b_pattern = translate(pattern) b_pattern = re.compile(b_pattern, re.IGNORECASE) if ignorecase else re.compile(b_pattern) subdirs = list_folder_subdirs(basedir=self._server_store.store, level=get_folders_levels(pattern)) if not pattern: return sorted([ConanFileReference(*folder.split("/")) for folder in subdirs]) else: ret = [] for subdir in subdirs: conan_ref = ConanFileReference(*subdir.split("/")) if _partial_match(b_pattern, conan_ref): ret.append(conan_ref) return sorted(ret) def 
search(self, pattern=None, ignorecase=True): """ Get all the info about any package Attributes: pattern = wildcards like opencv/* """ references = self.search_recipes(pattern, ignorecase) filtered = [] # Filter out restricted items for conan_ref in references: try: self._authorizer.check_read_conan(self._auth_user, conan_ref) filtered.append(conan_ref) except ForbiddenException: pass return filtered class ConanService(object): """Handles authorization and expose methods for REST API""" def __init__(self, authorizer, server_store, auth_user): self._authorizer = authorizer self._server_store = server_store self._auth_user = auth_user def get_recipe_snapshot(self, reference): """Gets a dict with filepaths and the md5: {filename: md5} """ self._authorizer.check_read_conan(self._auth_user, reference) snap = self._server_store.get_recipe_snapshot(reference) if not snap: raise NotFoundException("conanfile not found") return snap def get_conanfile_download_urls(self, reference, files_subset=None): """Gets a dict with filepaths and the urls: {filename: url} """ self._authorizer.check_read_conan(self._auth_user, reference) urls = self._server_store.get_download_conanfile_urls(reference, files_subset, self._auth_user) if not urls: raise NotFoundException("conanfile not found") return urls def get_conanfile_upload_urls(self, reference, filesizes): _validate_conan_reg_filenames(list(filesizes.keys())) self._authorizer.check_write_conan(self._auth_user, reference) urls = self._server_store.get_upload_conanfile_urls(reference, filesizes, self._auth_user) return urls def remove_conanfile(self, reference): self._authorizer.check_delete_conan(self._auth_user, reference) self._server_store.remove_conanfile(reference) def remove_packages(self, reference, package_ids_filter): for package_id in package_ids_filter: ref = PackageReference(reference, package_id) self._authorizer.check_delete_package(self._auth_user, ref) if not package_ids_filter: # Remove all packages, check that we can 
remove conanfile self._authorizer.check_delete_conan(self._auth_user, reference) self._server_store.remove_packages(reference, package_ids_filter) def remove_conanfile_files(self, reference, files): self._authorizer.check_delete_conan(self._auth_user, reference) self._server_store.remove_conanfile_files(reference, files) # Package methods def get_package_snapshot(self, package_reference): """Gets a list with filepaths and the urls and md5: [filename: {'url': url, 'md5': md5}] """ self._authorizer.check_read_package(self._auth_user, package_reference) snap = self._server_store.get_package_snapshot(package_reference) return snap def get_package_download_urls(self, package_reference, files_subset=None): """Gets a list with filepaths and the urls and md5: [filename: {'url': url, 'md5': md5}] """ self._authorizer.check_read_package(self._auth_user, package_reference) urls = self._server_store.get_download_package_urls(package_reference, files_subset=files_subset) return urls def get_package_upload_urls(self, package_reference, filesizes): """ :param package_reference: PackageReference :param filesizes: {filepath: bytes} :return {filepath: url} """ try: self._server_store.get_recipe_snapshot(package_reference.conan) except NotFoundException: raise NotFoundException("There are no remote conanfiles like %s" % str(package_reference.conan)) self._authorizer.check_write_package(self._auth_user, package_reference) urls = self._server_store.get_upload_package_urls(package_reference, filesizes, self._auth_user) return urls def _validate_conan_reg_filenames(files): message = "Invalid conans request" # Could be partial uploads, so we can't expect for all files to be present # # conanfile and digest in files # if CONANFILE not in files: # # Log something # raise RequestErrorException("Missing %s" % CONANFILE) # if CONAN_MANIFEST not in files: # # Log something # raise RequestErrorException("Missing %s" % CONAN_MANIFEST) # All contents in same directory (from conan_id) for filename 
in files: if ".." in filename: # Log something raise RequestErrorException(message)
mit
jonparrott/gcloud-python
storage/google/cloud/storage/bucket.py
2
66912
# Copyright 2014 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Create / interact with Google Cloud Storage buckets.""" import base64 import copy import datetime import json import warnings import six from google.api_core import page_iterator from google.api_core import datetime_helpers from google.cloud._helpers import _datetime_to_rfc3339 from google.cloud._helpers import _NOW from google.cloud._helpers import _rfc3339_to_datetime from google.cloud.exceptions import NotFound from google.cloud.iam import Policy from google.cloud.storage import _signing from google.cloud.storage._helpers import _PropertyMixin from google.cloud.storage._helpers import _scalar_property from google.cloud.storage._helpers import _validate_name from google.cloud.storage.acl import BucketACL from google.cloud.storage.acl import DefaultObjectACL from google.cloud.storage.blob import Blob from google.cloud.storage.blob import _get_encryption_headers from google.cloud.storage.notification import BucketNotification from google.cloud.storage.notification import NONE_PAYLOAD_FORMAT _LOCATION_SETTER_MESSAGE = ( "Assignment to 'Bucket.location' is deprecated, as it is only " "valid before the bucket is created. Instead, pass the location " "to `Bucket.create`.") def _blobs_page_start(iterator, page, response): """Grab prefixes after a :class:`~google.cloud.iterator.Page` started. 
:type iterator: :class:`~google.api_core.page_iterator.Iterator` :param iterator: The iterator that is currently in use. :type page: :class:`~google.cloud.api.core.page_iterator.Page` :param page: The page that was just created. :type response: dict :param response: The JSON API response for a page of blobs. """ page.prefixes = tuple(response.get('prefixes', ())) iterator.prefixes.update(page.prefixes) def _item_to_blob(iterator, item): """Convert a JSON blob to the native object. .. note:: This assumes that the ``bucket`` attribute has been added to the iterator after being created. :type iterator: :class:`~google.api_core.page_iterator.Iterator` :param iterator: The iterator that has retrieved the item. :type item: dict :param item: An item to be converted to a blob. :rtype: :class:`.Blob` :returns: The next blob in the page. """ name = item.get('name') blob = Blob(name, bucket=iterator.bucket) blob._set_properties(item) return blob def _item_to_notification(iterator, item): """Convert a JSON blob to the native object. .. note:: This assumes that the ``bucket`` attribute has been added to the iterator after being created. :type iterator: :class:`~google.api_core.page_iterator.Iterator` :param iterator: The iterator that has retrieved the item. :type item: dict :param item: An item to be converted to a blob. :rtype: :class:`.BucketNotification` :returns: The next notification being iterated. """ return BucketNotification.from_api_repr(item, bucket=iterator.bucket) class LifecycleRuleConditions(dict): """Map a single lifecycle rule for a bucket. See: https://cloud.google.com/storage/docs/lifecycle :type age: int :param age: (optional) apply rule action to items whos age, in days, exceeds this value. :type created_before: datetime.date :param created_before: (optional) apply rule action to items created before this date. :type is_live: bool :param is_live: (optional) if true, apply rule action to non-versioned items, or to items with no newer versions. 
If false, apply rule action to versioned items with at least one newer version. :type matches_storage_class: list(str), one or more of :attr:`Bucket._STORAGE_CLASSES`. :param matches_storage_class: (optional) apply rule action to items which whose storage class matches this value. :type number_of_newer_versions: int :param number_of_newer_versions: (optional) apply rule action to versioned items having N newer versions. :raises ValueError: if no arguments are passed. """ def __init__(self, age=None, created_before=None, is_live=None, matches_storage_class=None, number_of_newer_versions=None, _factory=False): conditions = {} if age is not None: conditions['age'] = age if created_before is not None: conditions['createdBefore'] = created_before.isoformat() if is_live is not None: conditions['isLive'] = is_live if matches_storage_class is not None: conditions['matchesStorageClass'] = matches_storage_class if number_of_newer_versions is not None: conditions['numNewerVersions'] = number_of_newer_versions if not _factory and not conditions: raise ValueError("Supply at least one condition") super(LifecycleRuleConditions, self).__init__(conditions) @classmethod def from_api_repr(cls, resource): """Factory: construct instance from resource. :type resource: dict :param resource: mapping as returned from API call. :rtype: :class:`LifecycleRuleConditions` :returns: Instance created from resource. 
""" instance = cls(_factory=True) instance.update(resource) return instance @property def age(self): """Conditon's age value.""" return self.get('age') @property def created_before(self): """Conditon's created_before value.""" before = self.get('createdBefore') if before is not None: return datetime_helpers.from_iso8601_date(before) @property def is_live(self): """Conditon's 'is_live' value.""" return self.get('isLive') @property def matches_storage_class(self): """Conditon's 'matches_storage_class' value.""" return self.get('matchesStorageClass') @property def number_of_newer_versions(self): """Conditon's 'number_of_newer_versions' value.""" return self.get('numNewerVersions') class LifecycleRuleDelete(dict): """Map a lifecycle rule deleting matching items. :type kw: dict :params kw: arguments passed to :class:`LifecycleRuleConditions`. """ def __init__(self, **kw): conditions = LifecycleRuleConditions(**kw) rule = { 'action': { 'type': 'Delete', }, 'condition': dict(conditions), } super(LifecycleRuleDelete, self).__init__(rule) @classmethod def from_api_repr(cls, resource): """Factory: construct instance from resource. :type resource: dict :param resource: mapping as returned from API call. :rtype: :class:`LifecycleRuleDelete` :returns: Instance created from resource. """ instance = cls(_factory=True) instance.update(resource) return instance class LifecycleRuleSetStorageClass(dict): """Map a lifecycle rule upating storage class of matching items. :type storage_class: str, one of :attr:`Bucket._STORAGE_CLASSES`. :param storage_class: new storage class to assign to matching items. :type kw: dict :params kw: arguments passed to :class:`LifecycleRuleConditions`. 
""" def __init__(self, storage_class, **kw): conditions = LifecycleRuleConditions(**kw) rule = { 'action': { 'type': 'SetStorageClass', 'storageClass': storage_class, }, 'condition': dict(conditions), } super(LifecycleRuleSetStorageClass, self).__init__(rule) @classmethod def from_api_repr(cls, resource): """Factory: construct instance from resource. :type resource: dict :param resource: mapping as returned from API call. :rtype: :class:`LifecycleRuleDelete` :returns: Instance created from resource. """ action = resource['action'] instance = cls(action['storageClass'], _factory=True) instance.update(resource) return instance class Bucket(_PropertyMixin): """A class representing a Bucket on Cloud Storage. :type client: :class:`google.cloud.storage.client.Client` :param client: A client which holds credentials and project configuration for the bucket (which requires a project). :type name: str :param name: The name of the bucket. Bucket names must start and end with a number or letter. :type user_project: str :param user_project: (Optional) the project ID to be billed for API requests made via this instance. """ _MAX_OBJECTS_FOR_ITERATION = 256 """Maximum number of existing objects allowed in iteration. This is used in Bucket.delete() and Bucket.make_public(). """ _STORAGE_CLASSES = ( 'MULTI_REGIONAL', 'REGIONAL', 'NEARLINE', 'COLDLINE', 'STANDARD', # alias for MULTI_REGIONAL/REGIONAL, based on location 'DURABLE_REDUCED_AVAILABILITY', # deprecated ) """Allowed values for :attr:`storage_class`. 
See https://cloud.google.com/storage/docs/json_api/v1/buckets#storageClass https://cloud.google.com/storage/docs/storage-classes """ def __init__(self, client, name=None, user_project=None): name = _validate_name(name) super(Bucket, self).__init__(name=name) self._client = client self._acl = BucketACL(self) self._default_object_acl = DefaultObjectACL(self) self._label_removals = set() self._user_project = user_project def __repr__(self): return '<Bucket: %s>' % (self.name,) @property def client(self): """The client bound to this bucket.""" return self._client def _set_properties(self, value): """Set the properties for the current object. :type value: dict or :class:`google.cloud.storage.batch._FutureDict` :param value: The properties to be set. """ self._label_removals.clear() return super(Bucket, self)._set_properties(value) @property def user_project(self): """Project ID to be billed for API requests made via this bucket. If unset, API requests are billed to the bucket owner. :rtype: str """ return self._user_project def blob(self, blob_name, chunk_size=None, encryption_key=None, kms_key_name=None): """Factory constructor for blob object. .. note:: This will not make an HTTP request; it simply instantiates a blob object owned by this bucket. :type blob_name: str :param blob_name: The name of the blob to be instantiated. :type chunk_size: int :param chunk_size: The size of a chunk of data whenever iterating (in bytes). This must be a multiple of 256 KB per the API specification. :type encryption_key: bytes :param encryption_key: Optional 32 byte encryption key for customer-supplied encryption. :type kms_key_name: str :param kms_key_name: Optional resource name of KMS key used to encrypt blob's content. :rtype: :class:`google.cloud.storage.blob.Blob` :returns: The blob object created. 
""" return Blob(name=blob_name, bucket=self, chunk_size=chunk_size, encryption_key=encryption_key, kms_key_name=kms_key_name) def notification(self, topic_name, topic_project=None, custom_attributes=None, event_types=None, blob_name_prefix=None, payload_format=NONE_PAYLOAD_FORMAT): """Factory: create a notification resource for the bucket. See: :class:`.BucketNotification` for parameters. :rtype: :class:`.BucketNotification` """ return BucketNotification( self, topic_name, topic_project=topic_project, custom_attributes=custom_attributes, event_types=event_types, blob_name_prefix=blob_name_prefix, payload_format=payload_format, ) def exists(self, client=None): """Determines whether or not this bucket exists. If :attr:`user_project` is set, bills the API request to that project. :type client: :class:`~google.cloud.storage.client.Client` or ``NoneType`` :param client: Optional. The client to use. If not passed, falls back to the ``client`` stored on the current bucket. :rtype: bool :returns: True if the bucket exists in Cloud Storage. """ client = self._require_client(client) # We only need the status code (200 or not) so we seek to # minimize the returned payload. query_params = {'fields': 'name'} if self.user_project is not None: query_params['userProject'] = self.user_project try: # We intentionally pass `_target_object=None` since fields=name # would limit the local properties. client._connection.api_request( method='GET', path=self.path, query_params=query_params, _target_object=None) # NOTE: This will not fail immediately in a batch. However, when # Batch.finish() is called, the resulting `NotFound` will be # raised. return True except NotFound: return False def create(self, client=None, project=None, location=None): """Creates current bucket. If the bucket already exists, will raise :class:`google.cloud.exceptions.Conflict`. This implements "storage.buckets.insert". If :attr:`user_project` is set, bills the API request to that project. 
:type client: :class:`~google.cloud.storage.client.Client` or ``NoneType`` :param client: Optional. The client to use. If not passed, falls back to the ``client`` stored on the current bucket. :type project: str :param project: Optional. The project under which the bucket is to be created. If not passed, uses the project set on the client. :raises ValueError: if :attr:`user_project` is set. :raises ValueError: if ``project`` is None and client's :attr:`project` is also None. :type location: str :param location: Optional. The location of the bucket. If not passed, the default location, US, will be used. See https://cloud.google.com/storage/docs/bucket-locations """ if self.user_project is not None: raise ValueError("Cannot create bucket with 'user_project' set.") client = self._require_client(client) if project is None: project = client.project if project is None: raise ValueError( "Client project not set: pass an explicit project.") query_params = {'project': project} properties = {key: self._properties[key] for key in self._changes} properties['name'] = self.name if location is not None: properties['location'] = location api_response = client._connection.api_request( method='POST', path='/b', query_params=query_params, data=properties, _target_object=self) self._set_properties(api_response) def patch(self, client=None): """Sends all changed properties in a PATCH request. Updates the ``_properties`` with the response from the backend. If :attr:`user_project` is set, bills the API request to that project. :type client: :class:`~google.cloud.storage.client.Client` or ``NoneType`` :param client: the client to use. If not passed, falls back to the ``client`` stored on the current object. """ # Special case: For buckets, it is possible that labels are being # removed; this requires special handling. 
if self._label_removals: self._changes.add('labels') self._properties.setdefault('labels', {}) for removed_label in self._label_removals: self._properties['labels'][removed_label] = None # Call the superclass method. return super(Bucket, self).patch(client=client) @property def acl(self): """Create our ACL on demand.""" return self._acl @property def default_object_acl(self): """Create our defaultObjectACL on demand.""" return self._default_object_acl @staticmethod def path_helper(bucket_name): """Relative URL path for a bucket. :type bucket_name: str :param bucket_name: The bucket name in the path. :rtype: str :returns: The relative URL path for ``bucket_name``. """ return '/b/' + bucket_name @property def path(self): """The URL path to this bucket.""" if not self.name: raise ValueError('Cannot determine path without bucket name.') return self.path_helper(self.name) def get_blob(self, blob_name, client=None, encryption_key=None, **kwargs): """Get a blob object by name. This will return None if the blob doesn't exist: .. literalinclude:: snippets.py :start-after: [START get_blob] :end-before: [END get_blob] If :attr:`user_project` is set, bills the API request to that project. :type blob_name: str :param blob_name: The name of the blob to retrieve. :type client: :class:`~google.cloud.storage.client.Client` or ``NoneType`` :param client: Optional. The client to use. If not passed, falls back to the ``client`` stored on the current bucket. :type encryption_key: bytes :param encryption_key: Optional 32 byte encryption key for customer-supplied encryption. See https://cloud.google.com/storage/docs/encryption#customer-supplied. :type kwargs: dict :param kwargs: Keyword arguments to pass to the :class:`~google.cloud.storage.blob.Blob` constructor. :rtype: :class:`google.cloud.storage.blob.Blob` or None :returns: The blob object if it exists, otherwise None. 
""" client = self._require_client(client) query_params = {} if self.user_project is not None: query_params['userProject'] = self.user_project blob = Blob(bucket=self, name=blob_name, encryption_key=encryption_key, **kwargs) try: headers = _get_encryption_headers(encryption_key) response = client._connection.api_request( method='GET', path=blob.path, query_params=query_params, headers=headers, _target_object=blob, ) # NOTE: We assume response.get('name') matches `blob_name`. blob._set_properties(response) # NOTE: This will not fail immediately in a batch. However, when # Batch.finish() is called, the resulting `NotFound` will be # raised. return blob except NotFound: return None def list_blobs(self, max_results=None, page_token=None, prefix=None, delimiter=None, versions=None, projection='noAcl', fields=None, client=None): """Return an iterator used to find blobs in the bucket. If :attr:`user_project` is set, bills the API request to that project. :type max_results: int :param max_results: (Optional) Maximum number of blobs to return. :type page_token: str :param page_token: (Optional) Opaque marker for the next "page" of blobs. If not passed, will return the first page of blobs. :type prefix: str :param prefix: (Optional) prefix used to filter blobs. :type delimiter: str :param delimiter: (Optional) Delimiter, used with ``prefix`` to emulate hierarchy. :type versions: bool :param versions: (Optional) Whether object versions should be returned as separate blobs. :type projection: str :param projection: (Optional) If used, must be 'full' or 'noAcl'. Defaults to ``'noAcl'``. Specifies the set of properties to return. :type fields: str :param fields: (Optional) Selector specifying which fields to include in a partial response. Must be a list of fields. For example to get a partial response with just the next page token and the language of each blob returned: ``'items/contentLanguage,nextPageToken'``. 
:type client: :class:`~google.cloud.storage.client.Client` :param client: (Optional) The client to use. If not passed, falls back to the ``client`` stored on the current bucket. :rtype: :class:`~google.api_core.page_iterator.Iterator` :returns: Iterator of all :class:`~google.cloud.storage.blob.Blob` in this bucket matching the arguments. """ extra_params = {'projection': projection} if prefix is not None: extra_params['prefix'] = prefix if delimiter is not None: extra_params['delimiter'] = delimiter if versions is not None: extra_params['versions'] = versions if fields is not None: extra_params['fields'] = fields if self.user_project is not None: extra_params['userProject'] = self.user_project client = self._require_client(client) path = self.path + '/o' iterator = page_iterator.HTTPIterator( client=client, api_request=client._connection.api_request, path=path, item_to_value=_item_to_blob, page_token=page_token, max_results=max_results, extra_params=extra_params, page_start=_blobs_page_start) iterator.bucket = self iterator.prefixes = set() return iterator def list_notifications(self, client=None): """List Pub / Sub notifications for this bucket. See: https://cloud.google.com/storage/docs/json_api/v1/notifications/list If :attr:`user_project` is set, bills the API request to that project. :type client: :class:`~google.cloud.storage.client.Client` or ``NoneType`` :param client: Optional. The client to use. If not passed, falls back to the ``client`` stored on the current bucket. :rtype: list of :class:`.BucketNotification` :returns: notification instances """ client = self._require_client(client) path = self.path + '/notificationConfigs' iterator = page_iterator.HTTPIterator( client=client, api_request=client._connection.api_request, path=path, item_to_value=_item_to_notification) iterator.bucket = self return iterator def delete(self, force=False, client=None): """Delete this bucket. The bucket **must** be empty in order to submit a delete request. 
If ``force=True`` is passed, this will first attempt to delete all the objects / blobs in the bucket (i.e. try to empty the bucket). If the bucket doesn't exist, this will raise :class:`google.cloud.exceptions.NotFound`. If the bucket is not empty (and ``force=False``), will raise :class:`google.cloud.exceptions.Conflict`. If ``force=True`` and the bucket contains more than 256 objects / blobs this will cowardly refuse to delete the objects (or the bucket). This is to prevent accidental bucket deletion and to prevent extremely long runtime of this method. If :attr:`user_project` is set, bills the API request to that project. :type force: bool :param force: If True, empties the bucket's objects then deletes it. :type client: :class:`~google.cloud.storage.client.Client` or ``NoneType`` :param client: Optional. The client to use. If not passed, falls back to the ``client`` stored on the current bucket. :raises: :class:`ValueError` if ``force`` is ``True`` and the bucket contains more than 256 objects / blobs. """ client = self._require_client(client) query_params = {} if self.user_project is not None: query_params['userProject'] = self.user_project if force: blobs = list(self.list_blobs( max_results=self._MAX_OBJECTS_FOR_ITERATION + 1, client=client)) if len(blobs) > self._MAX_OBJECTS_FOR_ITERATION: message = ( 'Refusing to delete bucket with more than ' '%d objects. If you actually want to delete ' 'this bucket, please delete the objects ' 'yourself before calling Bucket.delete().' ) % (self._MAX_OBJECTS_FOR_ITERATION,) raise ValueError(message) # Ignore 404 errors on delete. self.delete_blobs(blobs, on_error=lambda blob: None, client=client) # We intentionally pass `_target_object=None` since a DELETE # request has no response value (whether in a standard request or # in a batch request). 
client._connection.api_request( method='DELETE', path=self.path, query_params=query_params, _target_object=None) def delete_blob(self, blob_name, client=None): """Deletes a blob from the current bucket. If the blob isn't found (backend 404), raises a :class:`google.cloud.exceptions.NotFound`. For example: .. literalinclude:: snippets.py :start-after: [START delete_blob] :end-before: [END delete_blob] If :attr:`user_project` is set, bills the API request to that project. :type blob_name: str :param blob_name: A blob name to delete. :type client: :class:`~google.cloud.storage.client.Client` or ``NoneType`` :param client: Optional. The client to use. If not passed, falls back to the ``client`` stored on the current bucket. :raises: :class:`google.cloud.exceptions.NotFound` (to suppress the exception, call ``delete_blobs``, passing a no-op ``on_error`` callback, e.g.: .. literalinclude:: snippets.py :start-after: [START delete_blobs] :end-before: [END delete_blobs] """ client = self._require_client(client) query_params = {} if self.user_project is not None: query_params['userProject'] = self.user_project blob_path = Blob.path_helper(self.path, blob_name) # We intentionally pass `_target_object=None` since a DELETE # request has no response value (whether in a standard request or # in a batch request). client._connection.api_request( method='DELETE', path=blob_path, query_params=query_params, _target_object=None) def delete_blobs(self, blobs, on_error=None, client=None): """Deletes a list of blobs from the current bucket. Uses :meth:`delete_blob` to delete each individual blob. If :attr:`user_project` is set, bills the API request to that project. :type blobs: list :param blobs: A list of :class:`~google.cloud.storage.blob.Blob`-s or blob names to delete. :type on_error: callable :param on_error: (Optional) Takes single argument: ``blob``. Called called once for each blob raising :class:`~google.cloud.exceptions.NotFound`; otherwise, the exception is propagated. 
:type client: :class:`~google.cloud.storage.client.Client` :param client: (Optional) The client to use. If not passed, falls back to the ``client`` stored on the current bucket. :raises: :class:`~google.cloud.exceptions.NotFound` (if `on_error` is not passed). """ for blob in blobs: try: blob_name = blob if not isinstance(blob_name, six.string_types): blob_name = blob.name self.delete_blob(blob_name, client=client) except NotFound: if on_error is not None: on_error(blob) else: raise def copy_blob(self, blob, destination_bucket, new_name=None, client=None, preserve_acl=True, source_generation=None): """Copy the given blob to the given bucket, optionally with a new name. If :attr:`user_project` is set, bills the API request to that project. :type blob: :class:`google.cloud.storage.blob.Blob` :param blob: The blob to be copied. :type destination_bucket: :class:`google.cloud.storage.bucket.Bucket` :param destination_bucket: The bucket into which the blob should be copied. :type new_name: str :param new_name: (optional) the new name for the copied file. :type client: :class:`~google.cloud.storage.client.Client` or ``NoneType`` :param client: Optional. The client to use. If not passed, falls back to the ``client`` stored on the current bucket. :type preserve_acl: bool :param preserve_acl: Optional. Copies ACL from old blob to new blob. Default: True. :type source_generation: long :param source_generation: Optional. The generation of the blob to be copied. :rtype: :class:`google.cloud.storage.blob.Blob` :returns: The new Blob. 
""" client = self._require_client(client) query_params = {} if self.user_project is not None: query_params['userProject'] = self.user_project if source_generation is not None: query_params['sourceGeneration'] = source_generation if new_name is None: new_name = blob.name new_blob = Blob(bucket=destination_bucket, name=new_name) api_path = blob.path + '/copyTo' + new_blob.path copy_result = client._connection.api_request( method='POST', path=api_path, query_params=query_params, _target_object=new_blob, ) if not preserve_acl: new_blob.acl.save(acl={}, client=client) new_blob._set_properties(copy_result) return new_blob def rename_blob(self, blob, new_name, client=None): """Rename the given blob using copy and delete operations. If :attr:`user_project` is set, bills the API request to that project. Effectively, copies blob to the same bucket with a new name, then deletes the blob. .. warning:: This method will first duplicate the data and then delete the old blob. This means that with very large objects renaming could be a very (temporarily) costly or a very slow operation. :type blob: :class:`google.cloud.storage.blob.Blob` :param blob: The blob to be renamed. :type new_name: str :param new_name: The new name for this blob. :type client: :class:`~google.cloud.storage.client.Client` or ``NoneType`` :param client: Optional. The client to use. If not passed, falls back to the ``client`` stored on the current bucket. :rtype: :class:`Blob` :returns: The newly-renamed blob. """ same_name = blob.name == new_name new_blob = self.copy_blob(blob, self, new_name, client=client) if not same_name: blob.delete(client=client) return new_blob @property def cors(self): """Retrieve or set CORS policies configured for this bucket. See http://www.w3.org/TR/cors/ and https://cloud.google.com/storage/docs/json_api/v1/buckets .. note:: The getter for this property returns a list which contains *copies* of the bucket's CORS policy mappings. 
           Mutating the list or one of its dicts has no effect unless
           you then re-assign the dict via the setter.  E.g.:

           >>> policies = bucket.cors
           >>> policies.append({'origin': '/foo', ...})
           >>> policies[1]['maxAgeSeconds'] = 3600
           >>> del policies[0]
           >>> bucket.cors = policies
           >>> bucket.update()

        :setter: Set CORS policies for this bucket.
        :getter: Gets the CORS policies for this bucket.

        :rtype: list of dictionaries
        :returns: A sequence of mappings describing each CORS policy.
        """
        return [copy.deepcopy(policy)
                for policy in self._properties.get('cors', ())]

    @cors.setter
    def cors(self, entries):
        """Set CORS policies configured for this bucket.

        See http://www.w3.org/TR/cors/ and
             https://cloud.google.com/storage/docs/json_api/v1/buckets

        :type entries: list of dictionaries
        :param entries: A sequence of mappings describing each CORS policy.
        """
        self._patch_property('cors', entries)

    default_event_based_hold = _scalar_property('defaultEventBasedHold')
    """Are uploaded objects automatically placed under an event-based hold?

    If True, uploaded objects will be placed under an event-based hold to be
    released at a future time. When released, an object will then begin the
    retention period determined by the policy retention period for the object
    bucket.

    See https://cloud.google.com/storage/docs/json_api/v1/buckets

    If the property is not set locally, returns ``None``.

    :rtype: bool or ``NoneType``
    """

    @property
    def default_kms_key_name(self):
        """Retrieve / set default KMS encryption key for objects in the bucket.

        See https://cloud.google.com/storage/docs/json_api/v1/buckets

        :setter: Set default KMS encryption key for items in this bucket.
        :getter: Get default KMS encryption key for items in this bucket.

        :rtype: str
        :returns: Default KMS encryption key, or ``None`` if not set.
""" encryption_config = self._properties.get('encryption', {}) return encryption_config.get('defaultKmsKeyName') @default_kms_key_name.setter def default_kms_key_name(self, value): """Set default KMS encryption key for objects in the bucket. :type value: str or None :param value: new KMS key name (None to clear any existing key). """ encryption_config = self._properties.get('encryption', {}) encryption_config['defaultKmsKeyName'] = value self._patch_property('encryption', encryption_config) @property def labels(self): """Retrieve or set labels assigned to this bucket. See https://cloud.google.com/storage/docs/json_api/v1/buckets#labels .. note:: The getter for this property returns a dict which is a *copy* of the bucket's labels. Mutating that dict has no effect unless you then re-assign the dict via the setter. E.g.: >>> labels = bucket.labels >>> labels['new_key'] = 'some-label' >>> del labels['old_key'] >>> bucket.labels = labels >>> bucket.update() :setter: Set labels for this bucket. :getter: Gets the labels for this bucket. :rtype: :class:`dict` :returns: Name-value pairs (string->string) labelling the bucket. """ labels = self._properties.get('labels') if labels is None: return {} return copy.deepcopy(labels) @labels.setter def labels(self, mapping): """Set labels assigned to this bucket. See https://cloud.google.com/storage/docs/json_api/v1/buckets#labels :type mapping: :class:`dict` :param mapping: Name-value pairs (string->string) labelling the bucket. """ # If any labels have been expressly removed, we need to track this # so that a future .patch() call can do the correct thing. existing = set([k for k in self.labels.keys()]) incoming = set([k for k in mapping.keys()]) self._label_removals = self._label_removals.union( existing.difference(incoming), ) # Actually update the labels on the object. self._patch_property('labels', copy.deepcopy(mapping)) @property def etag(self): """Retrieve the ETag for the bucket. 
See https://tools.ietf.org/html/rfc2616#section-3.11 and https://cloud.google.com/storage/docs/json_api/v1/buckets :rtype: str or ``NoneType`` :returns: The bucket etag or ``None`` if the bucket's resource has not been loaded from the server. """ return self._properties.get('etag') @property def id(self): """Retrieve the ID for the bucket. See https://cloud.google.com/storage/docs/json_api/v1/buckets :rtype: str or ``NoneType`` :returns: The ID of the bucket or ``None`` if the bucket's resource has not been loaded from the server. """ return self._properties.get('id') @property def lifecycle_rules(self): """Retrieve or set lifecycle rules configured for this bucket. See https://cloud.google.com/storage/docs/lifecycle and https://cloud.google.com/storage/docs/json_api/v1/buckets .. note:: The getter for this property returns a list which contains *copies* of the bucket's lifecycle rules mappings. Mutating the list or one of its dicts has no effect unless you then re-assign the dict via the setter. E.g.: >>> rules = bucket.lifecycle_rules >>> rules.append({'origin': '/foo', ...}) >>> rules[1]['rule']['action']['type'] = 'Delete' >>> del rules[0] >>> bucket.lifecycle_rules = rules >>> bucket.update() :setter: Set lifestyle rules for this bucket. :getter: Gets the lifestyle rules for this bucket. :rtype: generator(dict) :returns: A sequence of mappings describing each lifecycle rule. """ info = self._properties.get('lifecycle', {}) for rule in info.get('rule', ()): action_type = rule['action']['type'] if action_type == 'Delete': yield LifecycleRuleDelete.from_api_repr(rule) elif action_type == 'SetStorageClass': yield LifecycleRuleSetStorageClass.from_api_repr(rule) else: raise ValueError("Unknown lifecycle rule: {}".format(rule)) @lifecycle_rules.setter def lifecycle_rules(self, rules): """Set lifestyle rules configured for this bucket. 
        See
        https://cloud.google.com/storage/docs/lifecycle
        and
        https://cloud.google.com/storage/docs/json_api/v1/buckets

        :type rules: list of dictionaries
        :param rules: A sequence of mappings describing each lifecycle rule.
        """
        rules = [dict(rule) for rule in rules]  # Convert helpers if needed
        self._patch_property('lifecycle', {'rule': rules})

    def clear_lifecyle_rules(self):
        """Clear lifecycle rules configured for this bucket.

        See
        https://cloud.google.com/storage/docs/lifecycle
        and
        https://cloud.google.com/storage/docs/json_api/v1/buckets
        """
        self.lifecycle_rules = []

    def add_lifecycle_delete_rule(self, **kw):
        """Add a "delete" rule to lifecycle rules configured for this bucket.

        See
        https://cloud.google.com/storage/docs/lifecycle
        and
        https://cloud.google.com/storage/docs/json_api/v1/buckets

        .. literalinclude:: snippets.py
           :start-after: [START add_lifecycle_delete_rule]
           :end-before: [END add_lifecycle_delete_rule]

        :type kw: dict
        :param kw: arguments passed to :class:`LifecycleRuleConditions`.
        """
        rules = list(self.lifecycle_rules)
        rules.append(LifecycleRuleDelete(**kw))
        self.lifecycle_rules = rules

    def add_lifecycle_set_storage_class_rule(self, storage_class, **kw):
        """Add a "set storage class" rule to lifecycle rules configured for
        this bucket.

        See
        https://cloud.google.com/storage/docs/lifecycle
        and
        https://cloud.google.com/storage/docs/json_api/v1/buckets

        .. literalinclude:: snippets.py
           :start-after: [START add_lifecycle_set_storage_class_rule]
           :end-before: [END add_lifecycle_set_storage_class_rule]

        :type storage_class: str, one of :attr:`_STORAGE_CLASSES`.
        :param storage_class: new storage class to assign to matching items.

        :type kw: dict
        :param kw: arguments passed to :class:`LifecycleRuleConditions`.
        """
        rules = list(self.lifecycle_rules)
        rules.append(LifecycleRuleSetStorageClass(storage_class, **kw))
        self.lifecycle_rules = rules

    _location = _scalar_property('location')

    @property
    def location(self):
        """Retrieve location configured for this bucket.
See https://cloud.google.com/storage/docs/json_api/v1/buckets and https://cloud.google.com/storage/docs/bucket-locations Returns ``None`` if the property has not been set before creation, or if the bucket's resource has not been loaded from the server. :rtype: str or ``NoneType`` """ return self._location @location.setter def location(self, value): """(Deprecated) Set `Bucket.location` This can only be set at bucket **creation** time. See https://cloud.google.com/storage/docs/json_api/v1/buckets and https://cloud.google.com/storage/docs/bucket-locations .. warning:: Assignment to 'Bucket.location' is deprecated, as it is only valid before the bucket is created. Instead, pass the location to `Bucket.create`. """ warnings.warn( _LOCATION_SETTER_MESSAGE, DeprecationWarning, stacklevel=2) self._location = value def get_logging(self): """Return info about access logging for this bucket. See https://cloud.google.com/storage/docs/access-logs#status :rtype: dict or None :returns: a dict w/ keys, ``logBucket`` and ``logObjectPrefix`` (if logging is enabled), or None (if not). """ info = self._properties.get('logging') return copy.deepcopy(info) def enable_logging(self, bucket_name, object_prefix=''): """Enable access logging for this bucket. See https://cloud.google.com/storage/docs/access-logs :type bucket_name: str :param bucket_name: name of bucket in which to store access logs :type object_prefix: str :param object_prefix: prefix for access log filenames """ info = {'logBucket': bucket_name, 'logObjectPrefix': object_prefix} self._patch_property('logging', info) def disable_logging(self): """Disable access logging for this bucket. See https://cloud.google.com/storage/docs/access-logs#disabling """ self._patch_property('logging', None) @property def metageneration(self): """Retrieve the metageneration for the bucket. 
        See
        https://cloud.google.com/storage/docs/json_api/v1/buckets

        :rtype: int or ``NoneType``
        :returns: The metageneration of the bucket or ``None`` if the bucket's
                  resource has not been loaded from the server.
        """
        metageneration = self._properties.get('metageneration')
        if metageneration is not None:
            return int(metageneration)

    @property
    def owner(self):
        """Retrieve info about the owner of the bucket.

        See
        https://cloud.google.com/storage/docs/json_api/v1/buckets

        :rtype: dict or ``NoneType``
        :returns: Mapping of owner's role/ID. Returns ``None`` if the bucket's
                  resource has not been loaded from the server.
        """
        return copy.deepcopy(self._properties.get('owner'))

    @property
    def project_number(self):
        """Retrieve the number of the project to which the bucket is assigned.

        See
        https://cloud.google.com/storage/docs/json_api/v1/buckets

        :rtype: int or ``NoneType``
        :returns: The project number that owns the bucket or ``None`` if the
                  bucket's resource has not been loaded from the server.
        """
        project_number = self._properties.get('projectNumber')
        if project_number is not None:
            return int(project_number)

    @property
    def retention_policy_effective_time(self):
        """Retrieve the effective time of the bucket's retention policy.

        :rtype: datetime.datetime or ``NoneType``
        :returns: point-in-time at which the bucket's retention policy is
                  effective, or ``None`` if the property is not set locally.
        """
        policy = self._properties.get('retentionPolicy')
        if policy is not None:
            timestamp = policy.get('effectiveTime')
            if timestamp is not None:
                return _rfc3339_to_datetime(timestamp)

    @property
    def retention_policy_locked(self):
        """Retrieve whether the bucket's retention policy is locked.

        :rtype: bool
        :returns: True if the bucket's policy is locked, or else False
                  if the policy is not locked, or the property is not
                  set locally.
""" policy = self._properties.get('retentionPolicy') if policy is not None: return policy.get('isLocked') @property def retention_period(self): """Retrieve or set the retention period for items in the bucket. :rtype: int or ``NoneType`` :returns: number of seconds to retain items after upload or release from event-based lock, or ``None`` if the property is not set locally. """ policy = self._properties.get('retentionPolicy') if policy is not None: period = policy.get('retentionPeriod') if period is not None: return int(period) @retention_period.setter def retention_period(self, value): """Set the retention period for items in the bucket. :type value: int :param value: number of seconds to retain items after upload or release from event-based lock. :raises ValueError: if the bucket's retention policy is locked. """ policy = self._properties.setdefault('retentionPolicy', {}) if value is not None: policy['retentionPeriod'] = str(value) else: policy = None self._patch_property('retentionPolicy', policy) @property def self_link(self): """Retrieve the URI for the bucket. See https://cloud.google.com/storage/docs/json_api/v1/buckets :rtype: str or ``NoneType`` :returns: The self link for the bucket or ``None`` if the bucket's resource has not been loaded from the server. """ return self._properties.get('selfLink') @property def storage_class(self): """Retrieve or set the storage class for the bucket. See https://cloud.google.com/storage/docs/storage-classes :setter: Set the storage class for this bucket. :getter: Gets the the storage class for this bucket. :rtype: str or ``NoneType`` :returns: If set, one of "MULTI_REGIONAL", "REGIONAL", "NEARLINE", "COLDLINE", "STANDARD", or "DURABLE_REDUCED_AVAILABILITY", else ``None``. """ return self._properties.get('storageClass') @storage_class.setter def storage_class(self, value): """Set the storage class for the bucket. 
See https://cloud.google.com/storage/docs/storage-classes :type value: str :param value: one of "MULTI_REGIONAL", "REGIONAL", "NEARLINE", "COLDLINE", "STANDARD", or "DURABLE_REDUCED_AVAILABILITY" """ if value not in self._STORAGE_CLASSES: raise ValueError('Invalid storage class: %s' % (value,)) self._patch_property('storageClass', value) @property def time_created(self): """Retrieve the timestamp at which the bucket was created. See https://cloud.google.com/storage/docs/json_api/v1/buckets :rtype: :class:`datetime.datetime` or ``NoneType`` :returns: Datetime object parsed from RFC3339 valid timestamp, or ``None`` if the bucket's resource has not been loaded from the server. """ value = self._properties.get('timeCreated') if value is not None: return _rfc3339_to_datetime(value) @property def versioning_enabled(self): """Is versioning enabled for this bucket? See https://cloud.google.com/storage/docs/object-versioning for details. :setter: Update whether versioning is enabled for this bucket. :getter: Query whether versioning is enabled for this bucket. :rtype: bool :returns: True if enabled, else False. """ versioning = self._properties.get('versioning', {}) return versioning.get('enabled', False) @versioning_enabled.setter def versioning_enabled(self, value): """Enable versioning for this bucket. See https://cloud.google.com/storage/docs/object-versioning for details. :type value: convertible to boolean :param value: should versioning be enabled for the bucket? """ self._patch_property('versioning', {'enabled': bool(value)}) @property def requester_pays(self): """Does the requester pay for API requests for this bucket? See https://cloud.google.com/storage/docs/requester-pays for details. :setter: Update whether requester pays for this bucket. :getter: Query whether requester pays for this bucket. :rtype: bool :returns: True if requester pays for API requests for the bucket, else False. 
""" versioning = self._properties.get('billing', {}) return versioning.get('requesterPays', False) @requester_pays.setter def requester_pays(self, value): """Update whether requester pays for API requests for this bucket. See https://cloud.google.com/storage/docs/<DOCS-MISSING> for details. :type value: convertible to boolean :param value: should requester pay for API requests for the bucket? """ self._patch_property('billing', {'requesterPays': bool(value)}) def configure_website(self, main_page_suffix=None, not_found_page=None): """Configure website-related properties. See https://cloud.google.com/storage/docs/hosting-static-website .. note:: This (apparently) only works if your bucket name is a domain name (and to do that, you need to get approved somehow...). If you want this bucket to host a website, just provide the name of an index page and a page to use when a blob isn't found: .. literalinclude:: snippets.py :start-after: [START configure_website] :end-before: [END configure_website] You probably should also make the whole bucket public: .. literalinclude:: snippets.py :start-after: [START make_public] :end-before: [END make_public] This says: "Make the bucket public, and all the stuff already in the bucket, and anything else I add to the bucket. Just make it all public." :type main_page_suffix: str :param main_page_suffix: The page to use as the main page of a directory. Typically something like index.html. :type not_found_page: str :param not_found_page: The file to use when a page isn't found. """ data = { 'mainPageSuffix': main_page_suffix, 'notFoundPage': not_found_page, } self._patch_property('website', data) def disable_website(self): """Disable the website configuration for this bucket. This is really just a shortcut for setting the website-related attributes to ``None``. """ return self.configure_website(None, None) def get_iam_policy(self, client=None): """Retrieve the IAM policy for the bucket. 
See https://cloud.google.com/storage/docs/json_api/v1/buckets/getIamPolicy If :attr:`user_project` is set, bills the API request to that project. :type client: :class:`~google.cloud.storage.client.Client` or ``NoneType`` :param client: Optional. The client to use. If not passed, falls back to the ``client`` stored on the current bucket. :rtype: :class:`google.cloud.iam.Policy` :returns: the policy instance, based on the resource returned from the ``getIamPolicy`` API request. """ client = self._require_client(client) query_params = {} if self.user_project is not None: query_params['userProject'] = self.user_project info = client._connection.api_request( method='GET', path='%s/iam' % (self.path,), query_params=query_params, _target_object=None) return Policy.from_api_repr(info) def set_iam_policy(self, policy, client=None): """Update the IAM policy for the bucket. See https://cloud.google.com/storage/docs/json_api/v1/buckets/setIamPolicy If :attr:`user_project` is set, bills the API request to that project. :type policy: :class:`google.cloud.iam.Policy` :param policy: policy instance used to update bucket's IAM policy. :type client: :class:`~google.cloud.storage.client.Client` or ``NoneType`` :param client: Optional. The client to use. If not passed, falls back to the ``client`` stored on the current bucket. :rtype: :class:`google.cloud.iam.Policy` :returns: the policy instance, based on the resource returned from the ``setIamPolicy`` API request. 
""" client = self._require_client(client) query_params = {} if self.user_project is not None: query_params['userProject'] = self.user_project resource = policy.to_api_repr() resource['resourceId'] = self.path info = client._connection.api_request( method='PUT', path='%s/iam' % (self.path,), query_params=query_params, data=resource, _target_object=None) return Policy.from_api_repr(info) def test_iam_permissions(self, permissions, client=None): """API call: test permissions See https://cloud.google.com/storage/docs/json_api/v1/buckets/testIamPermissions If :attr:`user_project` is set, bills the API request to that project. :type permissions: list of string :param permissions: the permissions to check :type client: :class:`~google.cloud.storage.client.Client` or ``NoneType`` :param client: Optional. The client to use. If not passed, falls back to the ``client`` stored on the current bucket. :rtype: list of string :returns: the permissions returned by the ``testIamPermissions`` API request. """ client = self._require_client(client) query_params = {'permissions': permissions} if self.user_project is not None: query_params['userProject'] = self.user_project path = '%s/iam/testPermissions' % (self.path,) resp = client._connection.api_request( method='GET', path=path, query_params=query_params) return resp.get('permissions', []) def make_public(self, recursive=False, future=False, client=None): """Update bucket's ACL, granting read access to anonymous users. :type recursive: bool :param recursive: If True, this will make all blobs inside the bucket public as well. :type future: bool :param future: If True, this will make all objects created in the future public as well. :type client: :class:`~google.cloud.storage.client.Client` or ``NoneType`` :param client: Optional. The client to use. If not passed, falls back to the ``client`` stored on the current bucket. :raises ValueError: If ``recursive`` is True, and the bucket contains more than 256 blobs. 
This is to prevent extremely long runtime of this method. For such buckets, iterate over the blobs returned by :meth:`list_blobs` and call :meth:`~google.cloud.storage.blob.Blob.make_public` for each blob. """ self.acl.all().grant_read() self.acl.save(client=client) if future: doa = self.default_object_acl if not doa.loaded: doa.reload(client=client) doa.all().grant_read() doa.save(client=client) if recursive: blobs = list(self.list_blobs( projection='full', max_results=self._MAX_OBJECTS_FOR_ITERATION + 1, client=client)) if len(blobs) > self._MAX_OBJECTS_FOR_ITERATION: message = ( "Refusing to make public recursively with more than " "%d objects. If you actually want to make every object " "in this bucket public, iterate through the blobs " "returned by 'Bucket.list_blobs()' and call " "'make_public' on each one." ) % (self._MAX_OBJECTS_FOR_ITERATION,) raise ValueError(message) for blob in blobs: blob.acl.all().grant_read() blob.acl.save(client=client) def make_private(self, recursive=False, future=False, client=None): """Update bucket's ACL, revoking read access for anonymous users. :type recursive: bool :param recursive: If True, this will make all blobs inside the bucket private as well. :type future: bool :param future: If True, this will make all objects created in the future private as well. :type client: :class:`~google.cloud.storage.client.Client` or ``NoneType`` :param client: Optional. The client to use. If not passed, falls back to the ``client`` stored on the current bucket. :raises ValueError: If ``recursive`` is True, and the bucket contains more than 256 blobs. This is to prevent extremely long runtime of this method. For such buckets, iterate over the blobs returned by :meth:`list_blobs` and call :meth:`~google.cloud.storage.blob.Blob.make_private` for each blob. 
""" self.acl.all().revoke_read() self.acl.save(client=client) if future: doa = self.default_object_acl if not doa.loaded: doa.reload(client=client) doa.all().revoke_read() doa.save(client=client) if recursive: blobs = list(self.list_blobs( projection='full', max_results=self._MAX_OBJECTS_FOR_ITERATION + 1, client=client)) if len(blobs) > self._MAX_OBJECTS_FOR_ITERATION: message = ( 'Refusing to make private recursively with more than ' '%d objects. If you actually want to make every object ' "in this bucket private, iterate through the blobs " "returned by 'Bucket.list_blobs()' and call " "'make_private' on each one." ) % (self._MAX_OBJECTS_FOR_ITERATION,) raise ValueError(message) for blob in blobs: blob.acl.all().revoke_read() blob.acl.save(client=client) def generate_upload_policy( self, conditions, expiration=None, client=None): """Create a signed upload policy for uploading objects. This method generates and signs a policy document. You can use `policy documents`_ to allow visitors to a website to upload files to Google Cloud Storage without giving them direct write access. For example: .. literalinclude:: snippets.py :start-after: [START policy_document] :end-before: [END policy_document] .. _policy documents: https://cloud.google.com/storage/docs/xml-api\ /post-object#policydocument :type expiration: datetime :param expiration: Optional expiration in UTC. If not specified, the policy will expire in 1 hour. :type conditions: list :param conditions: A list of conditions as described in the `policy documents`_ documentation. :type client: :class:`~google.cloud.storage.client.Client` :param client: Optional. The client to use. If not passed, falls back to the ``client`` stored on the current bucket. :rtype: dict :returns: A dictionary of (form field name, form field value) of form fields that should be added to your HTML upload form in order to attach the signature. 
""" client = self._require_client(client) credentials = client._base_connection.credentials _signing.ensure_signed_credentials(credentials) if expiration is None: expiration = _NOW() + datetime.timedelta(hours=1) conditions = conditions + [ {'bucket': self.name}, ] policy_document = { 'expiration': _datetime_to_rfc3339(expiration), 'conditions': conditions, } encoded_policy_document = base64.b64encode( json.dumps(policy_document).encode('utf-8')) signature = base64.b64encode( credentials.sign_bytes(encoded_policy_document)) fields = { 'bucket': self.name, 'GoogleAccessId': credentials.signer_email, 'policy': encoded_policy_document.decode('utf-8'), 'signature': signature.decode('utf-8'), } return fields def lock_retention_policy(self, client=None): """Lock the bucket's retention policy. :raises ValueError: if the bucket has no metageneration (i.e., new or never reloaded); if the bucket has no retention policy assigned; if the bucket's retention policy is already locked. """ if 'metageneration' not in self._properties: raise ValueError( "Bucket has no retention policy assigned: try 'reload'?") policy = self._properties.get('retentionPolicy') if policy is None: raise ValueError( "Bucket has no retention policy assigned: try 'reload'?") if policy.get('isLocked'): raise ValueError("Bucket's retention policy is already locked.") client = self._require_client(client) query_params = {'ifMetagenerationMatch': self.metageneration} if self.user_project is not None: query_params['userProject'] = self.user_project path = '/b/{}/lockRetentionPolicy'.format(self.name) api_response = client._connection.api_request( method='POST', path=path, query_params=query_params, _target_object=self) self._set_properties(api_response)
apache-2.0
tanglei528/horizon
openstack_dashboard/openstack/common/gettextutils.py
23
7462
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2012 Red Hat, Inc. # All Rights Reserved. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ gettext for openstack-common modules. Usual usage in an openstack.common module: from openstack_dashboard.openstack.common.gettextutils import _ """ import copy import gettext import logging.handlers import os import UserString _localedir = os.environ.get('openstack_dashboard'.upper() + '_LOCALEDIR') _t = gettext.translation('openstack_dashboard', localedir=_localedir, fallback=True) def _(msg): return _t.ugettext(msg) def install(domain): """Install a _() function using the given translation domain. Given a translation domain, install a _() function using gettext's install() function. The main difference from gettext.install() is that we allow overriding the default localedir (e.g. /usr/share/locale) using a translation-domain-specific environment variable (e.g. NOVA_LOCALEDIR). """ gettext.install(domain, localedir=os.environ.get(domain.upper() + '_LOCALEDIR'), unicode=True) """ Lazy gettext functionality. The following is an attempt to introduce a deferred way to do translations on messages in OpenStack. We attempt to override the standard _() function and % (format string) operation to build Message objects that can later be translated when we have more information. 
Also included is an example LogHandler that translates Messages to an associated locale, effectively allowing many logs, each with their own locale. """ def get_lazy_gettext(domain): """Assemble and return a lazy gettext function for a given domain. Factory method for a project/module to get a lazy gettext function for its own translation domain (i.e. nova, glance, cinder, etc.) """ def _lazy_gettext(msg): """ Create and return a Message object encapsulating a string so that we can translate it later when needed. """ return Message(msg, domain) return _lazy_gettext class Message(UserString.UserString, object): """Class used to encapsulate translatable messages.""" def __init__(self, msg, domain): # _msg is the gettext msgid and should never change self._msg = msg self._left_extra_msg = '' self._right_extra_msg = '' self.params = None self.locale = None self.domain = domain @property def data(self): # NOTE(mrodden): this should always resolve to a unicode string # that best represents the state of the message currently localedir = os.environ.get(self.domain.upper() + '_LOCALEDIR') if self.locale: lang = gettext.translation(self.domain, localedir=localedir, languages=[self.locale], fallback=True) else: # use system locale for translations lang = gettext.translation(self.domain, localedir=localedir, fallback=True) full_msg = (self._left_extra_msg + lang.ugettext(self._msg) + self._right_extra_msg) if self.params is not None: full_msg = full_msg % self.params return unicode(full_msg) def _save_parameters(self, other): # we check for None later to see if # we actually have parameters to inject, # so encapsulate if our parameter is actually None if other is None: self.params = (other, ) else: self.params = copy.deepcopy(other) return self # overrides to be more string-like def __unicode__(self): return self.data def __str__(self): return self.data.encode('utf-8') def __getstate__(self): to_copy = ['_msg', '_right_extra_msg', '_left_extra_msg', 'domain', 'params', 
'locale'] new_dict = self.__dict__.fromkeys(to_copy) for attr in to_copy: new_dict[attr] = copy.deepcopy(self.__dict__[attr]) return new_dict def __setstate__(self, state): for (k, v) in state.items(): setattr(self, k, v) # operator overloads def __add__(self, other): copied = copy.deepcopy(self) copied._right_extra_msg += other.__str__() return copied def __radd__(self, other): copied = copy.deepcopy(self) copied._left_extra_msg += other.__str__() return copied def __mod__(self, other): # do a format string to catch and raise # any possible KeyErrors from missing parameters self.data % other copied = copy.deepcopy(self) return copied._save_parameters(other) def __mul__(self, other): return self.data * other def __rmul__(self, other): return other * self.data def __getitem__(self, key): return self.data[key] def __getslice__(self, start, end): return self.data.__getslice__(start, end) def __getattribute__(self, name): # NOTE(mrodden): handle lossy operations that we can't deal with yet # These override the UserString implementation, since UserString # uses our __class__ attribute to try and build a new message # after running the inner data string through the operation. # At that point, we have lost the gettext message id and can just # safely resolve to a string instead. ops = ['capitalize', 'center', 'decode', 'encode', 'expandtabs', 'ljust', 'lstrip', 'replace', 'rjust', 'rstrip', 'strip', 'swapcase', 'title', 'translate', 'upper', 'zfill'] if name in ops: return getattr(self.data, name) else: return UserString.UserString.__getattribute__(self, name) class LocaleHandler(logging.Handler): """Handler that can have a locale associated to translate Messages. A quick example of how to utilize the Message class above. LocaleHandler takes a locale and a target logging.Handler object to forward LogRecord objects to after translating the internal Message. 
""" def __init__(self, locale, target): """ Initialize a LocaleHandler :param locale: locale to use for translating messages :param target: logging.Handler object to forward LogRecord objects to after translation """ logging.Handler.__init__(self) self.locale = locale self.target = target def emit(self, record): if isinstance(record.msg, Message): # set the locale and resolve to a string record.msg.locale = self.locale self.target.emit(record)
apache-2.0
danielneis/osf.io
api/base/urls.py
4
1524
from django.conf import settings from django.conf.urls import include, url, patterns from django.conf.urls.static import static from settings import API_BASE from website import settings as osf_settings from . import views base_pattern = '^{}'.format(API_BASE) urlpatterns = [ url(base_pattern, include(patterns('', url(r'^$', views.root, name='root'), url(r'^applications/', include('api.applications.urls', namespace='applications')), url(r'^comments/', include('api.comments.urls', namespace='comments')), url(r'^nodes/', include('api.nodes.urls', namespace='nodes')), url(r'^registrations/', include('api.registrations.urls', namespace='registrations')), url(r'^users/', include('api.users.urls', namespace='users')), url(r'^tokens/', include('api.tokens.urls', namespace='tokens')), url(r'^logs/', include('api.logs.urls', namespace='logs')), url(r'^files/', include('api.files.urls', namespace='files')), url(r'^docs/', include('rest_framework_swagger.urls')), )) ) ] if osf_settings.DEV_MODE: urlpatterns.extend([ url(r'^v2/collections/', include('api.collections.urls', namespace='collections')), ]) urlpatterns += static('/static/', document_root=settings.STATIC_ROOT) handler404 = views.error_404
apache-2.0
cwyark/micropython
tools/pydfu.py
84
17419
#!/usr/bin/env python # This file is part of the OpenMV project. # Copyright (c) 2013/2014 Ibrahim Abdelkader <i.abdalkader@gmail.com> # This work is licensed under the MIT license, see the file LICENSE for # details. """This module implements enough functionality to program the STM32F4xx over DFU, without requiringdfu-util. See app note AN3156 for a description of the DFU protocol. See document UM0391 for a dscription of the DFuse file. """ from __future__ import print_function import argparse import re import struct import sys import usb.core import usb.util import zlib # VID/PID __VID = 0x0483 __PID = 0xdf11 # USB request __TIMEOUT __TIMEOUT = 4000 # DFU commands __DFU_DETACH = 0 __DFU_DNLOAD = 1 __DFU_UPLOAD = 2 __DFU_GETSTATUS = 3 __DFU_CLRSTATUS = 4 __DFU_GETSTATE = 5 __DFU_ABORT = 6 # DFU status __DFU_STATE_APP_IDLE = 0x00 __DFU_STATE_APP_DETACH = 0x01 __DFU_STATE_DFU_IDLE = 0x02 __DFU_STATE_DFU_DOWNLOAD_SYNC = 0x03 __DFU_STATE_DFU_DOWNLOAD_BUSY = 0x04 __DFU_STATE_DFU_DOWNLOAD_IDLE = 0x05 __DFU_STATE_DFU_MANIFEST_SYNC = 0x06 __DFU_STATE_DFU_MANIFEST = 0x07 __DFU_STATE_DFU_MANIFEST_WAIT_RESET = 0x08 __DFU_STATE_DFU_UPLOAD_IDLE = 0x09 __DFU_STATE_DFU_ERROR = 0x0a _DFU_DESCRIPTOR_TYPE = 0x21 # USB device handle __dev = None __verbose = None # USB DFU interface __DFU_INTERFACE = 0 import inspect if 'length' in inspect.getargspec(usb.util.get_string).args: # PyUSB 1.0.0.b1 has the length argument def get_string(dev, index): return usb.util.get_string(dev, 255, index) else: # PyUSB 1.0.0.b2 dropped the length argument def get_string(dev, index): return usb.util.get_string(dev, index) def init(): """Initializes the found DFU device so that we can program it.""" global __dev devices = get_dfu_devices(idVendor=__VID, idProduct=__PID) if not devices: raise ValueError('No DFU device found') if len(devices) > 1: raise ValueError("Multiple DFU devices found") __dev = devices[0] # Claim DFU interface usb.util.claim_interface(__dev, __DFU_INTERFACE) # Clear status 
clr_status() def clr_status(): """Clears any error status (perhaps left over from a previous session).""" __dev.ctrl_transfer(0x21, __DFU_CLRSTATUS, 0, __DFU_INTERFACE, None, __TIMEOUT) def get_status(): """Get the status of the last operation.""" stat = __dev.ctrl_transfer(0xA1, __DFU_GETSTATUS, 0, __DFU_INTERFACE, 6, 20000) # print (__DFU_STAT[stat[4]], stat) return stat[4] def mass_erase(): """Performs a MASS erase (i.e. erases the entire device.""" # Send DNLOAD with first byte=0x41 __dev.ctrl_transfer(0x21, __DFU_DNLOAD, 0, __DFU_INTERFACE, "\x41", __TIMEOUT) # Execute last command if get_status() != __DFU_STATE_DFU_DOWNLOAD_BUSY: raise Exception("DFU: erase failed") # Check command state if get_status() != __DFU_STATE_DFU_DOWNLOAD_IDLE: raise Exception("DFU: erase failed") def page_erase(addr): """Erases a single page.""" if __verbose: print("Erasing page: 0x%x..." % (addr)) # Send DNLOAD with first byte=0x41 and page address buf = struct.pack("<BI", 0x41, addr) __dev.ctrl_transfer(0x21, __DFU_DNLOAD, 0, __DFU_INTERFACE, buf, __TIMEOUT) # Execute last command if get_status() != __DFU_STATE_DFU_DOWNLOAD_BUSY: raise Exception("DFU: erase failed") # Check command state if get_status() != __DFU_STATE_DFU_DOWNLOAD_IDLE: raise Exception("DFU: erase failed") def set_address(addr): """Sets the address for the next operation.""" # Send DNLOAD with first byte=0x21 and page address buf = struct.pack("<BI", 0x21, addr) __dev.ctrl_transfer(0x21, __DFU_DNLOAD, 0, __DFU_INTERFACE, buf, __TIMEOUT) # Execute last command if get_status() != __DFU_STATE_DFU_DOWNLOAD_BUSY: raise Exception("DFU: set address failed") # Check command state if get_status() != __DFU_STATE_DFU_DOWNLOAD_IDLE: raise Exception("DFU: set address failed") def write_memory(addr, buf, progress=None, progress_addr=0, progress_size=0): """Writes a buffer into memory. This routine assumes that memory has already been erased. 
""" xfer_count = 0 xfer_bytes = 0 xfer_total = len(buf) xfer_base = addr while xfer_bytes < xfer_total: if __verbose and xfer_count % 512 == 0: print ("Addr 0x%x %dKBs/%dKBs..." % (xfer_base + xfer_bytes, xfer_bytes // 1024, xfer_total // 1024)) if progress and xfer_count % 256 == 0: progress(progress_addr, xfer_base + xfer_bytes - progress_addr, progress_size) # Set mem write address set_address(xfer_base+xfer_bytes) # Send DNLOAD with fw data chunk = min(64, xfer_total-xfer_bytes) __dev.ctrl_transfer(0x21, __DFU_DNLOAD, 2, __DFU_INTERFACE, buf[xfer_bytes:xfer_bytes + chunk], __TIMEOUT) # Execute last command if get_status() != __DFU_STATE_DFU_DOWNLOAD_BUSY: raise Exception("DFU: write memory failed") # Check command state if get_status() != __DFU_STATE_DFU_DOWNLOAD_IDLE: raise Exception("DFU: write memory failed") xfer_count += 1 xfer_bytes += chunk def write_page(buf, xfer_offset): """Writes a single page. This routine assumes that memory has already been erased. """ xfer_base = 0x08000000 # Set mem write address set_address(xfer_base+xfer_offset) # Send DNLOAD with fw data __dev.ctrl_transfer(0x21, __DFU_DNLOAD, 2, __DFU_INTERFACE, buf, __TIMEOUT) # Execute last command if get_status() != __DFU_STATE_DFU_DOWNLOAD_BUSY: raise Exception("DFU: write memory failed") # Check command state if get_status() != __DFU_STATE_DFU_DOWNLOAD_IDLE: raise Exception("DFU: write memory failed") if __verbose: print ("Write: 0x%x " % (xfer_base + xfer_offset)) def exit_dfu(): """Exit DFU mode, and start running the program.""" # set jump address set_address(0x08000000) # Send DNLOAD with 0 length to exit DFU __dev.ctrl_transfer(0x21, __DFU_DNLOAD, 0, __DFU_INTERFACE, None, __TIMEOUT) try: # Execute last command if get_status() != __DFU_STATE_DFU_MANIFEST: print("Failed to reset device") # Release device usb.util.dispose_resources(__dev) except: pass def named(values, names): """Creates a dict with `names` as fields, and `values` as values.""" return dict(zip(names.split(), values)) 
def consume(fmt, data, names): """Parses the struct defined by `fmt` from `data`, stores the parsed fields into a named tuple using `names`. Returns the named tuple, and the data with the struct stripped off.""" size = struct.calcsize(fmt) return named(struct.unpack(fmt, data[:size]), names), data[size:] def cstring(string): """Extracts a null-terminated string from a byte array.""" return string.decode('utf-8').split('\0', 1)[0] def compute_crc(data): """Computes the CRC32 value for the data passed in.""" return 0xFFFFFFFF & -zlib.crc32(data) - 1 def read_dfu_file(filename): """Reads a DFU file, and parses the individual elements from the file. Returns an array of elements. Each element is a dictionary with the following keys: num - The element index address - The address that the element data should be written to. size - The size of the element ddata. data - The element data. If an error occurs while parsing the file, then None is returned. """ print("File: {}".format(filename)) with open(filename, 'rb') as fin: data = fin.read() crc = compute_crc(data[:-4]) elements = [] # Decode the DFU Prefix # # <5sBIB # < little endian # 5s char[5] signature "DfuSe" # B uint8_t version 1 # I uint32_t size Size of the DFU file (not including suffix) # B uint8_t targets Number of targets dfu_prefix, data = consume('<5sBIB', data, 'signature version size targets') print (" %(signature)s v%(version)d, image size: %(size)d, " "targets: %(targets)d" % dfu_prefix) for target_idx in range(dfu_prefix['targets']): # Decode the Image Prefix # # <6sBI255s2I # < little endian # 6s char[6] signature "Target" # B uint8_t altsetting # I uint32_t named bool indicating if a name was used # 255s char[255] name name of the target # I uint32_t size size of image (not incl prefix) # I uint32_t elements Number of elements in the image img_prefix, data = consume('<6sBI255s2I', data, 'signature altsetting named name ' 'size elements') img_prefix['num'] = target_idx if img_prefix['named']: 
img_prefix['name'] = cstring(img_prefix['name']) else: img_prefix['name'] = '' print(' %(signature)s %(num)d, alt setting: %(altsetting)s, ' 'name: "%(name)s", size: %(size)d, elements: %(elements)d' % img_prefix) target_size = img_prefix['size'] target_data, data = data[:target_size], data[target_size:] for elem_idx in range(img_prefix['elements']): # Decode target prefix # < little endian # I uint32_t element address # I uint32_t element size elem_prefix, target_data = consume('<2I', target_data, 'addr size') elem_prefix['num'] = elem_idx print(' %(num)d, address: 0x%(addr)08x, size: %(size)d' % elem_prefix) elem_size = elem_prefix['size'] elem_data = target_data[:elem_size] target_data = target_data[elem_size:] elem_prefix['data'] = elem_data elements.append(elem_prefix) if len(target_data): print("target %d PARSE ERROR" % target_idx) # Decode DFU Suffix # < little endian # H uint16_t device Firmware version # H uint16_t product # H uint16_t vendor # H uint16_t dfu 0x11a (DFU file format version) # 3s char[3] ufd 'UFD' # B uint8_t len 16 # I uint32_t crc32 dfu_suffix = named(struct.unpack('<4H3sBI', data[:16]), 'device product vendor dfu ufd len crc') print (' usb: %(vendor)04x:%(product)04x, device: 0x%(device)04x, ' 'dfu: 0x%(dfu)04x, %(ufd)s, %(len)d, 0x%(crc)08x' % dfu_suffix) if crc != dfu_suffix['crc']: print("CRC ERROR: computed crc32 is 0x%08x" % crc) return data = data[16:] if data: print("PARSE ERROR") return return elements class FilterDFU(object): """Class for filtering USB devices to identify devices which are in DFU mode. """ def __call__(self, device): for cfg in device: for intf in cfg: return (intf.bInterfaceClass == 0xFE and intf.bInterfaceSubClass == 1) def get_dfu_devices(*args, **kwargs): """Returns a list of USB device which are currently in DFU mode. Additional filters (like idProduct and idVendor) can be passed in to refine the search. 
""" # convert to list for compatibility with newer pyusb return list(usb.core.find(*args, find_all=True, custom_match=FilterDFU(), **kwargs)) def get_memory_layout(device): """Returns an array which identifies the memory layout. Each entry of the array will contain a dictionary with the following keys: addr - Address of this memory segment last_addr - Last address contained within the memory segment. size - size of the segment, in bytes num_pages - number of pages in the segment page_size - size of each page, in bytes """ cfg = device[0] intf = cfg[(0, 0)] mem_layout_str = get_string(device, intf.iInterface) mem_layout = mem_layout_str.split('/') addr = int(mem_layout[1], 0) segments = mem_layout[2].split(',') seg_re = re.compile(r'(\d+)\*(\d+)(.)(.)') result = [] for segment in segments: seg_match = seg_re.match(segment) num_pages = int(seg_match.groups()[0], 10) page_size = int(seg_match.groups()[1], 10) multiplier = seg_match.groups()[2] if multiplier == 'K': page_size *= 1024 if multiplier == 'M': page_size *= 1024 * 1024 size = num_pages * page_size last_addr = addr + size - 1 result.append(named((addr, last_addr, size, num_pages, page_size), "addr last_addr size num_pages page_size")) addr += size return result def list_dfu_devices(*args, **kwargs): """Prints a lits of devices detected in DFU mode.""" devices = get_dfu_devices(*args, **kwargs) if not devices: print("No DFU capable devices found") return for device in devices: print("Bus {} Device {:03d}: ID {:04x}:{:04x}" .format(device.bus, device.address, device.idVendor, device.idProduct)) layout = get_memory_layout(device) print("Memory Layout") for entry in layout: print(" 0x{:x} {:2d} pages of {:3d}K bytes" .format(entry['addr'], entry['num_pages'], entry['page_size'] // 1024)) def write_elements(elements, mass_erase_used, progress=None): """Writes the indicated elements into the target memory, erasing as needed. 
""" mem_layout = get_memory_layout(__dev) for elem in elements: addr = elem['addr'] size = elem['size'] data = elem['data'] elem_size = size elem_addr = addr if progress: progress(elem_addr, 0, elem_size) while size > 0: write_size = size if not mass_erase_used: for segment in mem_layout: if addr >= segment['addr'] and \ addr <= segment['last_addr']: # We found the page containing the address we want to # write, erase it page_size = segment['page_size'] page_addr = addr & ~(page_size - 1) if addr + write_size > page_addr + page_size: write_size = page_addr + page_size - addr page_erase(page_addr) break write_memory(addr, data[:write_size], progress, elem_addr, elem_size) data = data[write_size:] addr += write_size size -= write_size if progress: progress(elem_addr, addr - elem_addr, elem_size) def cli_progress(addr, offset, size): """Prints a progress report suitable for use on the command line.""" width = 25 done = offset * width // size print("\r0x{:08x} {:7d} [{}{}] {:3d}% " .format(addr, size, '=' * done, ' ' * (width - done), offset * 100 // size), end="") sys.stdout.flush() if offset == size: print("") def main(): """Test program for verifying this files functionality.""" global __verbose # Parse CMD args parser = argparse.ArgumentParser(description='DFU Python Util') #parser.add_argument("path", help="file path") parser.add_argument( "-l", "--list", help="list available DFU devices", action="store_true", default=False ) parser.add_argument( "-m", "--mass-erase", help="mass erase device", action="store_true", default=False ) parser.add_argument( "-u", "--upload", help="read file from DFU device", dest="path", default=False ) parser.add_argument( "-v", "--verbose", help="increase output verbosity", action="store_true", default=False ) args = parser.parse_args() __verbose = args.verbose if args.list: list_dfu_devices(idVendor=__VID, idProduct=__PID) return init() if args.mass_erase: print ("Mass erase...") mass_erase() if args.path: elements = 
read_dfu_file(args.path) if not elements: return print("Writing memory...") write_elements(elements, args.mass_erase, progress=cli_progress) print("Exiting DFU...") exit_dfu() return print("No command specified") if __name__ == '__main__': main()
mit
chleh/pampel
src/pampel.py
1
47231
#!/usr/bin/python # # This file is part of pampel. # # pampel is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # pampel is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with pampel. If not, see <http://www.gnu.org/licenses/>. # # TODO warn when too lenient import pygit2 as git import argparse import fnmatch import shutil import os import sys import re import subprocess import datetime import json import imp import functools import numbers import six from contextlib import contextmanager from threading import Thread PROG = os.path.basename(sys.argv[0]) or "pampel" out_dir_re = re.compile("out($|-?)") # output dirs are either "out" or "out-*" author = git.Signature(PROG, PROG) committer = author pampel_commit_re = re.compile(r'pampel [[]run #([0-9]+)[]]$', re.M) # see http://stackoverflow.com/questions/14986490/python-change-sys-stdout-print-to-custom-print-function class _RedirectPrint(): def __init__(self, stdout): self.old_stdout = stdout self.nl = True def write(self, text): for l in text.splitlines(True): if self.nl: self.old_stdout.write('> ' + l) else: self.old_stdout.write(l) self.nl = l.endswith("\n") @contextmanager def _redirect_12(): old_stdout = sys.stdout old_stderr = sys.stderr sys.stdout = _RedirectPrint(old_stdout) sys.stdout = _RedirectPrint(old_stderr) try: yield finally: sys.stdout = old_stdout sys.stderr = old_stderr class LL: FATAL = 0 ERROR = 1 WARN = 2 INFO = 3 VERBOSE = 4 DEBUG = 5 TRACE = 6 def _color(name): if name == "black": c = "30" elif name == "red": c = "31" elif name == "green": c = "32" elif name == 
"yellow": c = "33" elif name == "blue": c = "34" elif name == "magenta": c = "35" elif name == "cyan": c = "36" elif name == "white": c = "37" elif name == "none": c = "0" return '\033[' + c + 'm' _LOGCOLORS = [ _color("red"), _color("red"), _color("red"), '', '', '', '' ] if "_LOGLEVEL" not in globals(): _LOGLEVEL = LL.INFO if "_COLORED_OUTPUT" not in globals(): _COLORED_OUTPUT = "auto" if "_OUT_TTY" not in globals(): _OUT_TTY = sys.stderr.isatty() def is_colored(): return _COLORED_OUTPUT == "always" \ or (_COLORED_OUTPUT == "auto" and _OUT_TTY) def _general_log(minlevel, prefix, c, *args): indent = len(prefix) msg = " ".join(map(str, args)) cont_prefix = " " * (indent-1) + "> " # c = _LOGCOLORS[minlevel] if is_colored() else '' if not is_colored(): c = "" prefix = c + prefix + (" " if indent > 0 else "") if _LOGLEVEL >= minlevel: first_line = True for line in msg.splitlines(True): if first_line: sys.stderr.write(prefix + line) first_line = False else: sys.stderr.write(cont_prefix + line) if c: sys.stderr.write(_color("none")) sys.stderr.write("\n") def fatallog(*args): _general_log(LL.FATAL, "[FTL]", _LOGCOLORS[LL.FATAL], *args) def errorlog(*args): _general_log(LL.ERROR, "[ERR]", _LOGCOLORS[LL.ERROR], *args) def warnlog(*args): _general_log(LL.WARN, "[WRN]", _LOGCOLORS[LL.WARN], *args) def infolog(*args): _general_log(LL.INFO, "[INF]", _LOGCOLORS[LL.INFO], *args) def verblog(*args): _general_log(LL.VERBOSE, "[DBG]", _LOGCOLORS[LL.VERBOSE], *args) def debuglog(*args): _general_log(LL.DEBUG, "[DBG]", _LOGCOLORS[LL.DEBUG], *args) def tracelog(*args): _general_log(LL.TRACE, "[TRC]", _LOGCOLORS[LL.TRACE], *args) def colorlog(clr, *args): c = _color(clr) if is_colored() else '' sys.stderr.write( c + " ".join(map(str, args)) + (_color("none") if c else "") + "\n" ) def goodlog(*args): _general_log(LL.WARN, "", _color("green"), *args) def badlog(*args): _general_log(LL.WARN, "", _color("red"), *args) class JsonFormattedEncoder: def __init__(self, indent=0): self.indent 
= indent self.encoder = json.JSONEncoder(sort_keys=True, indent=2, default=self.__class__.default) self.reset() def reset(self): self.in_obj = False self.json_str = "" def start_obj(self): assert not self.in_obj self.in_obj = True self.first_prop = True self.blank = False self.json_str += " " * self.indent + "{\n" self.indent += 1 def end_obj(self): assert self.in_obj self.in_obj = False self.indent -= 1 self.json_str += "\n" + " " * self.indent + "}\n" def add_sep(self): if self.first_prop: self.first_prop = False else: self.json_str += ",\n" if self.blank: self.json_str += "\n" def blank_line(self): self.blank = True def add_props_table(self, kvs): assert self.in_obj self.add_sep() # get longest key maxlen = 0 for kv in kvs: if kv is not None: maxlen = max(maxlen, len(kv[0])) maxlen += 4 # acount for quotation marks and colon key_fmt = "{{:{0}}}".format(maxlen) need_sep = False for kv in kvs: if need_sep: self.json_str += ",\n" need_sep = False if kv is None: self.json_str += "\n" continue k, v = kv self.json_str += " " * self.indent + key_fmt.format("\"" + k + "\": ") val_str = self.encoder.encode(v) first_line = True for l in val_str.splitlines(True): if first_line: self.json_str += l first_line = False else: self.json_str += " " * self.indent + l need_sep = True self.first_prop = False def add_prop(self, k, v): self.add_props_table([(k, v)]) def add_encoded_prop(self, k, val_str): assert isinstance(val_str, str) assert self.in_obj self.add_sep() self.json_str += " " * self.indent + "\"{0}\": ".format(k) first_line = True for l in val_str.splitlines(): if first_line: self.json_str += l first_line = False else: self.json_str += "\n" + " " * self.indent + l def encode(self, *args): return self.encoder.encode(*args) def __str__(self): assert not self.in_obj assert json.loads(self.json_str) is not None return self.json_str @staticmethod def default(o): if isinstance(o, datetime.datetime): return str(o) elif isinstance(o, datetime.timedelta): return str(o) elif 
isinstance(o, git.Oid): return str(o) else: raise TypeError("Don't know how to generate a serializable object out of type {0}".format(type(o))) def commit_to_json(commit): first, rest = commit.message.split("\n", 1) # debuglog("{} -- {}".format(first, commit.id)) return json.loads(rest) class PampelConfig: def __init__(self, repo): self.aux_repos = [] self.run_count = 0 self.snapshots = [] self.command = [] self.import_script = False self.repo = repo def __enter__(self): self.load() return self def __exit__(self, typ, val, traceback): self.save() def set_attrs(self, kv): for k, t in [ ("run_count", int), ("aux_repos", list), ("snapshots", list), ("command", list), ("import_script", bool) ]: if k in kv: if isinstance(kv[k], t): setattr(self, k, kv[k]) else: raise ValueError("attribute {0} has type {1} but {2} is expected".format(k, type(kv[k]), t)) def load(self): p = os.path.join(self.repo.workdir, ".pampel.json") try: self.set_attrs(json.load(open(p, "r"))) except FileNotFoundError: pass def save(self): d = {} for k, v in self.__dict__.items(): if k != "repo": d[k] = v p = os.path.join(self.repo.workdir, ".pampel.json") debuglog("saving {0}".format(p)) json.dump(d, open(p, "w"), indent=2, sort_keys=True) def stage(self): p = os.path.join(self.repo.workdir, ".pampel.json") debuglog("staging {0}".format(p)) idx = self.repo.index # TODO less read/write idx.read() idx.add(".pampel.json") idx.write() def add_snapshot(self, path): try: repo = git.discover_repository(path) except KeyError: pass else: raise KeyError("path `{0}' already belongs to a git repository: {1}".format(path, repo)) # TODO warning message: shared repo stat = subprocess.call(["git", "clone", "--shared", self.repo.workdir, path]) assert stat == 0 self.snapshots.append(os.path.realpath(path)) def remove_snapshot(self, path): # TODO check that snapshot id clean try: snapshot = git.Repository(path) except Exception as e: errorlog("could not open repository at {} ({}):\n{}".format(path, type(e).__name__, 
e)) raise # check that snapshot really is shared with repo for l in open(os.path.join(snapshot.path, "objects", "info", "alternates"), "r"): l = l.rstrip("\n\r") if not l: break p1 = os.path.realpath(os.path.join(self.repo.path, "objects")) p2 = os.path.realpath(l) debuglog("p1 {}".format(p1)) debuglog("p2 {}".format(p2)) assert os.path.realpath(os.path.join(self.repo.path, "objects")) == os.path.realpath(l) # TODO: ask user for confirmation shutil.rmtree(path) self.snapshots.remove(os.path.realpath(path)) def create_snapshot(self, path, commit, branch_name): if os.path.lexists(path): errorlog("snapshot path already exists: {}".format(path)) assert False os.makedirs(path) self.add_snapshot(path) new_repo = git.Repository(path) # TODO switch to revparse in similar cases ncom = new_repo.revparse_single(str(commit.id)) br = new_repo.create_branch(branch_name, ncom) new_repo.checkout(br) def get_snapshot_dir(props, run_info, tag, param_map=None, value_map=None): get_param = lambda k: param_map[k] if param_map is not None else k get_value = lambda k, v: value_map[k][v] if value_map is not None else v run_str = "{:03}".format(run_info["run count"]) if tag: parts = [ tag ] else: parts = [] parts += [ "{}={}".format(get_param(k), get_value(k, v)) for k, v in sorted(props.items()) ] par_str = " ".join(parts) dirname = run_str if par_str: dirname += " " + par_str else: dirname = "pampel-run-{}".format(run_str) return dirname def is_repo_clean(repo): for f, s in repo.status().items(): if s != git.GIT_STATUS_IGNORED: d = f.split(os.sep)[0] if s != git.GIT_STATUS_WT_DELETED or not out_dir_re.match(d): debuglog("path `{0}' is not clean".format(f)) return False return True def git_status_to_str(s): if s == git.GIT_STATUS_CURRENT: return "current" elif s == git.GIT_STATUS_IGNORED: return "ignored" elif s == git.GIT_STATUS_INDEX_MODIFIED: return "index modified" elif s == git.GIT_STATUS_INDEX_DELETED: return "index deleted" elif s == git.GIT_STATUS_INDEX_NEW: return "index new" 
elif s == git.GIT_STATUS_WT_MODIFIED: return "wt modified" elif s == git.GIT_STATUS_WT_DELETED: return "wt deleted" elif s == git.GIT_STATUS_WT_NEW: return "wt new" raise ValueError("unkown git status: {0}".format(s)) # this function ignores deleted files in output directories def is_repo_clean_output(repo): for f, s in repo.status().items(): if s != git.GIT_STATUS_IGNORED: d = f.split(os.sep)[0] if s != git.GIT_STATUS_WT_DELETED and out_dir_re.match(d): debuglog("path `{0}' is not clean ({1})".format(f, git_status_to_str(s))) return False return True def check_command_output(repo): """ Check that the external command has only written to output directories """ for f, s in repo.status().items(): if s != git.GIT_STATUS_IGNORED: d = f.split(os.sep)[0] if f != ".pampel.json" and not out_dir_re.match(d): debuglog("path `{0}' is not clean ({1})".format(f, git_status_to_str(s))) return False return True def git_stage(repo): idx = repo.index idx.read() for f, s in repo.status().items(): if s == git.GIT_STATUS_IGNORED: continue if f == ".pampel-commit": debuglog("removing {}".format(f)) os.remove(f) continue debuglog("staging path `{0}'".format(f, s)) if s == git.GIT_STATUS_WT_DELETED: idx.remove(f) else: idx.add(f) idx.write() def git_stage_output_dirs(repo): idx = repo.index idx.read() for f, s in repo.status().items(): if s == git.GIT_STATUS_IGNORED: continue tracelog("pre stg path {0}, {1}".format(s, f)) d = f.split("/")[0] if out_dir_re.match(d) and not os.path.isdir(f): debuglog("staging path {0}".format(f, s)) if s == git.GIT_STATUS_WT_DELETED: idx.remove(f) else: idx.add(f) idx.write() def clear_output_dirs(repo): infolog("clearing output directories") for d in os.listdir(repo.workdir): if not out_dir_re.match(d): continue p = os.path.join(repo.workdir, d) debuglog("clr {0}".format(p)) if os.path.isdir(p): for sd in os.listdir(p): sp = os.path.join(p, sd) if os.path.isdir(sp): shutil.rmtree(sp) else: os.remove(sp) # TODO: introduce "hidden" parameters which are not 
considered for snapshot # names # get short but unique string representation for each parameter name def format_params(params): min_key_len = 3 tree = [0, {}] # build tree for k in params: assert isinstance(k, str) level = tree for c in k: if c not in level[1]: level[1][c] = [1, {}] else: level[1][c][0] += 1 level = level[1][c] param_map = {} for k in params: level = tree n = 0 for c in k: n += 1 level = level[1][c] if not level[1]: break if level[0] == 1 and n >= min_key_len: break param_map[k] = k[0:n] debuglog("param_map", param_map) return param_map # get short but unique string representation for each parameter value def format_values(params, pvs): min_prec = 4 value_map = {} # { param: { value: value_str, ... }, ... } for p in params: v2s = {} # maps a value to its string representation strs = set() # all string representations for the current parameter's values loop_count = 0 max_loop = 1 val_type = None while True: v2s.clear() strs.clear() for pv in pvs: if p in pv: v = pv[p] if v in v2s: continue if isinstance(v, numbers.Number): if loop_count == 0: val_type = numbers.Number max_loop = 18 assert val_type == numbers.Number fmt = "{{:.{}g}}".format(loop_count+min_prec) vstr = fmt.format(v) else: if loop_count == 0: val_type = type(v) assert isinstance(v, val_type) # TODO optimize: fewer evaluations of vstr, remove # common prefixes/suffixes over all vstr's vstr = re.sub(r"\s+", " ", str(v)).strip() vstr = re.sub(r"[^\w ,;.+-]+", "_", vstr) if loop_count == 0: tracelog("vstr:", vstr) if loop_count == 0: max_loop = len(vstr) - min_prec vstr = vstr[:loop_count + min_prec] if vstr in strs: # value string already present --> next attempt break else: strs.add(vstr) v2s[v] = vstr else: # for loop completed without break, i.e. 
the mapping for this # parameter is accepted value_map[p] = v2s break loop_count += 1 if loop_count >= max_loop: errorlog("could not find unique string representation for the values of parameter {}".format(p)) break debuglog("value_map", value_map) return value_map def get_common_params(commits): keys = {} # { param: set(values), ... } counts = {} # { param: count, ... } for co, par, _ in commits: for k, v in par.items(): if k in keys: if v not in keys[k]: keys[k].add(v) counts[k] += 1 else: s = set() s.add(v) keys[k] = s counts[k] = 1 ncom = len(commits) common_params = {} unique_params = set() for k, vals in keys.items(): if len(vals) == 1 and counts[k] == ncom: for v in vals: common_params[k] = v else: unique_params.add(k) debuglog("common_params", common_params) debuglog("unique_params", unique_params) return common_params, unique_params def open_repo(path): try: repo = git.Repository(path) except KeyError as e: fatallog("there is no git repository at {}".format(path)) sys.exit(1) assert not repo.is_bare alt = os.path.join(repo.path, "objects/info/alternates") is_snapshot = os.path.exists(alt) if is_snapshot: debuglog("{} is a pampel snapshot".format(repo.workdir)) return repo, is_snapshot def open_main_repo(path): repo, is_snapshot = open_repo(path) # TODO read origin .pampel.json to detect if this is really a snapshot if is_snapshot: fatallog("{} is a pampel snapshot. 
I refuse to work on that.".format(repo.workdir)) sys.exit(1) return repo def process_check(args): repo = open_main_repo(args.repo[0]) with PampelConfig(repo) as conf: changed = False naux = [] for p in conf.aux_repos: debuglog("aux repo", p) try: git.Repository(p) except KeyError as e: warnlog("auxiliary repository {} does not exist.".format(p)) changed = True else: naux.append(p) nsnap = [] for p in conf.snapshots: debuglog("snapshot", p) try: git.Repository(p) except KeyError as e: warnlog("snapshot {} does not exist.".format(p)) changed = True else: nsnap.append(p) if args.cleanup and changed: conf.aux_repos = naux conf.snapshots = nsnap # TODO auto track if conf changed if args.cleanup and changed: commit_conf(repo, conf) def process_conf(args): repo = open_main_repo(args.repo[0]) try: aux_repos = [ git.Repository(auxp) for auxp in args.aux_repo ] except KeyError as e: fatallog("could not access git repository:\n{}".format(e)) sys.exit(1) with PampelConfig(repo) as conf: if len(aux_repos) != 0: infolog("setting auxiliary repositories") old_ars = list(conf.aux_repos) conf.aux_repos = [ a.workdir for a in aux_repos ] for a in aux_repos: if a.workdir not in old_ars: infolog("new aux repo: {0}".format(a.workdir)) else: old_ars.remove(a.workdir) for a in old_ars: warnlog("removed aux repo: {0}".format(a)) if len(args.command) != 0: if args.import_script: assert len(args.command) == 1 infolog("changing command to be run") debuglog("{0} ({1}) --> {2} ({3})".format( " ".join(conf.command), "import" if conf.import_script else "run", " ".join(args.command), "import" if args.import_script else "run" )) conf.command = args.command conf.import_script = args.import_script commit_conf(repo, conf) def commit_conf(repo, conf): conf.stage() tree = repo.index.write_tree() message = "pampel [conf]" repo.create_commit('HEAD', author, committer, message, tree, [ repo.head.peel().id ]) def status_to_str(s): if s == git.GIT_STATUS_CURRENT: return "CR" elif s == git.GIT_STATUS_IGNORED: 
return "IG" elif s == git.GIT_STATUS_INDEX_DELETED: return "ID" elif s == git.GIT_STATUS_INDEX_MODIFIED: return "IM" elif s == git.GIT_STATUS_INDEX_NEW: return "I?" elif s == git.GIT_STATUS_WT_DELETED: return " D" elif s == git.GIT_STATUS_WT_MODIFIED: return " M" elif s == git.GIT_STATUS_WT_NEW: return "??" else: return "--" # TODO lenient only for aux repos def process_rerun(args): repo, is_snapshot = open_repo(args.repo[0]) args.test = is_snapshot for commit in repo.walk(repo.head.peel().id, git.GIT_SORT_TIME): m = pampel_commit_re.match(commit.message) if m: debuglog("last run is {}".format(commit.id)) json_obj = commit_to_json(commit) cmdline = json_obj["run info"]["cmdline params"] if not isinstance(cmdline, list): warnlog("commandline is not provided as list. I will split it by myself.") cmdline = cmdline.split(" ") debuglog("cmdline", [ sys.argv[0] ] + cmdline[1:]) old_args = parser.parse_args(cmdline[1:]) # TODO what if imported infolog("rerunning [#{}]:".format(m.group(1)), os.path.basename(sys.argv[0]), " ".join(cmdline[1:]) ) args.cmdline = [ sys.argv[0] ] + cmdline[1:] # TODO the params of parser_commmon are ignored args.args = old_args.args if args.dry_run: return process_run(args, repo, not is_snapshot) # print which files have changed chfs = [ (f, s) for f, s in repo.status().items() if s != git.GIT_STATUS_IGNORED and f != ".pampel.json" and f != ".pampel-commit" ] if len(chfs) != 0: if args.ignore: def ign(il, f): for pat in il: if fnmatch.fnmatch(f, pat): return True return False do_ign = [ ign(args.ignore, f) for f, _ in chfs ] igfs = [ f for i, f in enumerate(chfs) if do_ign[i] ] chfs = [ f for i, f in enumerate(chfs) if not do_ign[i] ] else: igfs = [] if igfs: infolog("Files that changed from previous run (ignored):\n" + "\n".join( ( "{} {}".format(status_to_str(s), f) for f, s in sorted(igfs) ) )) if chfs: infolog("Files that changed from previous run:\n" + "\n".join( ( "{} {}".format(status_to_str(s), f) for f, s in sorted(chfs) ) )) else: 
goodlog("Rerun did not lead to any essential changes.") infolog("Resetting repository to the last commit.") repo.reset(repo.head.target, git.GIT_RESET_HARD) commit = repo.head.peel() # TODO add color # modify exit status infolog("HEAD now is at {} {}".format(str(commit.id)[:7], commit.message.split("\n",1)[0])) else: goodlog("No files changed during the rerun.") if args.test and len(chfs) != 0: infolog("You can use `git diff' and `git status' to view changes between this and the previous run\n" "and `git reset --hard' to discard the current changes.") return errorlog("did not find anything to be rerun") sys.exit(1) def process_run(args, repo=None, do_commit=True): if repo is None: repo = open_main_repo(args.repo[0]) # TODO warning when being lenient / ask for confirmation if args.lenient == 0: clean = is_repo_clean(repo) debuglog("repo {1}: {0}'".format(repo.workdir, ("clean" if clean else "dirty"))) elif args.lenient == 1: clean = is_repo_clean_output(repo) debuglog("repo {1}: output dirs {0}'".format(repo.workdir, ("clean" if clean else "dirty"))) else: # more lenient --> do not check clean = True if not clean: errorlog("Repository {0} is in a dirty state. " \ "Please commit all your changes before running {1}.".format(repo.workdir, PROG)) sys.exit(1) with PampelConfig(repo) as conf: aux_repos = [ git.Repository(auxp) for auxp in conf.aux_repos ] clean = True for r in aux_repos: clean = clean and is_repo_clean(r) if not clean: warnlog("Auxiliary repository `{}' is in a dirty state".format(r.workdir)) if args.lenient < 2: assert clean conf.run_count += 1 infolog("pampel run #{}".format(conf.run_count)) if not args.contin: clear_output_dirs(repo) cmd = conf.command if conf.import_script: infolog("importing script {0}".format(cmd[0])) try: with _redirect_12(): old_path = list(sys.path) sys.path.append(os.path.dirname(cmd[0])) # allows "local" imports from cmd[0] script = imp.load_source("script", cmd[0]) sys.path = old_path # TODO maybe reset this later s.t. 
script can make delayed imports except Exception as e: fatallog("importing script {} failed:\n{}".format(cmd[0], e)) raise script_args = {} for kv in args.args: k, v = kv.split("=", 2) script_args[k] = v try: debuglog("init script") with _redirect_12(): script.init(script_args) # TODO output certain messages only on error infolog("running script") startts = datetime.datetime.now() with _redirect_12(): stat = script.run() endts = datetime.datetime.now() # getting parameters at the end, since they might change during # program execution infolog("getting parameters") with _redirect_12(): params = script.get_params() except Exception as e: errorlog("script threw exception: {}".format(e)) raise else: infolog("running command {0}".format(" ".join(cmd + args.args))) startts = datetime.datetime.now() with _redirect_12(): stat = subprocess.call(cmd + args.args, cwd=repo.workdir) endts = datetime.datetime.now() if stat != 0: warnlog("command exited with status {0}".format(stat)) infolog("getting parameters") # TODO add script params for get-params with _redirect_12(): param_str = subprocess.check_output(cmd + [ "get-params" ] + args.args, cwd=repo.workdir) params = json.loads(param_str.decode("utf-8")) if not args.test: conf.stage() # generating commit message enc = JsonFormattedEncoder(-1) enc2 = JsonFormattedEncoder() enc2.start_obj() enc2.add_props_table([ ("cmdline params", args.cmdline ), ("run count", conf.run_count), None, ("command exit status", stat), ("start timestamp", startts), ("end timestamp", endts), ("time difference", endts-startts) ]) enc2.end_obj() # info_json = str(enc2) enc.start_obj() enc.add_encoded_prop("run info", str(enc2)) if aux_repos: json_ar = [ { "path": a.workdir, # TODO change to .git path "branch": "/".join(a.head.name.split("/")[2:]), "commit": a.head.peel().id, "dirty": not is_repo_clean(a) } for a in aux_repos ] enc.blank_line() enc.add_encoded_prop("aux repos", enc.encode(json_ar)) enc2.reset() enc2.start_obj() 
enc2.add_props_table(sorted(params.items())) enc2.end_obj() enc.blank_line() enc.add_encoded_prop("params", str(enc2)) enc.end_obj() message = "pampel [run #{}]\n\n".format(conf.run_count) + str(enc) debuglog(message) if do_commit: if args.lenient == 0 and not check_command_output(repo): errorlog("The command wrote something outside the output directories.\n" "I will leave the I/O repo in the current state.\n" "It is your responsibility to clean up the repo.") cfn = os.path.join(repo.workdir, ".pampel-commit") with open(cfn, "w") as cfh: cfh.write(message) infolog("Wrote the commit message to {0}".format(cfn)) elif stat != 0: errorlog("The command finished with a nonzero status.\nI will not commit this run.") cfn = os.path.join(repo.workdir, ".pampel-commit") with open(cfn, "w") as cfh: cfh.write(message) infolog("Wrote the commit message to {0}".format(cfn)) elif args.test: warnlog("I am in test mode. The current program run has not been committed.\n" "If you go on you could possibly lose data.\n" "To commit manually, use: {} commit".format(PROG)) cfn = os.path.join(repo.workdir, ".pampel-commit") with open(cfn, "w") as cfh: cfh.write(message) infolog("Wrote the commit message to {0}".format(cfn)) else: git_stage(repo) tree = repo.index.write_tree() oid = repo.create_commit('HEAD', author, committer, message, tree, [ repo.head.peel().id ]) # TODO: print error message for duplicate tags # repo.create_tag("pampel-run-{0}".format(conf.run_count), oid, git.GIT_OBJ_COMMIT, committer, "") def add_snapshot_multi(repo, args): snapp = args.snapshot newest_ref = args.to_ref[0] oldest_ref = args.from_ref[0] max_run = None min_run = None start_ref = None end_ref = None if newest_ref is None: start_ref = repo.head.target else: try: max_run = int(newest_ref) except ValueError: start_ref = repo.revparse_single(newest_ref).target else: start_ref = repo.head.target if oldest_ref is None: pass else: try: min_run = int(oldest_ref) except ValueError: end_ref = 
repo.revparse_single(oldest_ref).target else: pass # get pampel run commits if not args.any_branch: # build branch hierarchy # TODO stop if first effective branch point is found map_commit_ref = {} # map commit id -> [ reference name, ... ] for refn in repo.listall_references(): if refn.startswith("refs/heads/"): ref = repo.lookup_reference(refn) for commit in repo.walk(ref.peel().id, git.GIT_SORT_TIME): tracelog("commit", commit.id, commit.message.splitlines()[0]) m = pampel_commit_re.match(commit.message) if m: if max_run is not None and max_run < int(m.group(1)): # skip commits which are too new continue if min_run is not None and min_run > int(m.group(1)): # skip commits which are too old break if commit.id not in map_commit_ref: map_commit_ref[commit.id] = [] map_commit_ref[commit.id].append(refn) tracelog("{} -- {}".format(refn, commit.message.splitlines()[0])) if m: if min_run is not None and min_run == int(m.group(1)): # skip commits which are too old break if commit.id == end_ref: break commits = [] curr_branch_refs = None for commit in repo.walk(start_ref, git.GIT_SORT_TIME): m = pampel_commit_re.match(commit.message) if m: if max_run is not None and max_run < int(m.group(1)): # skip commits which are too new continue if min_run is not None and min_run > int(m.group(1)): # skip commits which are too old break if (not args.any_branch) and curr_branch_refs is None: # save refs of first commit that was not skipped curr_branch_refs = map_commit_ref[commit.id] json_obj = commit_to_json(commit) params = json_obj["params"] run_info = json_obj["run info"] commits.append((commit, params, run_info)) if min_run is not None and min_run == int(m.group(1)): # skip commits which are too old break if commit.id == end_ref: break if (not args.any_branch) \ and curr_branch_refs is not None \ and commit.id in map_commit_ref \ and map_commit_ref[commit.id] != curr_branch_refs: break common_params, unique_params = get_common_params(commits) info_content_dist = {} info_content 
= { "common parameters": common_params, "distinct parameters": info_content_dist } param_map = format_params(unique_params) # equivalence classes of same parameters classes = [] # list of tuples(params, [(commit, run_info)]) for co, par, inf in commits: for cls in classes: if cls[0] == par: cls[1].append((co, inf)) break else: tracelog("new class", inf["run count"], par) classes.append((par, [(co, inf)])) value_map = format_values(unique_params, [ c[0] for c in classes ]) with PampelConfig(repo) as conf: new_repos = [] map_commit_tag = {} for r in repo.listall_references(): if not r.startswith("refs/tags/"): continue n = r[len("refs/tags/"):] map_commit_tag[repo.lookup_reference(r).get_object().id] = n for par, coms in classes: # choose latest commit from each class commit, run_info = max(coms, key=lambda c: c[0].commit_time) # get distinctive properties props = {} for k, v in par.items(): if k not in common_params: props[k] = v try: tag = map_commit_tag[commit.id] except KeyError: tag = None dirname = get_snapshot_dir(props, run_info, tag, param_map, value_map) assert dirname not in info_content_dist info_content_dist[dirname] = props path = os.path.join(snapp, dirname) bn = "pampel-branch-{0}".format(run_info["run count"]) conf.create_snapshot(path, commit, bn) # TODO add more info, like tag or run count new_repos.append(path) conf.stage() with open(os.path.join(snapp, "pampel-snap.json"), "w") as fh: json.dump(info_content, fh, indent=2, sort_keys=True) return new_repos def add_snapshot_single(repo, args): snapp = args.snapshot new_repos = [] branch_names = [] commits = [] for single_ref_s in args.ref: commit = None try: run_count = int(single_ref_s) snap_dir = "pampel-run-{:03}".format(run_count) branch_name = "pampel-branch-{}".format(run_count) except ValueError: # TODO better dir and branch names commit = repo.revparse_single(single_ref_s) snap_dir = single_ref_s branch_name = "pampel-branch-{}".format(single_ref_s) else: # search for commit with given 
run_count for cmt in repo.walk(repo.head.target, git.GIT_SORT_TIME): m = pampel_commit_re.match(cmt.message) if m: if run_count == int(m.group(1)): debuglog("{} -- {}".format(run_count, cmt.message.splitlines()[0])) commit = cmt break if run_count == int(m.group(1)): # skip commits which are too old break assert commit is not None new_repos.append(os.path.join(snapp, snap_dir)) branch_names.append(branch_name) commits.append(commit) with PampelConfig(repo) as conf: for path, bn, cmt in zip(new_repos, branch_names, commits): conf.create_snapshot(path, cmt, bn) # TODO add more info, like tag or run count conf.stage() return new_repos # TODO also list and verify snapshots def process_snap(args): repop = args.repo[0] repo = open_main_repo(repop) clean = is_repo_clean(repo) verblog("repo {1}: {0}'".format(repo.workdir, ("clean" if clean else "dirty"))) # assert clean if args.action == "add": # TODO new param --subdirs if len(args.ref) != 0: # check arguments assert (not args.any_branch) \ and (not args.multi) \ and args.from_ref[0] is None \ and args.to_ref[0] is None # TODO let user choose exact output directory (not only parent dir) new_repos = [ add_snapshot_single(repo, args) ] else: new_repos = add_snapshot_multi(repo, args) message = "pampel [snap add]\n\n" \ + json.dumps({ "paths": new_repos }, indent=2) elif args.action == "delete": # TODO handle pampel-snap.json specially with PampelConfig(repo) as conf: for snap in args.snapshots: if os.path.isdir(snap): try: conf.remove_snapshot(snap) except git.GitError as e: errorlog(e.message) except KeyError: pass else: infolog("snapshot {} removed".format(snap)) else: warnlog("{} is not a directory".format(snap)) conf.stage() message = "pampel [snap delete]\n\n" \ + json.dumps(args.snapshots, indent=2) else: raise ValueError("unknown action: {0}".format(args.action)) debuglog(message) tree = repo.index.write_tree() repo.create_commit('HEAD', author, committer, message, tree, [ repo.head.peel().id ]) def 
process_comm(args): repop = args.repo[0] repo = git.Repository(repop) assert not repo.is_bare clean = is_repo_clean(repo) infolog("repo {1}: {0}'".format(repo.workdir, ("clean" if clean else "dirty"))) # TODO add some error messages with open(os.path.join(repo.workdir, ".pampel-commit")) as cmf: msg_1st = cmf.readline() msg_body = cmf.read() json_obj = json.loads(msg_body) run_count = json_obj["run info"]["run count"] git_stage(repo) tree = repo.index.write_tree() oid = repo.create_commit('HEAD', author, committer, msg_1st + msg_body, tree, [ repo.head.peel().id ]) # TODO: print error message for duplicate tags repo.create_tag("pampel-run-{0}".format(run_count), oid, git.GIT_OBJ_COMMIT, committer, "") def _run_main(): global parser, _LOGLEVEL, _COLORED_OUTPUT parser = argparse.ArgumentParser(description="Keep track of program inputs and outputs") # common parser_common = argparse.ArgumentParser(description="Common options", add_help=False) parser_common.add_argument("--repo", "-r", nargs=1, help="git repository where changes are written to", default=["."]) parser_common.add_argument("--verbose", "-v", action="count", help="be more verbose", default=_LOGLEVEL) parser_common.add_argument("--color", choices=["auto", "always", "never"], help="generate colored output", default="auto") subparsers = parser.add_subparsers(dest="subcommand", help="subcommands") subparsers.required = True # running commands parser_run = subparsers.add_parser("run", help="run a program", parents=[parser_common]) parser_run.add_argument("--continue", "-c", action="store_true", help="do not clear output directories before running COMMAND", dest="contin") parser_run.add_argument("--lenient", "-l", action="count", help="complain less if the repository is not clean", default=0) parser_run.add_argument("--test", "-t", action="store_true", help="do not commit changes after run") parser_run.add_argument("args", help="arguments for the command to run", nargs="*", default=[]) 
parser_run.set_defaults(func=process_run) # rerun last command parser_rerun = subparsers.add_parser("rerun", help="rerun command", parents=[parser_common]) parser_rerun.add_argument("--continue", "-c", action="store_true", help="do not clear output directories before running COMMAND", dest="contin") parser_rerun.add_argument("--lenient", "-l", action="count", help="complain less if the repository is not clean", default=0) parser_rerun.add_argument("--test", "-t", action="store_true", help="do not commit changes after run") parser_rerun.add_argument("--ignore", action="append", help="ignore changes to matching paths") parser_rerun.add_argument("--dry-run", "-n", action="store_true", help="do not actually run, only print what would have ben run.") parser_rerun.set_defaults(func=process_rerun) # check parser_check = subparsers.add_parser("check", help="check configuration and repositories", parents=[parser_common]) parser_check.add_argument("--cleanup", action="store_true", help="remove repositories which can not be found from the configuration file") parser_check.set_defaults(func=process_check) # config parser_conf = subparsers.add_parser("conf", help="set configuration options", parents=[parser_common]) parser_conf.add_argument("--aux-repo", "-a", action="append", help="auxiliary git repositories, e.g., where source code sits", default=[]) parser_conf.add_argument("--command", help="command to run", nargs="+", default=[]) parser_conf.add_argument("--import", action="store_true", help="import command as a python script instead of running it as a new process", dest="import_script") parser_conf.set_defaults(func=process_conf) # snapshots parser_snap = subparsers.add_parser("snap", help="manage snapshots", parents=[parser_common]) subparsers2 = parser_snap.add_subparsers(dest="action", help="action on snapshot") subparsers2.required = True parser_snap_add = subparsers2.add_parser("add", help="create shared git repo at the specified location", parents=[parser_common]) 
parser_snap_add.add_argument("snapshot", metavar="SNAPSHOT_PATH") group_single = parser_snap_add.add_argument_group("single revision selection") group_single.add_argument("--ref", "-R", action="append", default=[], help="the revision to make a snapshof of") group_multi = parser_snap_add.add_argument_group("multiple revision selection") group_multi.add_argument("--from", nargs=1, default=[None], dest="from_ref", help="oldest revision of which a snapshot is made") group_multi.add_argument("--to", nargs=1, default=[None], dest="to_ref", help="newest revision of which a snapshot is made") group_multi.add_argument("--any-branch", action="store_true", help="look for matching revisions in any branch") group_multi.add_argument("--multi", action="store_true", help="select multiple revisions to make snapshots of") parser_snap_del = subparsers2.add_parser("delete", help="delete shared git repo at the specified location", parents=[parser_common]) parser_snap_del.add_argument("snapshots", metavar="SNAPSHOT_PATH", nargs="+") parser_snap.set_defaults(func=process_snap) # commit parser_comm = subparsers.add_parser("commit", help="commit changes", parents=[parser_common]) parser_comm.set_defaults(func=process_comm) args = parser.parse_args() _LOGLEVEL = args.verbose _COLORED_OUTPUT = args.color args.cmdline = sys.argv args.func(args) if __name__ == "__main__": _run_main()
gpl-3.0
Jorge-Rodriguez/ansible
lib/ansible/modules/network/f5/bigip_ssl_certificate.py
9
17763
#!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright: (c) 2017, F5 Networks Inc. # GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['stableinterface'], 'supported_by': 'certified'} DOCUMENTATION = r''' --- module: bigip_ssl_certificate short_description: Import/Delete certificates from BIG-IP description: - This module will import/delete SSL certificates on BIG-IP LTM. Certificates can be imported from certificate and key files on the local disk, in PEM format. version_added: 2.2 options: content: description: - Sets the contents of a certificate directly to the specified value. This is used with lookup plugins or for anything with formatting or - C(content) must be provided when C(state) is C(present). aliases: ['cert_content'] state: description: - Certificate state. This determines if the provided certificate and key is to be made C(present) on the device or C(absent). default: present choices: - present - absent name: description: - SSL Certificate Name. This is the cert name used when importing a certificate into the F5. It also determines the filenames of the objects on the LTM. required: True issuer_cert: description: - Issuer certificate used for OCSP monitoring. - This parameter is only valid on versions of BIG-IP 13.0.0 or above. version_added: 2.5 partition: description: - Device partition to manage resources on. default: Common version_added: 2.5 notes: - This module does not behave like other modules that you might include in roles where referencing files or templates first looks in the role's files or templates directory. To have it behave that way, use the Ansible file or template lookup (see Examples). The lookups behave as expected in a role context. 
extends_documentation_fragment: f5 requirements: - BIG-IP >= v12 author: - Tim Rupp (@caphrim007) - Wojciech Wypior (@wojtek0806) ''' EXAMPLES = r''' - name: Use a file lookup to import PEM Certificate bigip_ssl_certificate: name: certificate-name state: present content: "{{ lookup('file', '/path/to/cert.crt') }}" provider: server: lb.mydomain.com user: admin password: secret delegate_to: localhost - name: Use a file lookup to import CA certificate chain bigip_ssl_certificate: name: ca-chain-name state: present content: "{{ lookup('file', '/path/to/ca-chain.crt') }}" provider: server: lb.mydomain.com user: admin password: secret delegate_to: localhost - name: Delete Certificate bigip_ssl_certificate: name: certificate-name state: absent provider: server: lb.mydomain.com user: admin password: secret delegate_to: localhost ''' RETURN = r''' cert_name: description: The name of the certificate that the user provided returned: created type: str sample: cert1 filename: description: - The name of the SSL certificate. returned: created type: str sample: cert1.crt checksum: description: SHA1 checksum of the cert that was provided. returned: changed and created type: str sample: f7ff9e8b7bb2e09b70935a5d785e0cc5d9d0abf0 source_path: description: Path on BIG-IP where the source of the certificate is stored. 
returned: created type: str sample: /var/config/rest/downloads/cert1.crt ''' import hashlib import os import re from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.basic import env_fallback try: from library.module_utils.network.f5.bigip import F5RestClient from library.module_utils.network.f5.common import F5ModuleError from library.module_utils.network.f5.common import AnsibleF5Parameters from library.module_utils.network.f5.common import cleanup_tokens from library.module_utils.network.f5.common import f5_argument_spec from library.module_utils.network.f5.common import exit_json from library.module_utils.network.f5.common import fail_json from library.module_utils.network.f5.common import fq_name from library.module_utils.network.f5.common import transform_name from library.module_utils.network.f5.icontrol import upload_file except ImportError: from ansible.module_utils.network.f5.bigip import F5RestClient from ansible.module_utils.network.f5.common import F5ModuleError from ansible.module_utils.network.f5.common import AnsibleF5Parameters from ansible.module_utils.network.f5.common import cleanup_tokens from ansible.module_utils.network.f5.common import f5_argument_spec from ansible.module_utils.network.f5.common import exit_json from ansible.module_utils.network.f5.common import fail_json from ansible.module_utils.network.f5.common import fq_name from ansible.module_utils.network.f5.common import transform_name from ansible.module_utils.network.f5.icontrol import upload_file try: from StringIO import StringIO except ImportError: from io import StringIO class Parameters(AnsibleF5Parameters): download_path = '/var/config/rest/downloads' api_map = { 'sourcePath': 'source_path', 'issuerCert': 'issuer_cert', } updatables = [ 'content', 'issuer_cert', 'source_path', ] returnables = [ 'filename', 'checksum', 'source_path', 'issuer_cert', ] api_attributes = [ 'issuerCert', 'sourcePath', ] class ApiParameters(Parameters): @property def 
checksum(self): if self._values['checksum'] is None: return None pattern = r'SHA1:\d+:(?P<value>[\w+]{40})' matches = re.match(pattern, self._values['checksum']) if matches: return matches.group('value') else: return None @property def filename(self): return self._values['name'] class ModuleParameters(Parameters): def _get_hash(self, content): k = hashlib.sha1() s = StringIO(content) while True: data = s.read(1024) if not data: break k.update(data.encode('utf-8')) return k.hexdigest() @property def issuer_cert(self): if self._values['issuer_cert'] is None: return None name = fq_name(self.partition, self._values['issuer_cert']) if name.endswith('.crt'): return name else: return name + '.crt' @property def checksum(self): if self.content is None: return None return self._get_hash(self.content) @property def filename(self): if self.name.endswith('.crt'): return self.name else: return self.name + '.crt' @property def source_path(self): result = 'file://' + os.path.join( self.download_path, self.filename ) return result class Changes(Parameters): def to_return(self): result = {} try: for returnable in self.returnables: result[returnable] = getattr(self, returnable) result = self._filter_params(result) except Exception: pass return result class ReportableChanges(Changes): pass class UsableChanges(Changes): pass class Difference(object): def __init__(self, want, have=None): self.want = want self.have = have def compare(self, param): try: result = getattr(self, param) return result except AttributeError: result = self.__default(param) return result def __default(self, param): attr1 = getattr(self.want, param) try: attr2 = getattr(self.have, param) if attr1 != attr2: return attr1 except AttributeError: return attr1 @property def source_path(self): if self.want.source_path is None: return None if self.want.source_path == self.have.source_path: if self.content: return self.want.source_path if self.want.source_path != self.have.source_path: return self.want.source_path 
@property def content(self): if self.want.checksum != self.have.checksum: result = dict( checksum=self.want.checksum, content=self.want.content ) return result class ModuleManager(object): def __init__(self, *args, **kwargs): self.module = kwargs.get('module', None) self.client = kwargs.get('client', None) self.want = ModuleParameters(params=self.module.params) self.have = ApiParameters() self.changes = UsableChanges() def exec_module(self): changed = False result = dict() state = self.want.state if state == "present": changed = self.present() elif state == "absent": changed = self.absent() reportable = ReportableChanges(params=self.changes.to_return()) changes = reportable.to_return() result.update(**changes) result.update(dict(changed=changed)) self._announce_deprecations(result) return result def _announce_deprecations(self, result): warnings = result.pop('__warnings', []) for warning in warnings: self.module.deprecate( msg=warning['msg'], version=warning['version'] ) def present(self): if self.exists(): return self.update() else: return self.create() def create(self): self._set_changed_options() if self.module.check_mode: return True self.create_on_device() return True def should_update(self): result = self._update_changed_options() if result: return True return False def update(self): self.have = self.read_current_from_device() if not self.should_update(): return False if self.module.check_mode: return True self.update_on_device() return True def absent(self): if self.exists(): return self.remove() return False def remove(self): if self.module.check_mode: return True self.remove_from_device() return True def _set_changed_options(self): changed = {} for key in Parameters.returnables: if getattr(self.want, key) is not None: changed[key] = getattr(self.want, key) if changed: self.changes = UsableChanges(params=changed) def _update_changed_options(self): diff = Difference(self.want, self.have) updatables = Parameters.updatables changed = dict() for k in 
updatables: change = diff.compare(k) if change is None: continue else: if isinstance(change, dict): changed.update(change) else: changed[k] = change if changed: self.changes = UsableChanges(params=changed) return True return False def exists(self): uri = "https://{0}:{1}/mgmt/tm/sys/file/ssl-cert/{2}".format( self.client.provider['server'], self.client.provider['server_port'], transform_name(self.want.partition, self.want.filename) ) resp = self.client.api.get(uri) try: response = resp.json() except ValueError: return False if resp.status == 404 or 'code' in response and response['code'] == 404: return False return True def upload_file_to_device(self, content, name): url = 'https://{0}:{1}/mgmt/shared/file-transfer/uploads'.format( self.client.provider['server'], self.client.provider['server_port'] ) try: upload_file(self.client, url, content, name) except F5ModuleError: raise F5ModuleError( "Failed to upload the file." ) def update_on_device(self): content = StringIO(self.want.content) self.upload_file_to_device(content, self.want.filename) params = self.changes.api_params() uri = "https://{0}:{1}/mgmt/tm/sys/file/ssl-cert/{2}".format( self.client.provider['server'], self.client.provider['server_port'], transform_name(self.want.partition, self.want.filename) ) resp = self.client.api.put(uri, json=params) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in response and response['code'] == 400: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) def create_on_device(self): content = StringIO(self.want.content) self.upload_file_to_device(content, self.want.filename) uri = "https://{0}:{1}/mgmt/tm/sys/file/ssl-cert/".format( self.client.provider['server'], self.client.provider['server_port'], ) params = dict( sourcePath=self.want.source_path, name=self.want.filename, partition=self.want.partition ) resp = self.client.api.post(uri, json=params) try: response = resp.json() 
except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in response and response['code'] in [400, 403]: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) # This needs to be done because of the way that BIG-IP creates certificates. # # The extra params (such as OCSP and issuer stuff) are not available in the # payload. In a nutshell, the available resource attributes *change* after # a create so that *more* are available. params = self.want.api_params() if params: uri = "https://{0}:{1}/mgmt/tm/sys/file/ssl-cert/{2}".format( self.client.provider['server'], self.client.provider['server_port'], transform_name(self.want.partition, self.want.filename) ) resp = self.client.api.put(uri, json=params) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in response and response['code'] == 400: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) def read_current_from_device(self): uri = "https://{0}:{1}/mgmt/tm/sys/file/ssl-cert/{2}".format( self.client.provider['server'], self.client.provider['server_port'], transform_name(self.want.partition, self.want.filename) ) query = '?expandSubcollections=true' resp = self.client.api.get(uri + query) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in response and response['code'] == 400: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) return ApiParameters(params=response) def remove_from_device(self): uri = "https://{0}:{1}/mgmt/tm/sys/file/ssl-cert/{2}".format( self.client.provider['server'], self.client.provider['server_port'], transform_name(self.want.partition, self.want.filename) ) response = self.client.api.delete(uri) if response.status == 200: return True raise F5ModuleError(response.content) class ArgumentSpec(object): def __init__(self): self.supports_check_mode = 
True argument_spec = dict( name=dict( required=True ), content=dict(aliases=['cert_content']), state=dict( default='present', choices=['absent', 'present'] ), issuer_cert=dict(), partition=dict( default='Common', fallback=(env_fallback, ['F5_PARTITION']) ) ) self.argument_spec = {} self.argument_spec.update(f5_argument_spec) self.argument_spec.update(argument_spec) def main(): spec = ArgumentSpec() module = AnsibleModule( argument_spec=spec.argument_spec, supports_check_mode=spec.supports_check_mode ) client = F5RestClient(**module.params) try: mm = ModuleManager(module=module, client=client) results = mm.exec_module() cleanup_tokens(client) exit_json(module, results, client) except F5ModuleError as ex: cleanup_tokens(client) fail_json(module, ex, client) if __name__ == '__main__': main()
gpl-3.0
seungjin/app5-seungjin-net.appspot.com
django/template/defaultfilters.py
150
29467
"""Default variable filters.""" import re from decimal import Decimal, InvalidOperation, ROUND_HALF_UP import random as random_module try: from functools import wraps except ImportError: from django.utils.functional import wraps # Python 2.4 fallback. from django.template.base import Variable, Library from django.conf import settings from django.utils import formats from django.utils.encoding import force_unicode, iri_to_uri from django.utils.html import conditional_escape from django.utils.safestring import mark_safe, SafeData from django.utils.translation import ugettext, ungettext register = Library() ####################### # STRING DECORATOR # ####################### def stringfilter(func): """ Decorator for filters which should only receive unicode objects. The object passed as the first positional argument will be converted to a unicode object. """ def _dec(*args, **kwargs): if args: args = list(args) args[0] = force_unicode(args[0]) if isinstance(args[0], SafeData) and getattr(func, 'is_safe', False): return mark_safe(func(*args, **kwargs)) return func(*args, **kwargs) # Include a reference to the real function (used to check original # arguments by the template parser). _dec._decorated_function = getattr(func, '_decorated_function', func) for attr in ('is_safe', 'needs_autoescape'): if hasattr(func, attr): setattr(_dec, attr, getattr(func, attr)) return wraps(func)(_dec) ################### # STRINGS # ################### def addslashes(value): """ Adds slashes before quotes. Useful for escaping strings in CSV, for example. Less useful for escaping JavaScript; use the ``escapejs`` filter instead. 
""" return value.replace('\\', '\\\\').replace('"', '\\"').replace("'", "\\'") addslashes.is_safe = True addslashes = stringfilter(addslashes) def capfirst(value): """Capitalizes the first character of the value.""" return value and value[0].upper() + value[1:] capfirst.is_safe=True capfirst = stringfilter(capfirst) def escapejs(value): """Hex encodes characters for use in JavaScript strings.""" from django.utils.html import escapejs return escapejs(value) escapejs = stringfilter(escapejs) def fix_ampersands(value): """Replaces ampersands with ``&amp;`` entities.""" from django.utils.html import fix_ampersands return fix_ampersands(value) fix_ampersands.is_safe=True fix_ampersands = stringfilter(fix_ampersands) # Values for testing floatformat input against infinity and NaN representations, # which differ across platforms and Python versions. Some (i.e. old Windows # ones) are not recognized by Decimal but we want to return them unchanged vs. # returning an empty string as we do for completley invalid input. Note these # need to be built up from values that are not inf/nan, since inf/nan values do # not reload properly from .pyc files on Windows prior to some level of Python 2.5 # (see Python Issue757815 and Issue1080440). pos_inf = 1e200 * 1e200 neg_inf = -1e200 * 1e200 nan = (1e200 * 1e200) / (1e200 * 1e200) special_floats = [str(pos_inf), str(neg_inf), str(nan)] def floatformat(text, arg=-1): """ Displays a float to a specified number of decimal places. 
If called without an argument, it displays the floating point number with one decimal place -- but only if there's a decimal place to be displayed: * num1 = 34.23234 * num2 = 34.00000 * num3 = 34.26000 * {{ num1|floatformat }} displays "34.2" * {{ num2|floatformat }} displays "34" * {{ num3|floatformat }} displays "34.3" If arg is positive, it will always display exactly arg number of decimal places: * {{ num1|floatformat:3 }} displays "34.232" * {{ num2|floatformat:3 }} displays "34.000" * {{ num3|floatformat:3 }} displays "34.260" If arg is negative, it will display arg number of decimal places -- but only if there are places to be displayed: * {{ num1|floatformat:"-3" }} displays "34.232" * {{ num2|floatformat:"-3" }} displays "34" * {{ num3|floatformat:"-3" }} displays "34.260" If the input float is infinity or NaN, the (platform-dependent) string representation of that value will be displayed. """ try: input_val = force_unicode(text) d = Decimal(input_val) except UnicodeEncodeError: return u'' except InvalidOperation: if input_val in special_floats: return input_val try: d = Decimal(force_unicode(float(text))) except (ValueError, InvalidOperation, TypeError, UnicodeEncodeError): return u'' try: p = int(arg) except ValueError: return input_val try: m = int(d) - d except (ValueError, OverflowError, InvalidOperation): return input_val if not m and p < 0: return mark_safe(formats.number_format(u'%d' % (int(d)), 0)) if p == 0: exp = Decimal(1) else: exp = Decimal(u'1.0') / (Decimal(10) ** abs(p)) try: # Avoid conversion to scientific notation by accessing `sign`, `digits` # and `exponent` from `Decimal.as_tuple()` directly. 
sign, digits, exponent = d.quantize(exp, ROUND_HALF_UP).as_tuple() digits = [unicode(digit) for digit in reversed(digits)] while len(digits) <= abs(exponent): digits.append(u'0') digits.insert(-exponent, u'.') if sign: digits.append(u'-') number = u''.join(reversed(digits)) return mark_safe(formats.number_format(number, abs(p))) except InvalidOperation: return input_val floatformat.is_safe = True def iriencode(value): """Escapes an IRI value for use in a URL.""" return force_unicode(iri_to_uri(value)) iriencode.is_safe = True iriencode = stringfilter(iriencode) def linenumbers(value, autoescape=None): """Displays text with line numbers.""" from django.utils.html import escape lines = value.split(u'\n') # Find the maximum width of the line count, for use with zero padding # string format command width = unicode(len(unicode(len(lines)))) if not autoescape or isinstance(value, SafeData): for i, line in enumerate(lines): lines[i] = (u"%0" + width + u"d. %s") % (i + 1, line) else: for i, line in enumerate(lines): lines[i] = (u"%0" + width + u"d. %s") % (i + 1, escape(line)) return mark_safe(u'\n'.join(lines)) linenumbers.is_safe = True linenumbers.needs_autoescape = True linenumbers = stringfilter(linenumbers) def lower(value): """Converts a string into all lowercase.""" return value.lower() lower.is_safe = True lower = stringfilter(lower) def make_list(value): """ Returns the value turned into a list. For an integer, it's a list of digits. For a string, it's a list of characters. """ return list(value) make_list.is_safe = False make_list = stringfilter(make_list) def slugify(value): """ Normalizes string, converts to lowercase, removes non-alpha characters, and converts spaces to hyphens. 
""" import unicodedata value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore') value = unicode(re.sub('[^\w\s-]', '', value).strip().lower()) return mark_safe(re.sub('[-\s]+', '-', value)) slugify.is_safe = True slugify = stringfilter(slugify) def stringformat(value, arg): """ Formats the variable according to the arg, a string formatting specifier. This specifier uses Python string formating syntax, with the exception that the leading "%" is dropped. See http://docs.python.org/lib/typesseq-strings.html for documentation of Python string formatting """ try: return (u"%" + unicode(arg)) % value except (ValueError, TypeError): return u"" stringformat.is_safe = True def title(value): """Converts a string into titlecase.""" t = re.sub("([a-z])'([A-Z])", lambda m: m.group(0).lower(), value.title()) return re.sub("\d([A-Z])", lambda m: m.group(0).lower(), t) title.is_safe = True title = stringfilter(title) def truncatewords(value, arg): """ Truncates a string after a certain number of words. Argument: Number of words to truncate after. Newlines within the string are removed. """ from django.utils.text import truncate_words try: length = int(arg) except ValueError: # Invalid literal for int(). return value # Fail silently. return truncate_words(value, length) truncatewords.is_safe = True truncatewords = stringfilter(truncatewords) def truncatewords_html(value, arg): """ Truncates HTML after a certain number of words. Argument: Number of words to truncate after. Newlines in the HTML are preserved. """ from django.utils.text import truncate_html_words try: length = int(arg) except ValueError: # invalid literal for int() return value # Fail silently. 
return truncate_html_words(value, length) truncatewords_html.is_safe = True truncatewords_html = stringfilter(truncatewords_html) def upper(value): """Converts a string into all uppercase.""" return value.upper() upper.is_safe = False upper = stringfilter(upper) def urlencode(value, safe=None): """ Escapes a value for use in a URL. Takes an optional ``safe`` parameter used to determine the characters which should not be escaped by Django's ``urlquote`` method. If not provided, the default safe characters will be used (but an empty string can be provided when *all* characters should be escaped). """ from django.utils.http import urlquote kwargs = {} if safe is not None: kwargs['safe'] = safe return urlquote(value, **kwargs) urlencode.is_safe = False urlencode = stringfilter(urlencode) def urlize(value, autoescape=None): """Converts URLs in plain text into clickable links.""" from django.utils.html import urlize return mark_safe(urlize(value, nofollow=True, autoescape=autoescape)) urlize.is_safe=True urlize.needs_autoescape = True urlize = stringfilter(urlize) def urlizetrunc(value, limit, autoescape=None): """ Converts URLs into clickable links, truncating URLs to the given character limit, and adding 'rel=nofollow' attribute to discourage spamming. Argument: Length to truncate URLs to. """ from django.utils.html import urlize return mark_safe(urlize(value, trim_url_limit=int(limit), nofollow=True, autoescape=autoescape)) urlizetrunc.is_safe = True urlizetrunc.needs_autoescape = True urlizetrunc = stringfilter(urlizetrunc) def wordcount(value): """Returns the number of words.""" return len(value.split()) wordcount.is_safe = False wordcount = stringfilter(wordcount) def wordwrap(value, arg): """ Wraps words at specified line length. Argument: number of characters to wrap the text at. 
""" from django.utils.text import wrap return wrap(value, int(arg)) wordwrap.is_safe = True wordwrap = stringfilter(wordwrap) def ljust(value, arg): """ Left-aligns the value in a field of a given width. Argument: field size. """ return value.ljust(int(arg)) ljust.is_safe = True ljust = stringfilter(ljust) def rjust(value, arg): """ Right-aligns the value in a field of a given width. Argument: field size. """ return value.rjust(int(arg)) rjust.is_safe = True rjust = stringfilter(rjust) def center(value, arg): """Centers the value in a field of a given width.""" return value.center(int(arg)) center.is_safe = True center = stringfilter(center) def cut(value, arg): """ Removes all values of arg from the given string. """ safe = isinstance(value, SafeData) value = value.replace(arg, u'') if safe and arg != ';': return mark_safe(value) return value cut = stringfilter(cut) ################### # HTML STRINGS # ################### def escape(value): """ Marks the value as a string that should not be auto-escaped. """ from django.utils.safestring import mark_for_escaping return mark_for_escaping(value) escape.is_safe = True escape = stringfilter(escape) def force_escape(value): """ Escapes a string's HTML. This returns a new string containing the escaped characters (as opposed to "escape", which marks the content for later possible escaping). """ from django.utils.html import escape return mark_safe(escape(value)) force_escape = stringfilter(force_escape) force_escape.is_safe = True def linebreaks(value, autoescape=None): """ Replaces line breaks in plain text with appropriate HTML; a single newline becomes an HTML line break (``<br />``) and a new line followed by a blank line becomes a paragraph break (``</p>``). 
""" from django.utils.html import linebreaks autoescape = autoescape and not isinstance(value, SafeData) return mark_safe(linebreaks(value, autoescape)) linebreaks.is_safe = True linebreaks.needs_autoescape = True linebreaks = stringfilter(linebreaks) def linebreaksbr(value, autoescape=None): """ Converts all newlines in a piece of plain text to HTML line breaks (``<br />``). """ if autoescape and not isinstance(value, SafeData): from django.utils.html import escape value = escape(value) return mark_safe(value.replace('\n', '<br />')) linebreaksbr.is_safe = True linebreaksbr.needs_autoescape = True linebreaksbr = stringfilter(linebreaksbr) def safe(value): """ Marks the value as a string that should not be auto-escaped. """ return mark_safe(value) safe.is_safe = True safe = stringfilter(safe) def safeseq(value): """ A "safe" filter for sequences. Marks each element in the sequence, individually, as safe, after converting them to unicode. Returns a list with the results. """ return [mark_safe(force_unicode(obj)) for obj in value] safeseq.is_safe = True def removetags(value, tags): """Removes a space separated list of [X]HTML tags from the output.""" tags = [re.escape(tag) for tag in tags.split()] tags_re = u'(%s)' % u'|'.join(tags) starttag_re = re.compile(ur'<%s(/?>|(\s+[^>]*>))' % tags_re, re.U) endtag_re = re.compile(u'</%s>' % tags_re) value = starttag_re.sub(u'', value) value = endtag_re.sub(u'', value) return value removetags.is_safe = True removetags = stringfilter(removetags) def striptags(value): """Strips all [X]HTML tags.""" from django.utils.html import strip_tags return strip_tags(value) striptags.is_safe = True striptags = stringfilter(striptags) ################### # LISTS # ################### def dictsort(value, arg): """ Takes a list of dicts, returns that list sorted by the property given in the argument. 
""" return sorted(value, key=Variable(arg).resolve) dictsort.is_safe = False def dictsortreversed(value, arg): """ Takes a list of dicts, returns that list sorted in reverse order by the property given in the argument. """ return sorted(value, key=Variable(arg).resolve, reverse=True) dictsortreversed.is_safe = False def first(value): """Returns the first item in a list.""" try: return value[0] except IndexError: return u'' first.is_safe = False def join(value, arg, autoescape=None): """ Joins a list with a string, like Python's ``str.join(list)``. """ value = map(force_unicode, value) if autoescape: value = [conditional_escape(v) for v in value] try: data = conditional_escape(arg).join(value) except AttributeError: # fail silently but nicely return value return mark_safe(data) join.is_safe = True join.needs_autoescape = True def last(value): "Returns the last item in a list" try: return value[-1] except IndexError: return u'' last.is_safe = True def length(value): """Returns the length of the value - useful for lists.""" try: return len(value) except (ValueError, TypeError): return '' length.is_safe = True def length_is(value, arg): """Returns a boolean of whether the value's length is the argument.""" try: return len(value) == int(arg) except (ValueError, TypeError): return '' length_is.is_safe = False def random(value): """Returns a random item from the list.""" return random_module.choice(value) random.is_safe = True def slice_(value, arg): """ Returns a slice of the list. Uses the same syntax as Python's list slicing; see http://diveintopython.org/native_data_types/lists.html#odbchelper.list.slice for an introduction. """ try: bits = [] for x in arg.split(u':'): if len(x) == 0: bits.append(None) else: bits.append(int(x)) return value[slice(*bits)] except (ValueError, TypeError): return value # Fail silently. 
slice_.is_safe = True def unordered_list(value, autoescape=None): """ Recursively takes a self-nested list and returns an HTML unordered list -- WITHOUT opening and closing <ul> tags. The list is assumed to be in the proper format. For example, if ``var`` contains: ``['States', ['Kansas', ['Lawrence', 'Topeka'], 'Illinois']]``, then ``{{ var|unordered_list }}`` would return:: <li>States <ul> <li>Kansas <ul> <li>Lawrence</li> <li>Topeka</li> </ul> </li> <li>Illinois</li> </ul> </li> """ if autoescape: from django.utils.html import conditional_escape escaper = conditional_escape else: escaper = lambda x: x def convert_old_style_list(list_): """ Converts old style lists to the new easier to understand format. The old list format looked like: ['Item 1', [['Item 1.1', []], ['Item 1.2', []]] And it is converted to: ['Item 1', ['Item 1.1', 'Item 1.2]] """ if not isinstance(list_, (tuple, list)) or len(list_) != 2: return list_, False first_item, second_item = list_ if second_item == []: return [first_item], True try: it = iter(second_item) # see if second item is iterable except TypeError: return list_, False old_style_list = True new_second_item = [] for sublist in second_item: item, old_style_list = convert_old_style_list(sublist) if not old_style_list: break new_second_item.extend(item) if old_style_list: second_item = new_second_item return [first_item, second_item], old_style_list def _helper(list_, tabs=1): indent = u'\t' * tabs output = [] list_length = len(list_) i = 0 while i < list_length: title = list_[i] sublist = '' sublist_item = None if isinstance(title, (list, tuple)): sublist_item = title title = '' elif i < list_length - 1: next_item = list_[i+1] if next_item and isinstance(next_item, (list, tuple)): # The next item is a sub-list. sublist_item = next_item # We've processed the next item now too. 
i += 1 if sublist_item: sublist = _helper(sublist_item, tabs+1) sublist = '\n%s<ul>\n%s\n%s</ul>\n%s' % (indent, sublist, indent, indent) output.append('%s<li>%s%s</li>' % (indent, escaper(force_unicode(title)), sublist)) i += 1 return '\n'.join(output) value, converted = convert_old_style_list(value) return mark_safe(_helper(value)) unordered_list.is_safe = True unordered_list.needs_autoescape = True ################### # INTEGERS # ################### def add(value, arg): """Adds the arg to the value.""" try: return int(value) + int(arg) except (ValueError, TypeError): try: return value + arg except: return value add.is_safe = False def get_digit(value, arg): """ Given a whole number, returns the requested digit of it, where 1 is the right-most digit, 2 is the second-right-most digit, etc. Returns the original value for invalid input (if input or argument is not an integer, or if argument is less than 1). Otherwise, output is always an integer. """ try: arg = int(arg) value = int(value) except ValueError: return value # Fail silently for an invalid argument if arg < 1: return value try: return int(str(value)[-arg]) except IndexError: return 0 get_digit.is_safe = False ################### # DATES # ################### def date(value, arg=None): """Formats a date according to the given format.""" from django.utils.dateformat import format if not value: return u'' if arg is None: arg = settings.DATE_FORMAT try: return formats.date_format(value, arg) except AttributeError: try: return format(value, arg) except AttributeError: return '' date.is_safe = False def time(value, arg=None): """Formats a time according to the given format.""" from django.utils import dateformat if value in (None, u''): return u'' if arg is None: arg = settings.TIME_FORMAT try: return formats.time_format(value, arg) except AttributeError: try: return dateformat.time_format(value, arg) except AttributeError: return '' time.is_safe = False def timesince(value, arg=None): """Formats a date as the 
time since that date (i.e. "4 days, 6 hours").""" from django.utils.timesince import timesince if not value: return u'' try: if arg: return timesince(value, arg) return timesince(value) except (ValueError, TypeError): return u'' timesince.is_safe = False def timeuntil(value, arg=None): """Formats a date as the time until that date (i.e. "4 days, 6 hours").""" from django.utils.timesince import timeuntil if not value: return u'' try: return timeuntil(value, arg) except (ValueError, TypeError): return u'' timeuntil.is_safe = False ################### # LOGIC # ################### def default(value, arg): """If value is unavailable, use given default.""" return value or arg default.is_safe = False def default_if_none(value, arg): """If value is None, use given default.""" if value is None: return arg return value default_if_none.is_safe = False def divisibleby(value, arg): """Returns True if the value is devisible by the argument.""" return int(value) % int(arg) == 0 divisibleby.is_safe = False def yesno(value, arg=None): """ Given a string mapping values for true, false and (optionally) None, returns one of those strings accoding to the value: ========== ====================== ================================== Value Argument Outputs ========== ====================== ================================== ``True`` ``"yeah,no,maybe"`` ``yeah`` ``False`` ``"yeah,no,maybe"`` ``no`` ``None`` ``"yeah,no,maybe"`` ``maybe`` ``None`` ``"yeah,no"`` ``"no"`` (converts None to False if no mapping for None is given. ========== ====================== ================================== """ if arg is None: arg = ugettext('yes,no,maybe') bits = arg.split(u',') if len(bits) < 2: return value # Invalid arg. try: yes, no, maybe = bits except ValueError: # Unpack list of wrong size (no "maybe" value provided). 
yes, no, maybe = bits[0], bits[1], bits[1] if value is None: return maybe if value: return yes return no yesno.is_safe = False ################### # MISC # ################### def filesizeformat(bytes): """ Formats the value like a 'human-readable' file size (i.e. 13 KB, 4.1 MB, 102 bytes, etc). """ try: bytes = float(bytes) except (TypeError,ValueError,UnicodeDecodeError): return ungettext("%(size)d byte", "%(size)d bytes", 0) % {'size': 0} filesize_number_format = lambda value: formats.number_format(round(value, 1), 1) if bytes < 1024: return ungettext("%(size)d byte", "%(size)d bytes", bytes) % {'size': bytes} if bytes < 1024 * 1024: return ugettext("%s KB") % filesize_number_format(bytes / 1024) if bytes < 1024 * 1024 * 1024: return ugettext("%s MB") % filesize_number_format(bytes / (1024 * 1024)) if bytes < 1024 * 1024 * 1024 * 1024: return ugettext("%s GB") % filesize_number_format(bytes / (1024 * 1024 * 1024)) if bytes < 1024 * 1024 * 1024 * 1024 * 1024: return ugettext("%s TB") % filesize_number_format(bytes / (1024 * 1024 * 1024 * 1024)) return ugettext("%s PB") % filesize_number_format(bytes / (1024 * 1024 * 1024 * 1024 * 1024)) filesizeformat.is_safe = True def pluralize(value, arg=u's'): """ Returns a plural suffix if the value is not 1. By default, 's' is used as the suffix: * If value is 0, vote{{ value|pluralize }} displays "0 votes". * If value is 1, vote{{ value|pluralize }} displays "1 vote". * If value is 2, vote{{ value|pluralize }} displays "2 votes". If an argument is provided, that string is used instead: * If value is 0, class{{ value|pluralize:"es" }} displays "0 classes". * If value is 1, class{{ value|pluralize:"es" }} displays "1 class". * If value is 2, class{{ value|pluralize:"es" }} displays "2 classes". If the provided argument contains a comma, the text before the comma is used for the singular case and the text after the comma is used for the plural case: * If value is 0, cand{{ value|pluralize:"y,ies" }} displays "0 candies". 
* If value is 1, cand{{ value|pluralize:"y,ies" }} displays "1 candy". * If value is 2, cand{{ value|pluralize:"y,ies" }} displays "2 candies". """ if not u',' in arg: arg = u',' + arg bits = arg.split(u',') if len(bits) > 2: return u'' singular_suffix, plural_suffix = bits[:2] try: if int(value) != 1: return plural_suffix except ValueError: # Invalid string that's not a number. pass except TypeError: # Value isn't a string or a number; maybe it's a list? try: if len(value) != 1: return plural_suffix except TypeError: # len() of unsized object. pass return singular_suffix pluralize.is_safe = False def phone2numeric(value): """Takes a phone number and converts it in to its numerical equivalent.""" from django.utils.text import phone2numeric return phone2numeric(value) phone2numeric.is_safe = True def pprint(value): """A wrapper around pprint.pprint -- for debugging, really.""" from pprint import pformat try: return pformat(value) except Exception, e: return u"Error in formatting: %s" % force_unicode(e, errors="replace") pprint.is_safe = True # Syntax: register.filter(name of filter, callback) register.filter(add) register.filter(addslashes) register.filter(capfirst) register.filter(center) register.filter(cut) register.filter(date) register.filter(default) register.filter(default_if_none) register.filter(dictsort) register.filter(dictsortreversed) register.filter(divisibleby) register.filter(escape) register.filter(escapejs) register.filter(filesizeformat) register.filter(first) register.filter(fix_ampersands) register.filter(floatformat) register.filter(force_escape) register.filter(get_digit) register.filter(iriencode) register.filter(join) register.filter(last) register.filter(length) register.filter(length_is) register.filter(linebreaks) register.filter(linebreaksbr) register.filter(linenumbers) register.filter(ljust) register.filter(lower) register.filter(make_list) register.filter(phone2numeric) register.filter(pluralize) register.filter(pprint) 
register.filter(removetags) register.filter(random) register.filter(rjust) register.filter(safe) register.filter(safeseq) register.filter('slice', slice_) register.filter(slugify) register.filter(stringformat) register.filter(striptags) register.filter(time) register.filter(timesince) register.filter(timeuntil) register.filter(title) register.filter(truncatewords) register.filter(truncatewords_html) register.filter(unordered_list) register.filter(upper) register.filter(urlencode) register.filter(urlize) register.filter(urlizetrunc) register.filter(wordcount) register.filter(wordwrap) register.filter(yesno)
bsd-3-clause
axinging/chromium-crosswalk
third_party/cython/src/Cython/Compiler/Scanning.py
90
16183
# cython: infer_types=True, language_level=3, py2_import=True # # Cython Scanner # import os import platform import cython cython.declare(EncodedString=object, any_string_prefix=unicode, IDENT=unicode, print_function=object) from Cython import Utils from Cython.Plex.Scanners import Scanner from Cython.Plex.Errors import UnrecognizedInput from Errors import error from Lexicon import any_string_prefix, make_lexicon, IDENT from Future import print_function from StringEncoding import EncodedString debug_scanner = 0 trace_scanner = 0 scanner_debug_flags = 0 scanner_dump_file = None lexicon = None def get_lexicon(): global lexicon if not lexicon: lexicon = make_lexicon() return lexicon #------------------------------------------------------------------ py_reserved_words = [ "global", "nonlocal", "def", "class", "print", "del", "pass", "break", "continue", "return", "raise", "import", "exec", "try", "except", "finally", "while", "if", "elif", "else", "for", "in", "assert", "and", "or", "not", "is", "in", "lambda", "from", "yield", "with", "nonlocal", ] pyx_reserved_words = py_reserved_words + [ "include", "ctypedef", "cdef", "cpdef", "cimport", "DEF", "IF", "ELIF", "ELSE" ] class Method(object): def __init__(self, name): self.name = name self.__name__ = name # for Plex tracing def __call__(self, stream, text): return getattr(stream, self.name)(text) #------------------------------------------------------------------ class CompileTimeScope(object): def __init__(self, outer = None): self.entries = {} self.outer = outer def declare(self, name, value): self.entries[name] = value def update(self, other): self.entries.update(other) def lookup_here(self, name): return self.entries[name] def __contains__(self, name): return name in self.entries def lookup(self, name): try: return self.lookup_here(name) except KeyError: outer = self.outer if outer: return outer.lookup(name) else: raise def initial_compile_time_env(): benv = CompileTimeScope() names = ('UNAME_SYSNAME', 
'UNAME_NODENAME', 'UNAME_RELEASE', 'UNAME_VERSION', 'UNAME_MACHINE') for name, value in zip(names, platform.uname()): benv.declare(name, value) try: import __builtin__ as builtins except ImportError: import builtins names = ('False', 'True', 'abs', 'all', 'any', 'ascii', 'bin', 'bool', 'bytearray', 'bytes', 'chr', 'cmp', 'complex', 'dict', 'divmod', 'enumerate', 'filter', 'float', 'format', 'frozenset', 'hash', 'hex', 'int', 'len', 'list', 'long', 'map', 'max', 'min', 'oct', 'ord', 'pow', 'range', 'repr', 'reversed', 'round', 'set', 'slice', 'sorted', 'str', 'sum', 'tuple', 'xrange', 'zip') for name in names: try: benv.declare(name, getattr(builtins, name)) except AttributeError: # ignore, likely Py3 pass denv = CompileTimeScope(benv) return denv #------------------------------------------------------------------ class SourceDescriptor(object): """ A SourceDescriptor should be considered immutable. """ _file_type = 'pyx' _escaped_description = None _cmp_name = '' def __str__(self): assert False # To catch all places where a descriptor is used directly as a filename def set_file_type_from_name(self, filename): name, ext = os.path.splitext(filename) self._file_type = ext in ('.pyx', '.pxd', '.py') and ext[1:] or 'pyx' def is_cython_file(self): return self._file_type in ('pyx', 'pxd') def is_python_file(self): return self._file_type == 'py' def get_escaped_description(self): if self._escaped_description is None: self._escaped_description = \ self.get_description().encode('ASCII', 'replace').decode("ASCII") return self._escaped_description def __gt__(self, other): # this is only used to provide some sort of order try: return self._cmp_name > other._cmp_name except AttributeError: return False def __lt__(self, other): # this is only used to provide some sort of order try: return self._cmp_name < other._cmp_name except AttributeError: return False def __le__(self, other): # this is only used to provide some sort of order try: return self._cmp_name <= other._cmp_name 
except AttributeError: return False class FileSourceDescriptor(SourceDescriptor): """ Represents a code source. A code source is a more generic abstraction for a "filename" (as sometimes the code doesn't come from a file). Instances of code sources are passed to Scanner.__init__ as the optional name argument and will be passed back when asking for the position()-tuple. """ def __init__(self, filename, path_description=None): filename = Utils.decode_filename(filename) self.path_description = path_description or filename self.filename = filename self.set_file_type_from_name(filename) self._cmp_name = filename self._lines = {} def get_lines(self, encoding=None, error_handling=None): # we cache the lines only the second time this is called, in # order to save memory when they are only used once key = (encoding, error_handling) try: lines = self._lines[key] if lines is not None: return lines except KeyError: pass f = Utils.open_source_file( self.filename, encoding=encoding, error_handling=error_handling, # newline normalisation is costly before Py2.6 require_normalised_newlines=False) try: lines = list(f) finally: f.close() if key in self._lines: self._lines[key] = lines else: # do not cache the first access, but remember that we # already read it once self._lines[key] = None return lines def get_description(self): return self.path_description def get_error_description(self): path = self.filename cwd = Utils.decode_filename(os.getcwd() + os.path.sep) if path.startswith(cwd): return path[len(cwd):] return path def get_filenametable_entry(self): return self.filename def __eq__(self, other): return isinstance(other, FileSourceDescriptor) and self.filename == other.filename def __hash__(self): return hash(self.filename) def __repr__(self): return "<FileSourceDescriptor:%s>" % self.filename class StringSourceDescriptor(SourceDescriptor): """ Instances of this class can be used instead of a filenames if the code originates from a string object. 
""" filename = None def __init__(self, name, code): self.name = name #self.set_file_type_from_name(name) self.codelines = [x + "\n" for x in code.split("\n")] self._cmp_name = name def get_lines(self, encoding=None, error_handling=None): if not encoding: return self.codelines else: return [ line.encode(encoding, error_handling).decode(encoding) for line in self.codelines ] def get_description(self): return self.name get_error_description = get_description def get_filenametable_entry(self): return "stringsource" def __hash__(self): return id(self) # Do not hash on the name, an identical string source should be the # same object (name is often defaulted in other places) # return hash(self.name) def __eq__(self, other): return isinstance(other, StringSourceDescriptor) and self.name == other.name def __repr__(self): return "<StringSourceDescriptor:%s>" % self.name #------------------------------------------------------------------ class PyrexScanner(Scanner): # context Context Compilation context # included_files [string] Files included with 'include' statement # compile_time_env dict Environment for conditional compilation # compile_time_eval boolean In a true conditional compilation context # compile_time_expr boolean In a compile-time expression context def __init__(self, file, filename, parent_scanner = None, scope = None, context = None, source_encoding=None, parse_comments=True, initial_pos=None): Scanner.__init__(self, get_lexicon(), file, filename, initial_pos) if parent_scanner: self.context = parent_scanner.context self.included_files = parent_scanner.included_files self.compile_time_env = parent_scanner.compile_time_env self.compile_time_eval = parent_scanner.compile_time_eval self.compile_time_expr = parent_scanner.compile_time_expr else: self.context = context self.included_files = scope.included_files self.compile_time_env = initial_compile_time_env() self.compile_time_eval = 1 self.compile_time_expr = 0 if hasattr(context.options, 'compile_time_env') and 
\ context.options.compile_time_env is not None: self.compile_time_env.update(context.options.compile_time_env) self.parse_comments = parse_comments self.source_encoding = source_encoding if filename.is_python_file(): self.in_python_file = True self.keywords = set(py_reserved_words) else: self.in_python_file = False self.keywords = set(pyx_reserved_words) self.trace = trace_scanner self.indentation_stack = [0] self.indentation_char = None self.bracket_nesting_level = 0 self.begin('INDENT') self.sy = '' self.next() def commentline(self, text): if self.parse_comments: self.produce('commentline', text) def current_level(self): return self.indentation_stack[-1] def open_bracket_action(self, text): self.bracket_nesting_level = self.bracket_nesting_level + 1 return text def close_bracket_action(self, text): self.bracket_nesting_level = self.bracket_nesting_level - 1 return text def newline_action(self, text): if self.bracket_nesting_level == 0: self.begin('INDENT') self.produce('NEWLINE', '') string_states = { "'": 'SQ_STRING', '"': 'DQ_STRING', "'''": 'TSQ_STRING', '"""': 'TDQ_STRING' } def begin_string_action(self, text): while text[:1] in any_string_prefix: text = text[1:] self.begin(self.string_states[text]) self.produce('BEGIN_STRING') def end_string_action(self, text): self.begin('') self.produce('END_STRING') def unclosed_string_action(self, text): self.end_string_action(text) self.error("Unclosed string literal") def indentation_action(self, text): self.begin('') # Indentation within brackets should be ignored. #if self.bracket_nesting_level > 0: # return # Check that tabs and spaces are being used consistently. 
if text: c = text[0] #print "Scanner.indentation_action: indent with", repr(c) ### if self.indentation_char is None: self.indentation_char = c #print "Scanner.indentation_action: setting indent_char to", repr(c) else: if self.indentation_char != c: self.error("Mixed use of tabs and spaces") if text.replace(c, "") != "": self.error("Mixed use of tabs and spaces") # Figure out how many indents/dedents to do current_level = self.current_level() new_level = len(text) #print "Changing indent level from", current_level, "to", new_level ### if new_level == current_level: return elif new_level > current_level: #print "...pushing level", new_level ### self.indentation_stack.append(new_level) self.produce('INDENT', '') else: while new_level < self.current_level(): #print "...popping level", self.indentation_stack[-1] ### self.indentation_stack.pop() self.produce('DEDENT', '') #print "...current level now", self.current_level() ### if new_level != self.current_level(): self.error("Inconsistent indentation") def eof_action(self, text): while len(self.indentation_stack) > 1: self.produce('DEDENT', '') self.indentation_stack.pop() self.produce('EOF', '') def next(self): try: sy, systring = self.read() except UnrecognizedInput: self.error("Unrecognized character") if sy == IDENT: if systring in self.keywords: if systring == u'print' and print_function in self.context.future_directives: self.keywords.discard('print') systring = EncodedString(systring) elif systring == u'exec' and self.context.language_level >= 3: self.keywords.discard('exec') systring = EncodedString(systring) else: sy = systring else: systring = EncodedString(systring) self.sy = sy self.systring = systring if False: # debug_scanner: _, line, col = self.position() if not self.systring or self.sy == self.systring: t = self.sy else: t = "%s %s" % (self.sy, self.systring) print("--- %3d %2d %s" % (line, col, t)) def peek(self): saved = self.sy, self.systring self.next() next = self.sy, self.systring 
self.unread(*next) self.sy, self.systring = saved return next def put_back(self, sy, systring): self.unread(self.sy, self.systring) self.sy = sy self.systring = systring def unread(self, token, value): # This method should be added to Plex self.queue.insert(0, (token, value)) def error(self, message, pos = None, fatal = True): if pos is None: pos = self.position() if self.sy == 'INDENT': err = error(pos, "Possible inconsistent indentation") err = error(pos, message) if fatal: raise err def expect(self, what, message = None): if self.sy == what: self.next() else: self.expected(what, message) def expect_keyword(self, what, message = None): if self.sy == IDENT and self.systring == what: self.next() else: self.expected(what, message) def expected(self, what, message = None): if message: self.error(message) else: if self.sy == IDENT: found = self.systring else: found = self.sy self.error("Expected '%s', found '%s'" % (what, found)) def expect_indent(self): self.expect('INDENT', "Expected an increase in indentation level") def expect_dedent(self): self.expect('DEDENT', "Expected a decrease in indentation level") def expect_newline(self, message = "Expected a newline"): # Expect either a newline or end of file if self.sy != 'EOF': self.expect('NEWLINE', message)
bsd-3-clause
espressopp/espressopp
src/analysis/RDFatomistic.py
1
8237
# Copyright (C) 2012,2013,2014,2015,2016,2017,2018 # Max Planck Institute for Polymer Research # Copyright (C) 2008,2009,2010,2011 # Max-Planck-Institute for Polymer Research & Fraunhofer SCAI # # This file is part of ESPResSo++. # # ESPResSo++ is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # ESPResSo++ is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. r""" ******************************** espressopp.analysis.RDFatomistic ******************************** Class to compute radial distribution functions in adaptive resolution simulations in subregions of the box. Can be used for regular atomistic/coarse-grained (AT/CG) adaptive resolution simulations as well as path integral-based adaptive resolution simulations. The two functions (compute, computePathIntegral) exhibit different behavior. The regular compute function is used for regular AT/CG simulations and there are two options: Option 1 (spanbased = True): the RDF can be calculated in a cuboid region in the center of the box (periodic in y,z, limited in x). In this case, particle pairs are considered for which at least one of them is in the defined cuboid region. This can be useful when the high resolution region has a slab geometry. No further normalization should be required. Option 2 (spanbased = False): the routine can also calculate unnormalized RDFs using particle pairs with both particles being in the high resolution region (based on the resolution value lambda, the span parameter is not used then). 
This can be useful when atomistic region has complicated or spherical geometries. In any case, only pairs of atomistic particles belonging to two different coarse-grained particles are considered. Furthermore, note that the routine uses L_y / half (L_y is the box length in y-direction) as the maximum distance for the RDF calculation, which is then binned according to rdfN during the computation. Hence, L_y should be the shortest box side (or, equally short as L_x and/or L_z). The computePathIntegral function is used for path integral-based adaptive resolution functions. It calculates the radial distribution functions over pairs of particles between different atoms or coarse-grained beads. Note, however, that in these types of quantum/classical adaptive resolution simulations, regular coarse-grained espressopp particles are associated with each atom and the additional "AdResS" atomistic particles correspond to the different Trotter beads. This means that the routine will, for molecules consisting of multiple atoms, calculate intramolecular rdfs, averaging over the Trotter bead pairs of the ring polymers, which represent the atoms. In doing so, it considers only particles pair with matching Trotter number and with the correct atomistic types. The results are averaged over all Trotter beads. Also in this case L_y / half (L_y is the box length in y-direction) is used as the maximum distance for the RDF calculation, which is then binned according to rdfN during the computation. Furthermore, the calculation is always "spanbased" in x direction (the function ignores the spanbased flag), but in such a fashion that BOTH particles need to be in the defined cuboid region. Normalization is performed as derived in R. Potestio et al., Phys. Rev. Lett. 111, 060601 (2013), Supp. Info. 
This means that, considering only particles with matching Trotter numbers, the computePathIntegral function calculates the RDF between particles of type A and B within a region bounded in x-direction by :math:`X_{min}` and :math:`X_{max}` as .. math:: g_{slab}^{ab}(r^{AB}) = \sum_{a \in N^A} \sum_{b \in N^B} \frac{1}{ N^A N^B} \frac{\delta_\Delta(|\mathbf{r}_a - \mathbf{r}_b| - r)}{v(\mathbf{r}_a)/V_{slab}} \delta_\Delta(r) = \begin{cases} 1 \quad \textrm{for} \quad r<\Delta \\ 0 \quad \textrm{otherwise} \end{cases} v(\mathbf{r}_a)=2\pi\Delta \; r_a(2r_a-h(\mathbf{r}_a)) h(\mathbf{r}_a) = (r_a - X^+)\theta(r_a - X^+) - (r_a - X^-)\theta(r_a - X^-) X^+ = X_{max} -x_a X^- = x_a - X_{min} \theta(r) = \begin{cases} 1 \quad \textrm{for} \quad r>0 \\ 0 \quad \textrm{otherwise} \end{cases} where :math:`N^A` and :math:`N^B` are the number of particles of type A and B in the relevant subregion for the RDF calculation and :math:`V_{slab}` is the total volume of this subregion. Furthermore, :math:`r_a` denotes the radius of the spherical shell for the RDF calculation around particle :math:`a`, :math:`\Delta` is the thickness of the shell, and :math:`x_a` is the :math:`x` coordinate of particle :math:`a`. The final result is an average over all imaginary time slices (Trotter numbers). Examples: >>> rdf_0_1 = espressopp.analysis.RDFatomistic(system = system, type1 = 0, type2 = 1, spanbased = True, span = 1.5) >>> # creates the class for calculating the RDF between atomistic particles of type 1 and 0 between different molecules, >>> # At least one of these particles has to be within plus/minus 1.5 from the center of the box in x-direction >>> rdf_0_1.compute(100) >>> # computes the rdf using 100 bins over a distance corresponding to L_y / 2.0 .. function:: espressopp.analysis.RDFatomistic(system, type1, type2, spanbased, span) Constructs the RDFatomistic object. 
:param system: system object :param type1: type of atom 1 :param type2: type of atom 2 :param spanbased: (default: True) If True, calculates RDFs in a cuboid region of radius span from the center (limited in x, periodic in y,z). If False, calculates RDFs with both particles being in the high resolution region (using lambda resolution values and ignoring span parameter). :param span: (default: 1.0) +/- distance from centre of box in x-direction of the cuboid region used for RDF calculation if spanbased == True. If spanbased == False, this parameter is not used. :type system: std::shared_ptr<System> :type type1: int :type type2: int :type spanbased: bool :type span: real .. function:: espressopp.analysis.RDFatomistic.compute(rdfN) Calculates the atomistic RDF assuming a regular atomistic/coarse-grained adaptive resolution setup. :param rdfN: number of bins :type rdfN: int :rtype: list of reals .. function:: espressopp.analysis.RDFatomistic.computePathIntegral(rdfN) Calculates the path integral-based RDF averaging over all Trotter bead pairs with the same Trotter bead number between different ring polymers assuming a path integral-based quantum/classical adaptive resolution setup. 
:param rdfN: number of bins :type rdfN: int :rtype: list of reals """ from espressopp.esutil import cxxinit from espressopp import pmi from espressopp.analysis.Observable import * from _espressopp import analysis_RDFatomistic class RDFatomisticLocal(ObservableLocal, analysis_RDFatomistic): def __init__(self, system, type1, type2, span = 1.0, spanbased = True): if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup(): cxxinit(self, analysis_RDFatomistic, system, type1, type2, span, spanbased) def compute(self, rdfN): return self.cxxclass.compute(self, rdfN) def computePathIntegral(self, rdfN): return self.cxxclass.computePathIntegral(self, rdfN) if pmi.isController : class RDFatomistic(Observable, metaclass=pmi.Proxy): pmiproxydefs = dict( pmicall = [ "compute", "computePathIntegral" ], cls = 'espressopp.analysis.RDFatomisticLocal' )
gpl-3.0
nathanielvarona/airflow
airflow/__main__.py
11
1368
#!/usr/bin/env python # PYTHON_ARGCOMPLETE_OK # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """Main executable module""" import os import argcomplete from airflow.cli import cli_parser from airflow.configuration import conf def main(): """Main executable function""" if conf.get("core", "security") == 'kerberos': os.environ['KRB5CCNAME'] = conf.get('kerberos', 'ccache') os.environ['KRB5_KTNAME'] = conf.get('kerberos', 'keytab') parser = cli_parser.get_parser() argcomplete.autocomplete(parser) args = parser.parse_args() args.func(args) if __name__ == '__main__': main()
apache-2.0
abhishekkrthakur/scikit-learn
doc/sphinxext/github_link.py
314
2661
from operator import attrgetter import inspect import subprocess import os import sys from functools import partial REVISION_CMD = 'git rev-parse --short HEAD' def _get_git_revision(): try: revision = subprocess.check_output(REVISION_CMD.split()).strip() except subprocess.CalledProcessError: print('Failed to execute git to get revision') return None return revision.decode('utf-8') def _linkcode_resolve(domain, info, package, url_fmt, revision): """Determine a link to online source for a class/method/function This is called by sphinx.ext.linkcode An example with a long-untouched module that everyone has >>> _linkcode_resolve('py', {'module': 'tty', ... 'fullname': 'setraw'}, ... package='tty', ... url_fmt='http://hg.python.org/cpython/file/' ... '{revision}/Lib/{package}/{path}#L{lineno}', ... revision='xxxx') 'http://hg.python.org/cpython/file/xxxx/Lib/tty/tty.py#L18' """ if revision is None: return if domain not in ('py', 'pyx'): return if not info.get('module') or not info.get('fullname'): return class_name = info['fullname'].split('.')[0] if type(class_name) != str: # Python 2 only class_name = class_name.encode('utf-8') module = __import__(info['module'], fromlist=[class_name]) obj = attrgetter(info['fullname'])(module) try: fn = inspect.getsourcefile(obj) except Exception: fn = None if not fn: try: fn = inspect.getsourcefile(sys.modules[obj.__module__]) except Exception: fn = None if not fn: return fn = os.path.relpath(fn, start=os.path.dirname(__import__(package).__file__)) try: lineno = inspect.getsourcelines(obj)[1] except Exception: lineno = '' return url_fmt.format(revision=revision, package=package, path=fn, lineno=lineno) def make_linkcode_resolve(package, url_fmt): """Returns a linkcode_resolve function for the given URL format revision is a git commit reference (hash or name) package is the name of the root module of the package url_fmt is along the lines of ('https://github.com/USER/PROJECT/' 'blob/{revision}/{package}/' '{path}#L{lineno}') """ 
revision = _get_git_revision() return partial(_linkcode_resolve, revision=revision, package=package, url_fmt=url_fmt)
bsd-3-clause
OpusVL/odoo
addons/hr_holidays/tests/__init__.py
1
1092
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Business Applications # Copyright (c) 2013-TODAY OpenERP S.A. <http://www.openerp.com> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp.addons.hr_holidays.tests import test_holidays_flow checks = [ test_holidays_flow, ]
agpl-3.0
e-gob/plataforma-kioscos-autoatencion
scripts/ansible-play/.venv/lib/python2.7/site-packages/ansible/modules/cloud/openstack/os_server_volume.py
19
4905
#!/usr/bin/python #coding: utf-8 -*- # Copyright (c) 2014 Hewlett-Packard Development Company, L.P. # # This module is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This software is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this software. If not, see <http://www.gnu.org/licenses/>. ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: os_server_volume short_description: Attach/Detach Volumes from OpenStack VM's extends_documentation_fragment: openstack version_added: "2.0" author: "Monty Taylor (@emonty)" description: - Attach or Detach volumes from OpenStack VM's options: state: description: - Should the resource be present or absent. choices: [present, absent] default: present required: false server: description: - Name or ID of server you want to attach a volume to required: true volume: description: - Name or id of volume you want to attach to a server required: true device: description: - Device you want to attach. Defaults to auto finding a device name. required: false default: None availability_zone: description: - Ignored. 
Present for backwards compatibility required: false requirements: - "python >= 2.6" - "shade" ''' EXAMPLES = ''' # Attaches a volume to a compute host - name: attach a volume hosts: localhost tasks: - name: attach volume to host os_server_volume: state: present cloud: mordred server: Mysql-server volume: mysql-data device: /dev/vdb ''' try: import shade from shade import meta HAS_SHADE = True except ImportError: HAS_SHADE = False def _system_state_change(state, device): """Check if system state would change.""" if state == 'present': if device: return False return True if state == 'absent': if device: return True return False return False def main(): argument_spec = openstack_full_argument_spec( server=dict(required=True), volume=dict(required=True), device=dict(default=None), # None == auto choose device name state=dict(default='present', choices=['absent', 'present']), ) module_kwargs = openstack_module_kwargs() module = AnsibleModule(argument_spec, supports_check_mode=True, **module_kwargs) if not HAS_SHADE: module.fail_json(msg='shade is required for this module') state = module.params['state'] wait = module.params['wait'] timeout = module.params['timeout'] try: cloud = shade.openstack_cloud(**module.params) server = cloud.get_server(module.params['server']) volume = cloud.get_volume(module.params['volume']) dev = cloud.get_volume_attach_device(volume, server.id) if module.check_mode: module.exit_json(changed=_system_state_change(state, dev)) if state == 'present': if dev: # Volume is already attached to this server module.exit_json(changed=False) cloud.attach_volume(server, volume, module.params['device'], wait=wait, timeout=timeout) server = cloud.get_server(module.params['server']) # refresh volume = cloud.get_volume(module.params['volume']) # refresh hostvars = meta.get_hostvars_from_server(cloud, server) module.exit_json( changed=True, id=volume['id'], attachments=volume['attachments'], openstack=hostvars ) elif state == 'absent': if not dev: # Volume is 
not attached to this server module.exit_json(changed=False) cloud.detach_volume(server, volume, wait=wait, timeout=timeout) module.exit_json( changed=True, result='Detached volume from server' ) except (shade.OpenStackCloudException, shade.OpenStackCloudTimeout) as e: module.fail_json(msg=str(e)) # this is magic, see lib/ansible/module_utils/common.py from ansible.module_utils.basic import * from ansible.module_utils.openstack import * if __name__ == '__main__': main()
bsd-3-clause
donspaulding/adspygoogle
examples/adspygoogle/dfa/v1_20/use_oauth2.py
3
4219
#!/usr/bin/python # # Copyright 2013 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """This example demonstrates how to authenticate using OAuth2. This example is intended for users who wish to use the oauth2client library directly. Using a workflow similar to the example here, you can take advantage of the oauth2client in a broader range of contexts than caching your refresh token using the config.py scripts allows. You can avoid having to use the oauth2client library directly by using the Ads Python Client Library's config.py script to cache a client ID, client secret, and refresh token for reuse. This example is intended to be run from the command line as it takes user input. """ __author__ = 'api.jdilallo@gmail.com (Joseph DiLallo)' import httplib2 import os import sys sys.path.insert(0, os.path.join('..', '..', '..', '..', '..')) from adspygoogle import DfaClient from oauth2client.client import FlowExchangeError from oauth2client.client import OAuth2WebServerFlow DFA_USER_PROFILE_NAME = 'INSERT_DFA_USER_PROFILE_NAME_HERE' # Visit https://code.google.com/apis/console to generate your client_id, # client_secret and to register your redirect_uri. 
# See the oauth2client wiki for more information on performing the OAuth2 flow: # http://code.google.com/p/google-api-python-client/wiki/OAuth2 OAUTH2_CLIENT_ID = 'INSERT_OAUTH2_CLIENT_ID_HERE' OAUTH2_CLIENT_SECRET = 'INSERT_OAUTH2_CLIENT_SECRET_HERE' def main(user_profile_name, oauth2_client_id, oauth2_client_secret): # We're using the oauth2client library: # http://code.google.com/p/google-api-python-client/downloads/list flow = OAuth2WebServerFlow( client_id=oauth2_client_id, client_secret=oauth2_client_secret, scope='https://www.googleapis.com/auth/dfatrafficking', user_agent='oauth2 code example', redirect_uri='urn:ietf:wg:oauth:2.0:oob') # Get the authorization URL to direct the user to. authorize_url = flow.step1_get_authorize_url() print ('Log in to your Google Account and open the following URL: \n%s\n' % authorize_url) print 'After approving the token enter the verification code (if specified).' code = raw_input('Code: ').strip() credential = None try: credential = flow.step2_exchange(code) except FlowExchangeError, e: sys.exit('Authentication has failed: %s' % e) # Create the DfpClient and set the OAuth2 credentials. client = DfaClient(headers={ 'Username': user_profile_name, 'oauth2credentials': credential }) # OAuth2 credentials objects can be reused credentials = client.oauth2credentials print 'OAuth2 authorization successful!' # OAuth2 credential objects can be refreshed via credentials.refresh() - the # access token expires after 1 hour. credentials.refresh(httplib2.Http()) # Note: you could simply set the credentials as below and skip the previous # steps once access has been granted. client.oauth2credentials = credentials advertiser_service = client.GetAdvertiserService(version='v1.20') advertiser_search_criteria = { 'pageSize': '10' } # Get advertiser record set. results = advertiser_service.GetAdvertisers(advertiser_search_criteria)[0] # Display advertiser names, IDs and spotlight configuration IDs. 
if results['records']: for advertiser in results['records']: print ('Advertiser with name \'%s\', ID \'%s\', and spotlight ' 'configuration id \'%s\' was found.' % (advertiser['name'], advertiser['id'], advertiser['spotId'])) else: print 'No advertisers found for your criteria.' if __name__ == '__main__': main(DFA_USER_PROFILE_NAME, OAUTH2_CLIENT_ID, OAUTH2_CLIENT_SECRET)
apache-2.0
leorochael/odoo
addons/account_payment/__openerp__.py
261
2925
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## { 'name': 'Suppliers Payment Management', 'version': '1.1', 'author': 'OpenERP SA', 'category': 'Accounting & Finance', 'description': """ Module to manage the payment of your supplier invoices. ======================================================= This module allows you to create and manage your payment orders, with purposes to --------------------------------------------------------------------------------- * serve as base for an easy plug-in of various automated payment mechanisms. * provide a more efficient way to manage invoice payment. Warning: ~~~~~~~~ The confirmation of a payment order does _not_ create accounting entries, it just records the fact that you gave your payment order to your bank. The booking of your order must be encoded as usual through a bank statement. Indeed, it's only when you get the confirmation from your bank that your order has been accepted that you can book it in your accounting. To help you with that operation, you have a new option to import payment orders as bank statement lines. 
""", 'depends': ['account','account_voucher'], 'data': [ 'security/account_payment_security.xml', 'security/ir.model.access.csv', 'wizard/account_payment_pay_view.xml', 'wizard/account_payment_populate_statement_view.xml', 'wizard/account_payment_create_order_view.xml', 'account_payment_view.xml', 'account_payment_workflow.xml', 'account_payment_sequence.xml', 'account_payment_report.xml', 'views/report_paymentorder.xml', ], 'demo': ['account_payment_demo.xml'], 'test': [ 'test/account_payment_demo.yml', 'test/cancel_payment_order.yml', 'test/payment_order_process.yml', 'test/account_payment_report.yml', ], 'installable': True, 'auto_install': False, } # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
ashemedai/ansible
lib/ansible/modules/network/asa/asa_acl.py
51
7973
#!/usr/bin/python # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # ANSIBLE_METADATA = {'metadata_version': '1.0', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = """ --- module: asa_acl version_added: "2.2" author: "Patrick Ogenstad (@ogenstad)" short_description: Manage access-lists on a Cisco ASA description: - This module allows you to work with access-lists on a Cisco ASA device. extends_documentation_fragment: asa options: lines: description: - The ordered set of commands that should be configured in the section. The commands must be the exact same commands as found in the device running-config. Be sure to note the configuration command syntax as some commands are automatically modified by the device config parser. required: true before: description: - The ordered set of commands to push on to the command stack if a change needs to be made. This allows the playbook designer the opportunity to perform configuration commands prior to pushing any changes without affecting how the set of commands are matched against the system. required: false default: null after: description: - The ordered set of commands to append to the end of the command stack if a changed needs to be made. Just like with I(before) this allows the playbook designer to append a set of commands to be executed after the command set. 
required: false default: null match: description: - Instructs the module on the way to perform the matching of the set of commands against the current device config. If match is set to I(line), commands are matched line by line. If match is set to I(strict), command lines are matched with respect to position. Finally if match is set to I(exact), command lines must be an equal match. required: false default: line choices: ['line', 'strict', 'exact'] replace: description: - Instructs the module on the way to perform the configuration on the device. If the replace argument is set to I(line) then the modified lines are pushed to the device in configuration mode. If the replace argument is set to I(block) then the entire command block is pushed to the device in configuration mode if any line is not correct. required: false default: line choices: ['line', 'block'] force: description: - The force argument instructs the module to not consider the current devices running-config. When set to true, this will cause the module to push the contents of I(src) into the device without first checking if already configured. required: false default: false choices: ['yes', 'no'] config: description: - The module, by default, will connect to the remote device and retrieve the current running-config to use as a base for comparing against the contents of source. There are times when it is not desirable to have the task get the current running-config for every task in a playbook. The I(config) argument allows the implementer to pass in the configuruation to use as the base config for comparison. required: false default: null """ EXAMPLES = """ # Note: examples below use the following provider dict to handle # transport and authentication to the node. 
--- vars: cli: host: "{{ inventory_hostname }}" username: cisco password: cisco transport: cli authorize: yes auth_pass: cisco --- - asa_acl: lines: - access-list ACL-ANSIBLE extended permit tcp any any eq 82 - access-list ACL-ANSIBLE extended permit tcp any any eq www - access-list ACL-ANSIBLE extended permit tcp any any eq 97 - access-list ACL-ANSIBLE extended permit tcp any any eq 98 - access-list ACL-ANSIBLE extended permit tcp any any eq 99 before: clear configure access-list ACL-ANSIBLE match: strict replace: block provider: "{{ cli }}" - asa_acl: lines: - access-list ACL-OUTSIDE extended permit tcp any any eq www - access-list ACL-OUTSIDE extended permit tcp any any eq https context: customer_a provider: "{{ cli }}" """ RETURN = """ updates: description: The set of commands that will be pushed to the remote device returned: always type: list sample: ['...', '...'] responses: description: The set of responses from issuing the commands on the device returned: when not check_mode type: list sample: ['...', '...'] """ import ansible.module_utils.asa from ansible.module_utils.network import NetworkModule from ansible.module_utils.netcfg import NetworkConfig, dumps def get_config(module, acl_name): contents = module.params['config'] if not contents: contents = module.config.get_config() filtered_config = list() for item in contents.split('\n'): if item.startswith('access-list %s ' % acl_name): filtered_config.append(item) return NetworkConfig(indent=1, contents='\n'.join(filtered_config)) def parse_acl_name(module): first_line = True for line in module.params['lines']: ace = line.split() if ace[0] != 'access-list': module.fail_json(msg='All lines/commands must begin with "access-list" %s is not permitted' % ace[0]) if len(ace) <= 1: module.fail_json(msg='All lines/commands must contain the name of the access-list') if first_line: acl_name = ace[1] else: if acl_name != ace[1]: module.fail_json(msg='All lines/commands must use the same access-list %s is not %s' % 
(ace[1], acl_name)) first_line = False return acl_name def main(): argument_spec = dict( lines=dict(aliases=['commands'], required=True, type='list'), before=dict(type='list'), after=dict(type='list'), match=dict(default='line', choices=['line', 'strict', 'exact']), replace=dict(default='line', choices=['line', 'block']), force=dict(default=False, type='bool'), config=dict() ) module = NetworkModule(argument_spec=argument_spec, supports_check_mode=True) lines = module.params['lines'] before = module.params['before'] after = module.params['after'] match = module.params['match'] replace = module.params['replace'] result = dict(changed=False) candidate = NetworkConfig(indent=1) candidate.add(lines) acl_name = parse_acl_name(module) if not module.params['force']: contents = get_config(module, acl_name) config = NetworkConfig(indent=1, contents=contents) commands = candidate.difference(config) commands = dumps(commands, 'commands').split('\n') commands = [str(c) for c in commands if c] else: commands = str(candidate).split('\n') if commands: if not module.check_mode: response = module.config(commands) result['responses'] = response result['changed'] = True result['updates'] = commands module.exit_json(**result) if __name__ == '__main__': main()
gpl-3.0
lavindev/DECAF
decaf/roms/seabios/tools/checkstack.py
38
7820
#!/usr/bin/env python # Script that tries to find how much stack space each function in an # object is using. # # Copyright (C) 2008 Kevin O'Connor <kevin@koconnor.net> # # This file may be distributed under the terms of the GNU GPLv3 license. # Usage: # objdump -m i386 -M i8086 -M suffix -d out/rom16.o | tools/checkstack.py import sys import re # Functions that change stacks STACKHOP = ['__send_disk_op'] # List of functions we can assume are never called. #IGNORE = ['panic', '__dprintf'] IGNORE = ['panic'] OUTPUTDESC = """ #funcname1[preamble_stack_usage,max_usage_with_callers]: # insn_addr:called_function [usage_at_call_point+caller_preamble,total_usage] # #funcname2[p,m,max_usage_to_yield_point]: # insn_addr:called_function [u+c,t,usage_to_yield_point] """ # Find out maximum stack usage for a function def calcmaxstack(funcs, funcaddr): info = funcs[funcaddr] # Find max of all nested calls. maxusage = info[1] maxyieldusage = doesyield = 0 if info[3] is not None: maxyieldusage = info[3] doesyield = 1 info[2] = maxusage info[4] = info[3] seenbefore = {} totcalls = 0 for insnaddr, calladdr, usage in info[6]: callinfo = funcs.get(calladdr) if callinfo is None: continue if callinfo[2] is None: calcmaxstack(funcs, calladdr) if callinfo[0] not in seenbefore: seenbefore[callinfo[0]] = 1 totcalls += 1 + callinfo[5] funcnameroot = callinfo[0].split('.')[0] if funcnameroot in IGNORE: # This called function is ignored - don't contribute it to # the max stack. 
continue if funcnameroot in STACKHOP: if usage > maxusage: maxusage = usage if callinfo[4] is not None: doesyield = 1 if usage > maxyieldusage: maxyieldusage = usage continue totusage = usage + callinfo[2] if totusage > maxusage: maxusage = totusage if callinfo[4] is not None: doesyield = 1 totyieldusage = usage + callinfo[4] if totyieldusage > maxyieldusage: maxyieldusage = totyieldusage info[2] = maxusage if doesyield: info[4] = maxyieldusage info[5] = totcalls # Try to arrange output so that functions that call each other are # near each other. def orderfuncs(funcaddrs, availfuncs): l = [(availfuncs[funcaddr][5], availfuncs[funcaddr][0], funcaddr) for funcaddr in funcaddrs if funcaddr in availfuncs] l.sort() l.reverse() out = [] while l: count, name, funcaddr = l.pop(0) if funcaddr not in availfuncs: continue calladdrs = [calls[1] for calls in availfuncs[funcaddr][6]] del availfuncs[funcaddr] out = out + orderfuncs(calladdrs, availfuncs) + [funcaddr] return out # Update function info with a found "yield" point. def noteYield(info, stackusage): prevyield = info[3] if prevyield is None or prevyield < stackusage: info[3] = stackusage # Update function info with a found "call" point. def noteCall(info, subfuncs, insnaddr, calladdr, stackusage): if (calladdr, stackusage) in subfuncs: # Already noted a nearly identical call - ignore this one. 
return info[6].append((insnaddr, calladdr, stackusage)) subfuncs[(calladdr, stackusage)] = 1 hex_s = r'[0-9a-f]+' re_func = re.compile(r'^(?P<funcaddr>' + hex_s + r') <(?P<func>.*)>:$') re_asm = re.compile( r'^[ ]*(?P<insnaddr>' + hex_s + r'):\t.*\t(addr32 )?(?P<insn>.+?)[ ]*((?P<calladdr>' + hex_s + r') <(?P<ref>.*)>)?$') re_usestack = re.compile( r'^(push[f]?[lw])|(sub.* [$](?P<num>0x' + hex_s + r'),%esp)$') def calc(): # funcs[funcaddr] = [funcname, basicstackusage, maxstackusage # , yieldusage, maxyieldusage, totalcalls # , [(insnaddr, calladdr, stackusage), ...]] funcs = {-1: ['<indirect>', 0, 0, None, None, 0, []]} cur = None atstart = 0 stackusage = 0 # Parse input lines for line in sys.stdin.readlines(): m = re_func.match(line) if m is not None: # Found function funcaddr = int(m.group('funcaddr'), 16) funcs[funcaddr] = cur = [m.group('func'), 0, None, None, None, 0, []] stackusage = 0 atstart = 1 subfuncs = {} continue m = re_asm.match(line) if m is not None: insn = m.group('insn') im = re_usestack.match(insn) if im is not None: if insn.startswith('pushl') or insn.startswith('pushfl'): stackusage += 4 continue elif insn.startswith('pushw') or insn.startswith('pushfw'): stackusage += 2 continue stackusage += int(im.group('num'), 16) if atstart: if insn == 'movl %esp,%ebp': # Still part of initial header continue cur[1] = stackusage atstart = 0 insnaddr = m.group('insnaddr') calladdr = m.group('calladdr') if calladdr is None: if insn.startswith('lcallw'): noteCall(cur, subfuncs, insnaddr, -1, stackusage + 4) noteYield(cur, stackusage + 4) elif insn.startswith('int'): noteCall(cur, subfuncs, insnaddr, -1, stackusage + 6) noteYield(cur, stackusage + 6) elif insn.startswith('sti'): noteYield(cur, stackusage) else: # misc instruction continue else: # Jump or call insn calladdr = int(calladdr, 16) ref = m.group('ref') if '+' in ref: # Inter-function jump. 
pass elif insn.startswith('j'): # Tail call noteCall(cur, subfuncs, insnaddr, calladdr, 0) elif insn.startswith('calll'): noteCall(cur, subfuncs, insnaddr, calladdr, stackusage + 4) else: print "unknown call", ref noteCall(cur, subfuncs, insnaddr, calladdr, stackusage) # Reset stack usage to preamble usage stackusage = cur[1] #print "other", repr(line) # Calculate maxstackusage for funcaddr, info in funcs.items(): if info[2] is not None: continue calcmaxstack(funcs, funcaddr) # Sort functions for output funcaddrs = orderfuncs(funcs.keys(), funcs.copy()) # Show all functions print OUTPUTDESC for funcaddr in funcaddrs: name, basicusage, maxusage, yieldusage, maxyieldusage, count, calls = \ funcs[funcaddr] if maxusage == 0 and maxyieldusage is None: continue yieldstr = "" if maxyieldusage is not None: yieldstr = ",%d" % maxyieldusage print "\n%s[%d,%d%s]:" % (name, basicusage, maxusage, yieldstr) for insnaddr, calladdr, stackusage in calls: callinfo = funcs.get(calladdr, ("<unknown>", 0, 0, 0, None)) yieldstr = "" if callinfo[4] is not None: yieldstr = ",%d" % (stackusage + callinfo[4]) print " %04s:%-40s [%d+%d,%d%s]" % ( insnaddr, callinfo[0], stackusage, callinfo[1] , stackusage+callinfo[2], yieldstr) def main(): calc() if __name__ == '__main__': main()
gpl-3.0
HyperBaton/ansible
lib/ansible/modules/storage/netapp/netapp_e_amg.py
21
10291
#!/usr/bin/python # (c) 2016, NetApp, Inc # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = """ --- module: netapp_e_amg short_description: NetApp E-Series create, remove, and update asynchronous mirror groups description: - Allows for the creation, removal and updating of Asynchronous Mirror Groups for NetApp E-series storage arrays version_added: '2.2' author: Kevin Hulquest (@hulquest) extends_documentation_fragment: - netapp.eseries options: name: description: - The name of the async array you wish to target, or create. - If C(state) is present and the name isn't found, it will attempt to create. required: yes secondaryArrayId: description: - The ID of the secondary array to be used in mirroring process required: yes syncIntervalMinutes: description: - The synchronization interval in minutes default: 10 manualSync: description: - Setting this to true will cause other synchronization values to be ignored type: bool default: 'no' recoveryWarnThresholdMinutes: description: - Recovery point warning threshold (minutes). The user will be warned when the age of the last good failures point exceeds this value default: 20 repoUtilizationWarnThreshold: description: - Recovery point warning threshold default: 80 interfaceType: description: - The intended protocol to use if both Fibre and iSCSI are available. choices: - iscsi - fibre syncWarnThresholdMinutes: description: - The threshold (in minutes) for notifying the user that periodic synchronization has taken too long to complete. default: 10 state: description: - A C(state) of present will either create or update the async mirror group. - A C(state) of absent will remove the async mirror group. 
choices: [ absent, present ] required: yes """ EXAMPLES = """ - name: AMG removal na_eseries_amg: state: absent ssid: "{{ ssid }}" secondaryArrayId: "{{amg_secondaryArrayId}}" api_url: "{{ netapp_api_url }}" api_username: "{{ netapp_api_username }}" api_password: "{{ netapp_api_password }}" new_name: "{{amg_array_name}}" name: "{{amg_name}}" when: amg_create - name: AMG create netapp_e_amg: state: present ssid: "{{ ssid }}" secondaryArrayId: "{{amg_secondaryArrayId}}" api_url: "{{ netapp_api_url }}" api_username: "{{ netapp_api_username }}" api_password: "{{ netapp_api_password }}" new_name: "{{amg_array_name}}" name: "{{amg_name}}" when: amg_create """ RETURN = """ msg: description: Successful creation returned: success type: str sample: '{"changed": true, "connectionType": "fc", "groupRef": "3700000060080E5000299C24000006E857AC7EEC", "groupState": "optimal", "id": "3700000060080E5000299C24000006E857AC7EEC", "label": "amg_made_by_ansible", "localRole": "primary", "mirrorChannelRemoteTarget": "9000000060080E5000299C24005B06E557AC7EEC", "orphanGroup": false, "recoveryPointAgeAlertThresholdMinutes": 20, "remoteRole": "secondary", "remoteTarget": {"nodeName": {"ioInterfaceType": "fc", "iscsiNodeName": null, "remoteNodeWWN": "20040080E5299F1C"}, "remoteRef": "9000000060080E5000299C24005B06E557AC7EEC", "scsiinitiatorTargetBaseProperties": {"ioInterfaceType": "fc", "iscsiinitiatorTargetBaseParameters": null}}, "remoteTargetId": "ansible2", "remoteTargetName": "Ansible2", "remoteTargetWwn": "60080E5000299F880000000056A25D56", "repositoryUtilizationWarnThreshold": 80, "roleChangeProgress": "none", "syncActivity": "idle", "syncCompletionTimeAlertThresholdMinutes": 10, "syncIntervalMinutes": 10, "worldWideName": "60080E5000299C24000006E857AC7EEC"}' """ # NOQA import json import traceback from ansible.module_utils.basic import AnsibleModule from ansible.module_utils._text import to_native from ansible.module_utils.netapp import request, eseries_host_argument_spec HEADERS = { 
"Content-Type": "application/json", "Accept": "application/json", } def has_match(module, ssid, api_url, api_pwd, api_usr, body): compare_keys = ['syncIntervalMinutes', 'syncWarnThresholdMinutes', 'recoveryWarnThresholdMinutes', 'repoUtilizationWarnThreshold'] desired_state = dict((x, (body.get(x))) for x in compare_keys) label_exists = False matches_spec = False current_state = None async_id = None api_data = None desired_name = body.get('name') endpoint = 'storage-systems/%s/async-mirrors' % ssid url = api_url + endpoint try: rc, data = request(url, url_username=api_usr, url_password=api_pwd, headers=HEADERS) except Exception as e: module.exit_json(msg="Error finding a match. Message: %s" % to_native(e), exception=traceback.format_exc()) for async_group in data: if async_group['label'] == desired_name: label_exists = True api_data = async_group async_id = async_group['groupRef'] current_state = dict( syncIntervalMinutes=async_group['syncIntervalMinutes'], syncWarnThresholdMinutes=async_group['syncCompletionTimeAlertThresholdMinutes'], recoveryWarnThresholdMinutes=async_group['recoveryPointAgeAlertThresholdMinutes'], repoUtilizationWarnThreshold=async_group['repositoryUtilizationWarnThreshold'], ) if current_state == desired_state: matches_spec = True return label_exists, matches_spec, api_data, async_id def create_async(module, ssid, api_url, api_pwd, api_usr, body): endpoint = 'storage-systems/%s/async-mirrors' % ssid url = api_url + endpoint post_data = json.dumps(body) try: rc, data = request(url, data=post_data, method='POST', url_username=api_usr, url_password=api_pwd, headers=HEADERS) except Exception as e: module.exit_json(msg="Exception while creating aysnc mirror group. 
Message: %s" % to_native(e), exception=traceback.format_exc()) return data def update_async(module, ssid, api_url, pwd, user, body, new_name, async_id): endpoint = 'storage-systems/%s/async-mirrors/%s' % (ssid, async_id) url = api_url + endpoint compare_keys = ['syncIntervalMinutes', 'syncWarnThresholdMinutes', 'recoveryWarnThresholdMinutes', 'repoUtilizationWarnThreshold'] desired_state = dict((x, (body.get(x))) for x in compare_keys) if new_name: desired_state['new_name'] = new_name post_data = json.dumps(desired_state) try: rc, data = request(url, data=post_data, method='POST', headers=HEADERS, url_username=user, url_password=pwd) except Exception as e: module.exit_json(msg="Exception while updating async mirror group. Message: %s" % to_native(e), exception=traceback.format_exc()) return data def remove_amg(module, ssid, api_url, pwd, user, async_id): endpoint = 'storage-systems/%s/async-mirrors/%s' % (ssid, async_id) url = api_url + endpoint try: rc, data = request(url, method='DELETE', url_username=user, url_password=pwd, headers=HEADERS) except Exception as e: module.exit_json(msg="Exception while removing async mirror group. 
Message: %s" % to_native(e), exception=traceback.format_exc()) return def main(): argument_spec = eseries_host_argument_spec() argument_spec.update(dict( name=dict(required=True, type='str'), new_name=dict(required=False, type='str'), secondaryArrayId=dict(required=True, type='str'), syncIntervalMinutes=dict(required=False, default=10, type='int'), manualSync=dict(required=False, default=False, type='bool'), recoveryWarnThresholdMinutes=dict(required=False, default=20, type='int'), repoUtilizationWarnThreshold=dict(required=False, default=80, type='int'), interfaceType=dict(required=False, choices=['fibre', 'iscsi'], type='str'), state=dict(required=True, choices=['present', 'absent']), syncWarnThresholdMinutes=dict(required=False, default=10, type='int') )) module = AnsibleModule(argument_spec=argument_spec) p = module.params ssid = p.pop('ssid') api_url = p.pop('api_url') user = p.pop('api_username') pwd = p.pop('api_password') new_name = p.pop('new_name') state = p.pop('state') if not api_url.endswith('/'): api_url += '/' name_exists, spec_matches, api_data, async_id = has_match(module, ssid, api_url, pwd, user, p) if state == 'present': if name_exists and spec_matches: module.exit_json(changed=False, msg="Desired state met", **api_data) elif name_exists and not spec_matches: results = update_async(module, ssid, api_url, pwd, user, p, new_name, async_id) module.exit_json(changed=True, msg="Async mirror group updated", async_id=async_id, **results) elif not name_exists: results = create_async(module, ssid, api_url, user, pwd, p) module.exit_json(changed=True, **results) elif state == 'absent': if name_exists: remove_amg(module, ssid, api_url, pwd, user, async_id) module.exit_json(changed=True, msg="Async mirror group removed.", async_id=async_id) else: module.exit_json(changed=False, msg="Async Mirror group: %s already absent" % p['name']) if __name__ == '__main__': main()
gpl-3.0
alephdata/ingestors
ingestors/tabular/xls.py
1
2360
import xlrd import logging from datetime import datetime, time from xlrd.biffh import XLRDError from followthemoney import model from ingestors.ingestor import Ingestor from ingestors.support.table import TableSupport from ingestors.support.ole import OLESupport from ingestors.exc import ProcessingException log = logging.getLogger(__name__) class ExcelIngestor(Ingestor, TableSupport, OLESupport): MIME_TYPES = [ "application/excel", "application/x-excel", "application/vnd.ms-excel", "application/x-msexcel", ] EXTENSIONS = ["xls", "xlt", "xla"] SCORE = 7 def convert_cell(self, cell, sheet): value = cell.value try: if cell.ctype == 3: if value == 0: return None year, month, day, hour, minute, second = xlrd.xldate_as_tuple( value, sheet.book.datemode ) if (year, month, day) == (0, 0, 0): value = time(hour, minute, second) return value.isoformat() else: return datetime(year, month, day, hour, minute, second) except Exception as exc: log.warning("Error in Excel value [%s]: %s", cell, exc) return value def generate_csv(self, sheet): for row_index in range(0, sheet.nrows): yield [self.convert_cell(c, sheet) for c in sheet.row(row_index)] def ingest(self, file_path, entity): entity.schema = model.get("Workbook") self.extract_ole_metadata(file_path, entity) try: book = xlrd.open_workbook(file_path, formatting_info=False) except Exception as err: raise ProcessingException("Invalid Excel file: %s" % err) from err try: for sheet in book.sheets(): table = self.manager.make_entity("Table", parent=entity) table.make_id(entity.id, sheet.name) table.set("title", sheet.name) self.emit_row_tuples(table, self.generate_csv(sheet)) if table.has("csvHash"): self.manager.emit_entity(table) except XLRDError as err: raise ProcessingException("Invalid Excel file: %s" % err) from err finally: book.release_resources()
mit
trishnaguha/ansible
lib/ansible/plugins/connection/persistent.py
15
5762
# 2017 Red Hat Inc. # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import (absolute_import, division, print_function) __metaclass__ = type DOCUMENTATION = """ author: Ansible Core Team connection: persistent short_description: Use a persistent unix socket for connection description: - This is a helper plugin to allow making other connections persistent. version_added: "2.3" options: persistent_command_timeout: type: int description: - Configures, in seconds, the amount of time to wait for a command to return from the remote device. If this timer is exceeded before the command returns, the connection plugin will raise an exception and close default: 10 ini: - section: persistent_connection key: command_timeout env: - name: ANSIBLE_PERSISTENT_COMMAND_TIMEOUT vars: - name: ansible_command_timeout """ import os import pty import json import subprocess import sys import termios from ansible import constants as C from ansible.plugins.connection import ConnectionBase from ansible.module_utils._text import to_text from ansible.module_utils.connection import Connection as SocketConnection, write_to_file_descriptor from ansible.errors import AnsibleError from ansible.utils.display import Display display = Display() class Connection(ConnectionBase): ''' Local based connections ''' transport = 'persistent' has_pipelining = False def _connect(self): self._connected = True return self def exec_command(self, cmd, in_data=None, sudoable=True): display.vvvv('exec_command(), socket_path=%s' % self.socket_path, host=self._play_context.remote_addr) connection = SocketConnection(self.socket_path) out = connection.exec_command(cmd, in_data=in_data, sudoable=sudoable) return 0, out, '' def put_file(self, in_path, out_path): pass def fetch_file(self, in_path, out_path): pass def close(self): self._connected = False def run(self): """Returns the path of the persistent connection socket. 
Attempts to ensure (within playcontext.timeout seconds) that the socket path exists. If the path exists (or the timeout has expired), returns the socket path. """ display.vvvv('starting connection from persistent connection plugin', host=self._play_context.remote_addr) socket_path = self._start_connection() display.vvvv('local domain socket path is %s' % socket_path, host=self._play_context.remote_addr) setattr(self, '_socket_path', socket_path) return socket_path def _start_connection(self): ''' Starts the persistent connection ''' candidate_paths = [C.ANSIBLE_CONNECTION_PATH or os.path.dirname(sys.argv[0])] candidate_paths.extend(os.environ['PATH'].split(os.pathsep)) for dirname in candidate_paths: ansible_connection = os.path.join(dirname, 'ansible-connection') if os.path.isfile(ansible_connection): break else: raise AnsibleError("Unable to find location of 'ansible-connection'. " "Please set or check the value of ANSIBLE_CONNECTION_PATH") python = sys.executable master, slave = pty.openpty() p = subprocess.Popen( [python, ansible_connection, to_text(os.getppid())], stdin=slave, stdout=subprocess.PIPE, stderr=subprocess.PIPE ) os.close(slave) # We need to set the pty into noncanonical mode. This ensures that we # can receive lines longer than 4095 characters (plus newline) without # truncating. 
old = termios.tcgetattr(master) new = termios.tcgetattr(master) new[3] = new[3] & ~termios.ICANON try: termios.tcsetattr(master, termios.TCSANOW, new) write_to_file_descriptor(master, {'ansible_command_timeout': self.get_option('persistent_command_timeout')}) write_to_file_descriptor(master, self._play_context.serialize()) (stdout, stderr) = p.communicate() finally: termios.tcsetattr(master, termios.TCSANOW, old) os.close(master) if p.returncode == 0: result = json.loads(to_text(stdout, errors='surrogate_then_replace')) else: try: result = json.loads(to_text(stderr, errors='surrogate_then_replace')) except getattr(json.decoder, 'JSONDecodeError', ValueError): # JSONDecodeError only available on Python 3.5+ result = {'error': to_text(stderr, errors='surrogate_then_replace')} if 'messages' in result: for level, message in result['messages']: if level == 'log': display.display(message, log_only=True) elif level in ('debug', 'v', 'vv', 'vvv', 'vvvv', 'vvvvv', 'vvvvvv'): getattr(display, level)(message, host=self._play_context.remote_addr) else: if hasattr(display, level): getattr(display, level)(message) else: display.vvvv(message, host=self._play_context.remote_addr) if 'error' in result: if self._play_context.verbosity > 2: if result.get('exception'): msg = "The full traceback is:\n" + result['exception'] display.display(msg, color=C.COLOR_ERROR) raise AnsibleError(result['error']) return result['socket_path']
gpl-3.0
t794104/ansible
lib/ansible/parsing/vault/__init__.py
10
52531
# (c) 2014, James Tanner <tanner.jc@gmail.com> # (c) 2016, Adrian Likins <alikins@redhat.com> # (c) 2016 Toshio Kuratomi <tkuratomi@ansible.com> # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type import os import random import shlex import shutil import subprocess import sys import tempfile import warnings from binascii import hexlify from binascii import unhexlify from binascii import Error as BinasciiError HAS_CRYPTOGRAPHY = False HAS_PYCRYPTO = False HAS_SOME_PYCRYPTO = False CRYPTOGRAPHY_BACKEND = None try: with warnings.catch_warnings(): warnings.simplefilter("ignore", DeprecationWarning) from cryptography.exceptions import InvalidSignature from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives import hashes, padding from cryptography.hazmat.primitives.hmac import HMAC from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC from cryptography.hazmat.primitives.ciphers import ( Cipher as C_Cipher, algorithms, modes ) CRYPTOGRAPHY_BACKEND = default_backend() HAS_CRYPTOGRAPHY = True except ImportError: pass try: from Crypto.Cipher import AES as AES_pycrypto HAS_SOME_PYCRYPTO = True # Note: Only used for loading obsolete VaultAES files. 
All files are written # using the newer VaultAES256 which does not require md5 from Crypto.Hash import SHA256 as SHA256_pycrypto from Crypto.Hash import HMAC as HMAC_pycrypto # Counter import fails for 2.0.1, requires >= 2.6.1 from pip from Crypto.Util import Counter as Counter_pycrypto # KDF import fails for 2.0.1, requires >= 2.6.1 from pip from Crypto.Protocol.KDF import PBKDF2 as PBKDF2_pycrypto HAS_PYCRYPTO = True except ImportError: pass from ansible.errors import AnsibleError, AnsibleAssertionError from ansible import constants as C from ansible.module_utils.six import PY3, binary_type # Note: on py2, this zip is izip not the list based zip() builtin from ansible.module_utils.six.moves import zip from ansible.module_utils._text import to_bytes, to_text, to_native from ansible.utils.display import Display from ansible.utils.path import makedirs_safe display = Display() b_HEADER = b'$ANSIBLE_VAULT' CIPHER_WHITELIST = frozenset((u'AES', u'AES256')) CIPHER_WRITE_WHITELIST = frozenset((u'AES256',)) # See also CIPHER_MAPPING at the bottom of the file which maps cipher strings # (used in VaultFile header) to a cipher class NEED_CRYPTO_LIBRARY = "ansible-vault requires either the cryptography library (preferred) or" if HAS_SOME_PYCRYPTO: NEED_CRYPTO_LIBRARY += " a newer version of" NEED_CRYPTO_LIBRARY += " pycrypto in order to function." class AnsibleVaultError(AnsibleError): pass class AnsibleVaultPasswordError(AnsibleVaultError): pass class AnsibleVaultFormatError(AnsibleError): pass def is_encrypted(data): """ Test if this is vault encrypted data blob :arg data: a byte or text string to test whether it is recognized as vault encrypted data :returns: True if it is recognized. Otherwise, False. """ try: # Make sure we have a byte string and that it only contains ascii # bytes. 
b_data = to_bytes(to_text(data, encoding='ascii', errors='strict', nonstring='strict'), encoding='ascii', errors='strict') except (UnicodeError, TypeError): # The vault format is pure ascii so if we failed to encode to bytes # via ascii we know that this is not vault data. # Similarly, if it's not a string, it's not vault data return False if b_data.startswith(b_HEADER): return True return False def is_encrypted_file(file_obj, start_pos=0, count=-1): """Test if the contents of a file obj are a vault encrypted data blob. :arg file_obj: A file object that will be read from. :kwarg start_pos: A byte offset in the file to start reading the header from. Defaults to 0, the beginning of the file. :kwarg count: Read up to this number of bytes from the file to determine if it looks like encrypted vault data. The default is -1, read to the end of file. :returns: True if the file looks like a vault file. Otherwise, False. """ # read the header and reset the file stream to where it started current_position = file_obj.tell() try: file_obj.seek(start_pos) return is_encrypted(file_obj.read(count)) finally: file_obj.seek(current_position) def _parse_vaulttext_envelope(b_vaulttext_envelope, default_vault_id=None): b_tmpdata = b_vaulttext_envelope.splitlines() b_tmpheader = b_tmpdata[0].strip().split(b';') b_version = b_tmpheader[1].strip() cipher_name = to_text(b_tmpheader[2].strip()) vault_id = default_vault_id # Only attempt to find vault_id if the vault file is version 1.2 or newer # if self.b_version == b'1.2': if len(b_tmpheader) >= 4: vault_id = to_text(b_tmpheader[3].strip()) b_ciphertext = b''.join(b_tmpdata[1:]) return b_ciphertext, b_version, cipher_name, vault_id def parse_vaulttext_envelope(b_vaulttext_envelope, default_vault_id=None, filename=None): """Parse the vaulttext envelope When data is saved, it has a header prepended and is formatted into 80 character lines. 
This method extracts the information from the header and then removes the header and the inserted newlines. The string returned is suitable for processing by the Cipher classes. :arg b_vaulttext: byte str containing the data from a save file :kwarg default_vault_id: The vault_id name to use if the vaulttext does not provide one. :kwarg filename: The filename that the data came from. This is only used to make better error messages in case the data cannot be decrypted. This is optional. :returns: A tuple of byte str of the vaulttext suitable to pass to parse_vaultext, a byte str of the vault format version, the name of the cipher used, and the vault_id. :raises: AnsibleVaultFormatError: if the vaulttext_envelope format is invalid """ # used by decrypt default_vault_id = default_vault_id or C.DEFAULT_VAULT_IDENTITY try: return _parse_vaulttext_envelope(b_vaulttext_envelope, default_vault_id) except Exception as exc: msg = "Vault envelope format error" if filename: msg += ' in %s' % (filename) msg += ': %s' % exc raise AnsibleVaultFormatError(msg) def format_vaulttext_envelope(b_ciphertext, cipher_name, version=None, vault_id=None): """ Add header and format to 80 columns :arg b_ciphertext: the encrypted and hexlified data as a byte string :arg cipher_name: unicode cipher name (for ex, u'AES256') :arg version: unicode vault version (for ex, '1.2'). Optional ('1.1' is default) :arg vault_id: unicode vault identifier. If provided, the version will be bumped to 1.2. :returns: a byte str that should be dumped into a file. It's formatted to 80 char columns and has the header prepended """ if not cipher_name: raise AnsibleError("the cipher must be set before adding a header") version = version or '1.1' # If we specify a vault_id, use format version 1.2. 
For no vault_id, stick to 1.1 if vault_id and vault_id != u'default': version = '1.2' b_version = to_bytes(version, 'utf-8', errors='strict') b_vault_id = to_bytes(vault_id, 'utf-8', errors='strict') b_cipher_name = to_bytes(cipher_name, 'utf-8', errors='strict') header_parts = [b_HEADER, b_version, b_cipher_name] if b_version == b'1.2' and b_vault_id: header_parts.append(b_vault_id) header = b';'.join(header_parts) b_vaulttext = [header] b_vaulttext += [b_ciphertext[i:i + 80] for i in range(0, len(b_ciphertext), 80)] b_vaulttext += [b''] b_vaulttext = b'\n'.join(b_vaulttext) return b_vaulttext def _unhexlify(b_data): try: return unhexlify(b_data) except (BinasciiError, TypeError) as exc: raise AnsibleVaultFormatError('Vault format unhexlify error: %s' % exc) def _parse_vaulttext(b_vaulttext): b_vaulttext = _unhexlify(b_vaulttext) b_salt, b_crypted_hmac, b_ciphertext = b_vaulttext.split(b"\n", 2) b_salt = _unhexlify(b_salt) b_ciphertext = _unhexlify(b_ciphertext) return b_ciphertext, b_salt, b_crypted_hmac def parse_vaulttext(b_vaulttext): """Parse the vaulttext :arg b_vaulttext: byte str containing the vaulttext (ciphertext, salt, crypted_hmac) :returns: A tuple of byte str of the ciphertext suitable for passing to a Cipher class's decrypt() function, a byte str of the salt, and a byte str of the crypted_hmac :raises: AnsibleVaultFormatError: if the vaulttext format is invalid """ # SPLIT SALT, DIGEST, AND DATA try: return _parse_vaulttext(b_vaulttext) except AnsibleVaultFormatError: raise except Exception as exc: msg = "Vault vaulttext format error: %s" % exc raise AnsibleVaultFormatError(msg) def verify_secret_is_not_empty(secret, msg=None): '''Check the secret against minimal requirements. Raises: AnsibleVaultPasswordError if the password does not meet requirements. Currently, only requirement is that the password is not None or an empty string. 
''' msg = msg or 'Invalid vault password was provided' if not secret: raise AnsibleVaultPasswordError(msg) class VaultSecret: '''Opaque/abstract objects for a single vault secret. ie, a password or a key.''' def __init__(self, _bytes=None): # FIXME: ? that seems wrong... Unset etc? self._bytes = _bytes @property def bytes(self): '''The secret as a bytestring. Sub classes that store text types will need to override to encode the text to bytes. ''' return self._bytes def load(self): return self._bytes class PromptVaultSecret(VaultSecret): default_prompt_formats = ["Vault password (%s): "] def __init__(self, _bytes=None, vault_id=None, prompt_formats=None): super(PromptVaultSecret, self).__init__(_bytes=_bytes) self.vault_id = vault_id if prompt_formats is None: self.prompt_formats = self.default_prompt_formats else: self.prompt_formats = prompt_formats @property def bytes(self): return self._bytes def load(self): self._bytes = self.ask_vault_passwords() def ask_vault_passwords(self): b_vault_passwords = [] for prompt_format in self.prompt_formats: prompt = prompt_format % {'vault_id': self.vault_id} try: vault_pass = display.prompt(prompt, private=True) except EOFError: raise AnsibleVaultError('EOFError (ctrl-d) on prompt for (%s)' % self.vault_id) verify_secret_is_not_empty(vault_pass) b_vault_pass = to_bytes(vault_pass, errors='strict', nonstring='simplerepr').strip() b_vault_passwords.append(b_vault_pass) # Make sure the passwords match by comparing them all to the first password for b_vault_password in b_vault_passwords: self.confirm(b_vault_passwords[0], b_vault_password) if b_vault_passwords: return b_vault_passwords[0] return None def confirm(self, b_vault_pass_1, b_vault_pass_2): # enforce no newline chars at the end of passwords if b_vault_pass_1 != b_vault_pass_2: # FIXME: more specific exception raise AnsibleError("Passwords do not match") def script_is_client(filename): '''Determine if a vault secret script is a client script that can be given --vault-id 
args''' # if password script is 'something-client' or 'something-client.[sh|py|rb|etc]' # script_name can still have '.' or could be entire filename if there is no ext script_name, dummy = os.path.splitext(filename) # TODO: for now, this is entirely based on filename if script_name.endswith('-client'): return True return False def get_file_vault_secret(filename=None, vault_id=None, encoding=None, loader=None): this_path = os.path.realpath(os.path.expanduser(filename)) if not os.path.exists(this_path): raise AnsibleError("The vault password file %s was not found" % this_path) if loader.is_executable(this_path): if script_is_client(filename): display.vvvv(u'The vault password file %s is a client script.' % to_text(filename)) # TODO: pass vault_id_name to script via cli return ClientScriptVaultSecret(filename=this_path, vault_id=vault_id, encoding=encoding, loader=loader) # just a plain vault password script. No args, returns a byte array return ScriptVaultSecret(filename=this_path, encoding=encoding, loader=loader) return FileVaultSecret(filename=this_path, encoding=encoding, loader=loader) # TODO: mv these classes to a separate file so we don't pollute vault with 'subprocess' etc class FileVaultSecret(VaultSecret): def __init__(self, filename=None, encoding=None, loader=None): super(FileVaultSecret, self).__init__() self.filename = filename self.loader = loader self.encoding = encoding or 'utf8' # We could load from file here, but that is eventually a pain to test self._bytes = None self._text = None @property def bytes(self): if self._bytes: return self._bytes if self._text: return self._text.encode(self.encoding) return None def load(self): self._bytes = self._read_file(self.filename) def _read_file(self, filename): """ Read a vault password from a file or if executable, execute the script and retrieve password from STDOUT """ # TODO: replace with use of self.loader try: f = open(filename, "rb") vault_pass = f.read().strip() f.close() except (OSError, IOError) as 
e: raise AnsibleError("Could not read vault password file %s: %s" % (filename, e)) b_vault_data, dummy = self.loader._decrypt_if_vault_data(vault_pass, filename) vault_pass = b_vault_data.strip(b'\r\n') verify_secret_is_not_empty(vault_pass, msg='Invalid vault password was provided from file (%s)' % filename) return vault_pass def __repr__(self): if self.filename: return "%s(filename='%s')" % (self.__class__.__name__, self.filename) return "%s()" % (self.__class__.__name__) class ScriptVaultSecret(FileVaultSecret): def _read_file(self, filename): if not self.loader.is_executable(filename): raise AnsibleVaultError("The vault password script %s was not executable" % filename) command = self._build_command() stdout, stderr, p = self._run(command) self._check_results(stdout, stderr, p) vault_pass = stdout.strip(b'\r\n') empty_password_msg = 'Invalid vault password was provided from script (%s)' % filename verify_secret_is_not_empty(vault_pass, msg=empty_password_msg) return vault_pass def _run(self, command): try: # STDERR not captured to make it easier for users to prompt for input in their scripts p = subprocess.Popen(command, stdout=subprocess.PIPE) except OSError as e: msg_format = "Problem running vault password script %s (%s)." \ " If this is not a script, remove the executable bit from the file." 
msg = msg_format % (self.filename, e) raise AnsibleError(msg) stdout, stderr = p.communicate() return stdout, stderr, p def _check_results(self, stdout, stderr, popen): if popen.returncode != 0: raise AnsibleError("Vault password script %s returned non-zero (%s): %s" % (self.filename, popen.returncode, stderr)) def _build_command(self): return [self.filename] class ClientScriptVaultSecret(ScriptVaultSecret): VAULT_ID_UNKNOWN_RC = 2 def __init__(self, filename=None, encoding=None, loader=None, vault_id=None): super(ClientScriptVaultSecret, self).__init__(filename=filename, encoding=encoding, loader=loader) self._vault_id = vault_id display.vvvv(u'Executing vault password client script: %s --vault-id %s' % (to_text(filename), to_text(vault_id))) def _run(self, command): try: p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE) except OSError as e: msg_format = "Problem running vault password client script %s (%s)." \ " If this is not a script, remove the executable bit from the file." 
msg = msg_format % (self.filename, e) raise AnsibleError(msg) stdout, stderr = p.communicate() return stdout, stderr, p def _check_results(self, stdout, stderr, popen): if popen.returncode == self.VAULT_ID_UNKNOWN_RC: raise AnsibleError('Vault password client script %s did not find a secret for vault-id=%s: %s' % (self.filename, self._vault_id, stderr)) if popen.returncode != 0: raise AnsibleError("Vault password client script %s returned non-zero (%s) when getting secret for vault-id=%s: %s" % (self.filename, popen.returncode, self._vault_id, stderr)) def _build_command(self): command = [self.filename] if self._vault_id: command.extend(['--vault-id', self._vault_id]) return command def __repr__(self): if self.filename: return "%s(filename='%s', vault_id='%s')" % \ (self.__class__.__name__, self.filename, self._vault_id) return "%s()" % (self.__class__.__name__) def match_secrets(secrets, target_vault_ids): '''Find all VaultSecret objects that are mapped to any of the target_vault_ids in secrets''' if not secrets: return [] matches = [(vault_id, secret) for vault_id, secret in secrets if vault_id in target_vault_ids] return matches def match_best_secret(secrets, target_vault_ids): '''Find the best secret from secrets that matches target_vault_ids Since secrets should be ordered so the early secrets are 'better' than later ones, this just finds all the matches, then returns the first secret''' matches = match_secrets(secrets, target_vault_ids) if matches: return matches[0] # raise exception? 
return None def match_encrypt_vault_id_secret(secrets, encrypt_vault_id=None): # See if the --encrypt-vault-id matches a vault-id display.vvvv(u'encrypt_vault_id=%s' % to_text(encrypt_vault_id)) if encrypt_vault_id is None: raise AnsibleError('match_encrypt_vault_id_secret requires a non None encrypt_vault_id') encrypt_vault_id_matchers = [encrypt_vault_id] encrypt_secret = match_best_secret(secrets, encrypt_vault_id_matchers) # return the best match for --encrypt-vault-id if encrypt_secret: return encrypt_secret # If we specified a encrypt_vault_id and we couldn't find it, dont # fallback to using the first/best secret raise AnsibleVaultError('Did not find a match for --encrypt-vault-id=%s in the known vault-ids %s' % (encrypt_vault_id, [_v for _v, _vs in secrets])) def match_encrypt_secret(secrets, encrypt_vault_id=None): '''Find the best/first/only secret in secrets to use for encrypting''' display.vvvv(u'encrypt_vault_id=%s' % to_text(encrypt_vault_id)) # See if the --encrypt-vault-id matches a vault-id if encrypt_vault_id: return match_encrypt_vault_id_secret(secrets, encrypt_vault_id=encrypt_vault_id) # Find the best/first secret from secrets since we didnt specify otherwise # ie, consider all of the available secrets as matches _vault_id_matchers = [_vault_id for _vault_id, dummy in secrets] best_secret = match_best_secret(secrets, _vault_id_matchers) # can be empty list sans any tuple return best_secret class VaultLib: def __init__(self, secrets=None): self.secrets = secrets or [] self.cipher_name = None self.b_version = b'1.2' def encrypt(self, plaintext, secret=None, vault_id=None): """Vault encrypt a piece of data. :arg plaintext: a text or byte string to encrypt. :returns: a utf-8 encoded byte str of encrypted data. The string contains a header identifying this as vault encrypted data and formatted to newline terminated lines of 80 characters. This is suitable for dumping as is to a vault file. 
If the string passed in is a text string, it will be encoded to UTF-8 before encryption. """ if secret is None: if self.secrets: dummy, secret = match_encrypt_secret(self.secrets) else: raise AnsibleVaultError("A vault password must be specified to encrypt data") b_plaintext = to_bytes(plaintext, errors='surrogate_or_strict') if is_encrypted(b_plaintext): raise AnsibleError("input is already encrypted") if not self.cipher_name or self.cipher_name not in CIPHER_WRITE_WHITELIST: self.cipher_name = u"AES256" try: this_cipher = CIPHER_MAPPING[self.cipher_name]() except KeyError: raise AnsibleError(u"{0} cipher could not be found".format(self.cipher_name)) # encrypt data if vault_id: display.vvvvv(u'Encrypting with vault_id "%s" and vault secret %s' % (to_text(vault_id), to_text(secret))) else: display.vvvvv(u'Encrypting without a vault_id using vault secret %s' % to_text(secret)) b_ciphertext = this_cipher.encrypt(b_plaintext, secret) # format the data for output to the file b_vaulttext = format_vaulttext_envelope(b_ciphertext, self.cipher_name, vault_id=vault_id) return b_vaulttext def decrypt(self, vaulttext, filename=None): '''Decrypt a piece of vault encrypted data. :arg vaulttext: a string to decrypt. Since vault encrypted data is an ascii text format this can be either a byte str or unicode string. :kwarg filename: a filename that the data came from. This is only used to make better error messages in case the data cannot be decrypted. :returns: a byte string containing the decrypted data and the vault-id that was used ''' plaintext, vault_id, vault_secret = self.decrypt_and_get_vault_id(vaulttext, filename=filename) return plaintext def decrypt_and_get_vault_id(self, vaulttext, filename=None): """Decrypt a piece of vault encrypted data. :arg vaulttext: a string to decrypt. Since vault encrypted data is an ascii text format this can be either a byte str or unicode string. :kwarg filename: a filename that the data came from. 
This is only used to make better error messages in case the data cannot be decrypted. :returns: a byte string containing the decrypted data and the vault-id vault-secret that was used """ b_vaulttext = to_bytes(vaulttext, errors='strict', encoding='utf-8') if self.secrets is None: raise AnsibleVaultError("A vault password must be specified to decrypt data") if not is_encrypted(b_vaulttext): msg = "input is not vault encrypted data" if filename: msg += "%s is not a vault encrypted file" % to_native(filename) raise AnsibleError(msg) b_vaulttext, dummy, cipher_name, vault_id = parse_vaulttext_envelope(b_vaulttext, filename=filename) # create the cipher object, note that the cipher used for decrypt can # be different than the cipher used for encrypt if cipher_name in CIPHER_WHITELIST: this_cipher = CIPHER_MAPPING[cipher_name]() else: raise AnsibleError("{0} cipher could not be found".format(cipher_name)) b_plaintext = None if not self.secrets: raise AnsibleVaultError('Attempting to decrypt but no vault secrets found') # WARNING: Currently, the vault id is not required to match the vault id in the vault blob to # decrypt a vault properly. The vault id in the vault blob is not part of the encrypted # or signed vault payload. There is no cryptographic checking/verification/validation of the # vault blobs vault id. It can be tampered with and changed. The vault id is just a nick # name to use to pick the best secret and provide some ux/ui info. # iterate over all the applicable secrets (all of them by default) until one works... # if we specify a vault_id, only the corresponding vault secret is checked and # we check it first. 
vault_id_matchers = [] vault_id_used = None vault_secret_used = None if vault_id: display.vvvvv(u'Found a vault_id (%s) in the vaulttext' % to_text(vault_id)) vault_id_matchers.append(vault_id) _matches = match_secrets(self.secrets, vault_id_matchers) if _matches: display.vvvvv(u'We have a secret associated with vault id (%s), will try to use to decrypt %s' % (to_text(vault_id), to_text(filename))) else: display.vvvvv(u'Found a vault_id (%s) in the vault text, but we do not have a associated secret (--vault-id)' % to_text(vault_id)) # Not adding the other secrets to vault_secret_ids enforces a match between the vault_id from the vault_text and # the known vault secrets. if not C.DEFAULT_VAULT_ID_MATCH: # Add all of the known vault_ids as candidates for decrypting a vault. vault_id_matchers.extend([_vault_id for _vault_id, _dummy in self.secrets if _vault_id != vault_id]) matched_secrets = match_secrets(self.secrets, vault_id_matchers) # for vault_secret_id in vault_secret_ids: for vault_secret_id, vault_secret in matched_secrets: display.vvvvv(u'Trying to use vault secret=(%s) id=%s to decrypt %s' % (to_text(vault_secret), to_text(vault_secret_id), to_text(filename))) try: # secret = self.secrets[vault_secret_id] display.vvvv(u'Trying secret %s for vault_id=%s' % (to_text(vault_secret), to_text(vault_secret_id))) b_plaintext = this_cipher.decrypt(b_vaulttext, vault_secret) if b_plaintext is not None: vault_id_used = vault_secret_id vault_secret_used = vault_secret file_slug = '' if filename: file_slug = ' of "%s"' % filename display.vvvvv( u'Decrypt%s successful with secret=%s and vault_id=%s' % (to_text(file_slug), to_text(vault_secret), to_text(vault_secret_id)) ) break except AnsibleVaultFormatError as exc: msg = u"There was a vault format error" if filename: msg += u' in %s' % (to_text(filename)) msg += u': %s' % exc display.warning(msg) raise except AnsibleError as e: display.vvvv(u'Tried to use the vault secret (%s) to decrypt (%s) but it failed. 
Error: %s' % (to_text(vault_secret_id), to_text(filename), e)) continue else: msg = "Decryption failed (no vault secrets were found that could decrypt)" if filename: msg += " on %s" % to_native(filename) raise AnsibleVaultError(msg) if b_plaintext is None: msg = "Decryption failed" if filename: msg += " on %s" % to_native(filename) raise AnsibleError(msg) return b_plaintext, vault_id_used, vault_secret_used class VaultEditor: def __init__(self, vault=None): # TODO: it may be more useful to just make VaultSecrets and index of VaultLib objects... self.vault = vault or VaultLib() # TODO: mv shred file stuff to it's own class def _shred_file_custom(self, tmp_path): """"Destroy a file, when shred (core-utils) is not available Unix `shred' destroys files "so that they can be recovered only with great difficulty with specialised hardware, if at all". It is based on the method from the paper "Secure Deletion of Data from Magnetic and Solid-State Memory", Proceedings of the Sixth USENIX Security Symposium (San Jose, California, July 22-25, 1996). We do not go to that length to re-implement shred in Python; instead, overwriting with a block of random data should suffice. See https://github.com/ansible/ansible/pull/13700 . 
""" file_len = os.path.getsize(tmp_path) if file_len > 0: # avoid work when file was empty max_chunk_len = min(1024 * 1024 * 2, file_len) passes = 3 with open(tmp_path, "wb") as fh: for _ in range(passes): fh.seek(0, 0) # get a random chunk of data, each pass with other length chunk_len = random.randint(max_chunk_len // 2, max_chunk_len) data = os.urandom(chunk_len) for _ in range(0, file_len // chunk_len): fh.write(data) fh.write(data[:file_len % chunk_len]) # FIXME remove this assert once we have unittests to check its accuracy if fh.tell() != file_len: raise AnsibleAssertionError() os.fsync(fh) def _shred_file(self, tmp_path): """Securely destroy a decrypted file Note standard limitations of GNU shred apply (For flash, overwriting would have no effect due to wear leveling; for other storage systems, the async kernel->filesystem->disk calls never guarantee data hits the disk; etc). Furthermore, if your tmp dirs is on tmpfs (ramdisks), it is a non-issue. Nevertheless, some form of overwriting the data (instead of just removing the fs index entry) is a good idea. If shred is not available (e.g. on windows, or no core-utils installed), fall back on a custom shredding method. """ if not os.path.isfile(tmp_path): # file is already gone return try: r = subprocess.call(['shred', tmp_path]) except (OSError, ValueError): # shred is not available on this system, or some other error occurred. # ValueError caught because macOS El Capitan is raising an # exception big enough to hit a limit in python2-2.7.11 and below. # Symptom is ValueError: insecure pickle when shred is not # installed there. r = 1 if r != 0: # we could not successfully execute unix shred; therefore, do custom shred. 
self._shred_file_custom(tmp_path) os.remove(tmp_path) def _edit_file_helper(self, filename, secret, existing_data=None, force_save=False, vault_id=None): # Create a tempfile root, ext = os.path.splitext(os.path.realpath(filename)) fd, tmp_path = tempfile.mkstemp(suffix=ext) os.close(fd) cmd = self._editor_shell_command(tmp_path) try: if existing_data: self.write_data(existing_data, tmp_path, shred=False) # drop the user into an editor on the tmp file subprocess.call(cmd) except Exception as e: # whatever happens, destroy the decrypted file self._shred_file(tmp_path) raise AnsibleError('Unable to execute the command "%s": %s' % (' '.join(cmd), to_native(e))) b_tmpdata = self.read_data(tmp_path) # Do nothing if the content has not changed if existing_data == b_tmpdata and not force_save: self._shred_file(tmp_path) return # encrypt new data and write out to tmp # An existing vaultfile will always be UTF-8, # so decode to unicode here b_ciphertext = self.vault.encrypt(b_tmpdata, secret, vault_id=vault_id) self.write_data(b_ciphertext, tmp_path) # shuffle tmp file into place self.shuffle_files(tmp_path, filename) display.vvvvv(u'Saved edited file "%s" encrypted using %s and vault id "%s"' % (to_text(filename), to_text(secret), to_text(vault_id))) def _real_path(self, filename): # '-' is special to VaultEditor, dont expand it. if filename == '-': return filename real_path = os.path.realpath(filename) return real_path def encrypt_bytes(self, b_plaintext, secret, vault_id=None): b_ciphertext = self.vault.encrypt(b_plaintext, secret, vault_id=vault_id) return b_ciphertext def encrypt_file(self, filename, secret, vault_id=None, output_file=None): # A file to be encrypted into a vaultfile could be any encoding # so treat the contents as a byte string. 
# follow the symlink filename = self._real_path(filename) b_plaintext = self.read_data(filename) b_ciphertext = self.vault.encrypt(b_plaintext, secret, vault_id=vault_id) self.write_data(b_ciphertext, output_file or filename) def decrypt_file(self, filename, output_file=None): # follow the symlink filename = self._real_path(filename) ciphertext = self.read_data(filename) try: plaintext = self.vault.decrypt(ciphertext, filename=filename) except AnsibleError as e: raise AnsibleError("%s for %s" % (to_native(e), to_native(filename))) self.write_data(plaintext, output_file or filename, shred=False) def create_file(self, filename, secret, vault_id=None): """ create a new encrypted file """ dirname = os.path.dirname(filename) if dirname and not os.path.exists(dirname): display.warning(u"%s does not exist, creating..." % to_text(dirname)) makedirs_safe(dirname) # FIXME: If we can raise an error here, we can probably just make it # behave like edit instead. if os.path.isfile(filename): raise AnsibleError("%s exists, please use 'edit' instead" % filename) self._edit_file_helper(filename, secret, vault_id=vault_id) def edit_file(self, filename): vault_id_used = None vault_secret_used = None # follow the symlink filename = self._real_path(filename) b_vaulttext = self.read_data(filename) # vault or yaml files are always utf8 vaulttext = to_text(b_vaulttext) try: # vaulttext gets converted back to bytes, but alas # TODO: return the vault_id that worked? plaintext, vault_id_used, vault_secret_used = self.vault.decrypt_and_get_vault_id(vaulttext) except AnsibleError as e: raise AnsibleError("%s for %s" % (to_native(e), to_native(filename))) # Figure out the vault id from the file, to select the right secret to re-encrypt it # (duplicates parts of decrypt, but alas...) 
dummy, dummy, cipher_name, vault_id = parse_vaulttext_envelope(b_vaulttext, filename=filename) # vault id here may not be the vault id actually used for decrypting # as when the edited file has no vault-id but is decrypted by non-default id in secrets # (vault_id=default, while a different vault-id decrypted) # Keep the same vault-id (and version) as in the header if cipher_name not in CIPHER_WRITE_WHITELIST: # we want to get rid of files encrypted with the AES cipher self._edit_file_helper(filename, vault_secret_used, existing_data=plaintext, force_save=True, vault_id=vault_id) else: self._edit_file_helper(filename, vault_secret_used, existing_data=plaintext, force_save=False, vault_id=vault_id) def plaintext(self, filename): b_vaulttext = self.read_data(filename) vaulttext = to_text(b_vaulttext) try: plaintext = self.vault.decrypt(vaulttext, filename=filename) return plaintext except AnsibleError as e: raise AnsibleVaultError("%s for %s" % (to_native(e), to_native(filename))) # FIXME/TODO: make this use VaultSecret def rekey_file(self, filename, new_vault_secret, new_vault_id=None): # follow the symlink filename = self._real_path(filename) prev = os.stat(filename) b_vaulttext = self.read_data(filename) vaulttext = to_text(b_vaulttext) display.vvvvv(u'Rekeying file "%s" to with new vault-id "%s" and vault secret %s' % (to_text(filename), to_text(new_vault_id), to_text(new_vault_secret))) try: plaintext, vault_id_used, _dummy = self.vault.decrypt_and_get_vault_id(vaulttext) except AnsibleError as e: raise AnsibleError("%s for %s" % (to_native(e), to_native(filename))) # This is more or less an assert, see #18247 if new_vault_secret is None: raise AnsibleError('The value for the new_password to rekey %s with is not valid' % filename) # FIXME: VaultContext...? 
could rekey to a different vault_id in the same VaultSecrets # Need a new VaultLib because the new vault data can be a different # vault lib format or cipher (for ex, when we migrate 1.0 style vault data to # 1.1 style data we change the version and the cipher). This is where a VaultContext might help # the new vault will only be used for encrypting, so it doesn't need the vault secrets # (we will pass one in directly to encrypt) new_vault = VaultLib(secrets={}) b_new_vaulttext = new_vault.encrypt(plaintext, new_vault_secret, vault_id=new_vault_id) self.write_data(b_new_vaulttext, filename) # preserve permissions os.chmod(filename, prev.st_mode) os.chown(filename, prev.st_uid, prev.st_gid) display.vvvvv(u'Rekeyed file "%s" (decrypted with vault id "%s") was encrypted with new vault-id "%s" and vault secret %s' % (to_text(filename), to_text(vault_id_used), to_text(new_vault_id), to_text(new_vault_secret))) def read_data(self, filename): try: if filename == '-': data = sys.stdin.read() else: with open(filename, "rb") as fh: data = fh.read() except Exception as e: msg = to_native(e) if not msg: msg = repr(e) raise AnsibleError('Unable to read source file (%s): %s' % (to_native(filename), msg)) return data # TODO: add docstrings for arg types since this code is picky about that def write_data(self, data, filename, shred=True): """Write the data bytes to given path This is used to write a byte string to a file or stdout. It is used for writing the results of vault encryption or decryption. It is used for saving the ciphertext after encryption and it is also used for saving the plaintext after decrypting a vault. The type of the 'data' arg should be bytes, since in the plaintext case, the original contents can be of any text encoding or arbitrary binary data. When used to write the result of vault encryption, the val of the 'data' arg should be a utf-8 encoded byte string and not a text typ and not a text type.. 
When used to write the result of vault decryption, the val of the 'data' arg should be a byte string and not a text type. :arg data: the byte string (bytes) data :arg filename: filename to save 'data' to. :arg shred: if shred==True, make sure that the original data is first shredded so that is cannot be recovered. :returns: None """ # FIXME: do we need this now? data_bytes should always be a utf-8 byte string b_file_data = to_bytes(data, errors='strict') # get a ref to either sys.stdout.buffer for py3 or plain old sys.stdout for py2 # We need sys.stdout.buffer on py3 so we can write bytes to it since the plaintext # of the vaulted object could be anything/binary/etc output = getattr(sys.stdout, 'buffer', sys.stdout) if filename == '-': output.write(b_file_data) else: if os.path.isfile(filename): if shred: self._shred_file(filename) else: os.remove(filename) with open(filename, "wb") as fh: fh.write(b_file_data) def shuffle_files(self, src, dest): prev = None # overwrite dest with src if os.path.isfile(dest): prev = os.stat(dest) # old file 'dest' was encrypted, no need to _shred_file os.remove(dest) shutil.move(src, dest) # reset permissions if needed if prev is not None: # TODO: selinux, ACLs, xattr? os.chmod(dest, prev.st_mode) os.chown(dest, prev.st_uid, prev.st_gid) def _editor_shell_command(self, filename): env_editor = os.environ.get('EDITOR', 'vi') editor = shlex.split(env_editor) editor.append(filename) return editor ######################################## # CIPHERS # ######################################## class VaultAES256: """ Vault implementation using AES-CTR with an HMAC-SHA256 authentication code. Keys are derived using PBKDF2 """ # http://www.daemonology.net/blog/2009-06-11-cryptographic-right-answers.html # Note: strings in this class should be byte strings by default. 
def __init__(self): if not HAS_CRYPTOGRAPHY and not HAS_PYCRYPTO: raise AnsibleError(NEED_CRYPTO_LIBRARY) @staticmethod def _create_key_cryptography(b_password, b_salt, key_length, iv_length): kdf = PBKDF2HMAC( algorithm=hashes.SHA256(), length=2 * key_length + iv_length, salt=b_salt, iterations=10000, backend=CRYPTOGRAPHY_BACKEND) b_derivedkey = kdf.derive(b_password) return b_derivedkey @staticmethod def _pbkdf2_prf(p, s): hash_function = SHA256_pycrypto return HMAC_pycrypto.new(p, s, hash_function).digest() @classmethod def _create_key_pycrypto(cls, b_password, b_salt, key_length, iv_length): # make two keys and one iv b_derivedkey = PBKDF2_pycrypto(b_password, b_salt, dkLen=(2 * key_length) + iv_length, count=10000, prf=cls._pbkdf2_prf) return b_derivedkey @classmethod def _gen_key_initctr(cls, b_password, b_salt): # 16 for AES 128, 32 for AES256 key_length = 32 if HAS_CRYPTOGRAPHY: # AES is a 128-bit block cipher, so IVs and counter nonces are 16 bytes iv_length = algorithms.AES.block_size // 8 b_derivedkey = cls._create_key_cryptography(b_password, b_salt, key_length, iv_length) b_iv = b_derivedkey[(key_length * 2):(key_length * 2) + iv_length] elif HAS_PYCRYPTO: # match the size used for counter.new to avoid extra work iv_length = 16 b_derivedkey = cls._create_key_pycrypto(b_password, b_salt, key_length, iv_length) b_iv = hexlify(b_derivedkey[(key_length * 2):(key_length * 2) + iv_length]) else: raise AnsibleError(NEED_CRYPTO_LIBRARY + '(Detected in initctr)') b_key1 = b_derivedkey[:key_length] b_key2 = b_derivedkey[key_length:(key_length * 2)] return b_key1, b_key2, b_iv @staticmethod def _encrypt_cryptography(b_plaintext, b_key1, b_key2, b_iv): cipher = C_Cipher(algorithms.AES(b_key1), modes.CTR(b_iv), CRYPTOGRAPHY_BACKEND) encryptor = cipher.encryptor() padder = padding.PKCS7(algorithms.AES.block_size).padder() b_ciphertext = encryptor.update(padder.update(b_plaintext) + padder.finalize()) b_ciphertext += encryptor.finalize() # COMBINE SALT, DIGEST AND 
DATA hmac = HMAC(b_key2, hashes.SHA256(), CRYPTOGRAPHY_BACKEND) hmac.update(b_ciphertext) b_hmac = hmac.finalize() return to_bytes(hexlify(b_hmac), errors='surrogate_or_strict'), hexlify(b_ciphertext) @staticmethod def _encrypt_pycrypto(b_plaintext, b_key1, b_key2, b_iv): # PKCS#7 PAD DATA http://tools.ietf.org/html/rfc5652#section-6.3 bs = AES_pycrypto.block_size padding_length = (bs - len(b_plaintext) % bs) or bs b_plaintext += to_bytes(padding_length * chr(padding_length), encoding='ascii', errors='strict') # COUNTER.new PARAMETERS # 1) nbits (integer) - Length of the counter, in bits. # 2) initial_value (integer) - initial value of the counter. "iv" from _gen_key_initctr ctr = Counter_pycrypto.new(128, initial_value=int(b_iv, 16)) # AES.new PARAMETERS # 1) AES key, must be either 16, 24, or 32 bytes long -- "key" from _gen_key_initctr # 2) MODE_CTR, is the recommended mode # 3) counter=<CounterObject> cipher = AES_pycrypto.new(b_key1, AES_pycrypto.MODE_CTR, counter=ctr) # ENCRYPT PADDED DATA b_ciphertext = cipher.encrypt(b_plaintext) # COMBINE SALT, DIGEST AND DATA hmac = HMAC_pycrypto.new(b_key2, b_ciphertext, SHA256_pycrypto) return to_bytes(hmac.hexdigest(), errors='surrogate_or_strict'), hexlify(b_ciphertext) @classmethod def encrypt(cls, b_plaintext, secret): if secret is None: raise AnsibleVaultError('The secret passed to encrypt() was None') b_salt = os.urandom(32) b_password = secret.bytes b_key1, b_key2, b_iv = cls._gen_key_initctr(b_password, b_salt) if HAS_CRYPTOGRAPHY: b_hmac, b_ciphertext = cls._encrypt_cryptography(b_plaintext, b_key1, b_key2, b_iv) elif HAS_PYCRYPTO: b_hmac, b_ciphertext = cls._encrypt_pycrypto(b_plaintext, b_key1, b_key2, b_iv) else: raise AnsibleError(NEED_CRYPTO_LIBRARY + '(Detected in encrypt)') b_vaulttext = b'\n'.join([hexlify(b_salt), b_hmac, b_ciphertext]) # Unnecessary but getting rid of it is a backwards incompatible vault # format change b_vaulttext = hexlify(b_vaulttext) return b_vaulttext @classmethod def 
_decrypt_cryptography(cls, b_ciphertext, b_crypted_hmac, b_key1, b_key2, b_iv): # b_key1, b_key2, b_iv = self._gen_key_initctr(b_password, b_salt) # EXIT EARLY IF DIGEST DOESN'T MATCH hmac = HMAC(b_key2, hashes.SHA256(), CRYPTOGRAPHY_BACKEND) hmac.update(b_ciphertext) try: hmac.verify(_unhexlify(b_crypted_hmac)) except InvalidSignature as e: raise AnsibleVaultError('HMAC verification failed: %s' % e) cipher = C_Cipher(algorithms.AES(b_key1), modes.CTR(b_iv), CRYPTOGRAPHY_BACKEND) decryptor = cipher.decryptor() unpadder = padding.PKCS7(128).unpadder() b_plaintext = unpadder.update( decryptor.update(b_ciphertext) + decryptor.finalize() ) + unpadder.finalize() return b_plaintext @staticmethod def _is_equal(b_a, b_b): """ Comparing 2 byte arrrays in constant time to avoid timing attacks. It would be nice if there was a library for this but hey. """ if not (isinstance(b_a, binary_type) and isinstance(b_b, binary_type)): raise TypeError('_is_equal can only be used to compare two byte strings') # http://codahale.com/a-lesson-in-timing-attacks/ if len(b_a) != len(b_b): return False result = 0 for b_x, b_y in zip(b_a, b_b): if PY3: result |= b_x ^ b_y else: result |= ord(b_x) ^ ord(b_y) return result == 0 @classmethod def _decrypt_pycrypto(cls, b_ciphertext, b_crypted_hmac, b_key1, b_key2, b_iv): # EXIT EARLY IF DIGEST DOESN'T MATCH hmac_decrypt = HMAC_pycrypto.new(b_key2, b_ciphertext, SHA256_pycrypto) if not cls._is_equal(b_crypted_hmac, to_bytes(hmac_decrypt.hexdigest())): return None # SET THE COUNTER AND THE CIPHER ctr = Counter_pycrypto.new(128, initial_value=int(b_iv, 16)) cipher = AES_pycrypto.new(b_key1, AES_pycrypto.MODE_CTR, counter=ctr) # DECRYPT PADDED DATA b_plaintext = cipher.decrypt(b_ciphertext) # UNPAD DATA if PY3: padding_length = b_plaintext[-1] else: padding_length = ord(b_plaintext[-1]) b_plaintext = b_plaintext[:-padding_length] return b_plaintext @classmethod def decrypt(cls, b_vaulttext, secret): b_ciphertext, b_salt, b_crypted_hmac = 
parse_vaulttext(b_vaulttext) # TODO: would be nice if a VaultSecret could be passed directly to _decrypt_* # (move _gen_key_initctr() to a AES256 VaultSecret or VaultContext impl?) # though, likely needs to be python cryptography specific impl that basically # creates a Cipher() with b_key1, a Mode.CTR() with b_iv, and a HMAC() with sign key b_key2 b_password = secret.bytes b_key1, b_key2, b_iv = cls._gen_key_initctr(b_password, b_salt) if HAS_CRYPTOGRAPHY: b_plaintext = cls._decrypt_cryptography(b_ciphertext, b_crypted_hmac, b_key1, b_key2, b_iv) elif HAS_PYCRYPTO: b_plaintext = cls._decrypt_pycrypto(b_ciphertext, b_crypted_hmac, b_key1, b_key2, b_iv) else: raise AnsibleError(NEED_CRYPTO_LIBRARY + '(Detected in decrypt)') return b_plaintext # Keys could be made bytes later if the code that gets the data is more # naturally byte-oriented CIPHER_MAPPING = { u'AES256': VaultAES256, }
gpl-3.0
shahankhatch/aurora
src/test/python/apache/aurora/client/cli/test_config_noun.py
5
3105
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import textwrap from mock import patch from twitter.common.contextutil import temporary_file from apache.aurora.client.cli import EXIT_COMMAND_FAILURE from apache.aurora.client.cli.client import AuroraCommandLine from .util import AuroraClientCommandTest, FakeAuroraCommandContext class TestClientCreateCommand(AuroraClientCommandTest): def test_list_configs(self): mock_context = FakeAuroraCommandContext() with patch('apache.aurora.client.cli.config.ConfigNoun.create_context', return_value=mock_context): with temporary_file() as fp: fp.write(self.get_valid_config()) fp.flush() cmd = AuroraCommandLine() cmd.execute(['config', 'list', fp.name]) assert mock_context.out == ['jobs=[west/bozo/test/hello]'] assert mock_context.err == [] def test_list_configs_invalid(self): mock_context = FakeAuroraCommandContext() with patch('apache.aurora.client.cli.config.ConfigNoun.create_context', return_value=mock_context): with temporary_file() as fp: fp.write(self.get_invalid_config("blather=...")) fp.flush() cmd = AuroraCommandLine() result = cmd.execute(['config', 'list', fp.name]) assert result == EXIT_COMMAND_FAILURE assert mock_context.out == [] assert any(line.startswith("Error loading configuration file: invalid syntax") for line in mock_context.err) def get_config_with_no_jobs(self): return textwrap.dedent(""" HELLO_WORLD = Job( name = '%(job)s', role = '%(role)s', cluster = '%(cluster)s', environment = '%(env)s', instances = 20, 
update_config = UpdateConfig( batch_size = 5, restart_threshold = 30, watch_secs = 10, max_per_shard_failures = 2, ), task = Task( name = 'test', processes = [Process(name = 'hello_world', cmdline = 'echo {{thermos.ports[http]}}')], resources = Resources(cpu = 0.1, ram = 64 * MB, disk = 64 * MB), ) ) """) def test_list_configs_nojobs(self): mock_context = FakeAuroraCommandContext() with patch('apache.aurora.client.cli.config.ConfigNoun.create_context', return_value=mock_context): with temporary_file() as fp: fp.write(self.get_config_with_no_jobs()) fp.flush() cmd = AuroraCommandLine() cmd.execute(['config', 'list', fp.name]) assert mock_context.out == ["jobs=[]"] assert mock_context.err == []
apache-2.0
weebygames/boto
tests/unit/manage/test_ssh.py
114
2004
#!/usr/bin/env python # Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. # try: import paramiko from boto.manage.cmdshell import SSHClient except ImportError: paramiko = None SSHClient = None from tests.compat import mock, unittest class TestSSHTimeout(unittest.TestCase): @unittest.skipIf(not paramiko, 'Paramiko missing') def test_timeout(self): client_tmp = paramiko.SSHClient def client_mock(): client = client_tmp() client.connect = mock.Mock(name='connect') return client paramiko.SSHClient = client_mock paramiko.RSAKey.from_private_key_file = mock.Mock() server = mock.Mock() test = SSHClient(server) self.assertEqual(test._ssh_client.connect.call_args[1]['timeout'], None) test2 = SSHClient(server, timeout=30) self.assertEqual(test2._ssh_client.connect.call_args[1]['timeout'], 30)
mit
ammarkhann/FinalSeniorCode
lib/python2.7/site-packages/snowballstemmer/finnish_stemmer.py
19
25600
# self file was generated automatically by the Snowball to Python interpreter from .basestemmer import BaseStemmer from .among import Among class FinnishStemmer(BaseStemmer): ''' self class was automatically generated by a Snowball to Python interpreter It implements the stemming algorithm defined by a snowball script. ''' serialVersionUID = 1 a_0 = [ Among(u"pa", -1, 1), Among(u"sti", -1, 2), Among(u"kaan", -1, 1), Among(u"han", -1, 1), Among(u"kin", -1, 1), Among(u"h\u00E4n", -1, 1), Among(u"k\u00E4\u00E4n", -1, 1), Among(u"ko", -1, 1), Among(u"p\u00E4", -1, 1), Among(u"k\u00F6", -1, 1) ] a_1 = [ Among(u"lla", -1, -1), Among(u"na", -1, -1), Among(u"ssa", -1, -1), Among(u"ta", -1, -1), Among(u"lta", 3, -1), Among(u"sta", 3, -1) ] a_2 = [ Among(u"ll\u00E4", -1, -1), Among(u"n\u00E4", -1, -1), Among(u"ss\u00E4", -1, -1), Among(u"t\u00E4", -1, -1), Among(u"lt\u00E4", 3, -1), Among(u"st\u00E4", 3, -1) ] a_3 = [ Among(u"lle", -1, -1), Among(u"ine", -1, -1) ] a_4 = [ Among(u"nsa", -1, 3), Among(u"mme", -1, 3), Among(u"nne", -1, 3), Among(u"ni", -1, 2), Among(u"si", -1, 1), Among(u"an", -1, 4), Among(u"en", -1, 6), Among(u"\u00E4n", -1, 5), Among(u"ns\u00E4", -1, 3) ] a_5 = [ Among(u"aa", -1, -1), Among(u"ee", -1, -1), Among(u"ii", -1, -1), Among(u"oo", -1, -1), Among(u"uu", -1, -1), Among(u"\u00E4\u00E4", -1, -1), Among(u"\u00F6\u00F6", -1, -1) ] a_6 = [ Among(u"a", -1, 8), Among(u"lla", 0, -1), Among(u"na", 0, -1), Among(u"ssa", 0, -1), Among(u"ta", 0, -1), Among(u"lta", 4, -1), Among(u"sta", 4, -1), Among(u"tta", 4, 9), Among(u"lle", -1, -1), Among(u"ine", -1, -1), Among(u"ksi", -1, -1), Among(u"n", -1, 7), Among(u"han", 11, 1), Among(u"den", 11, -1, "r_VI"), Among(u"seen", 11, -1, "r_LONG"), Among(u"hen", 11, 2), Among(u"tten", 11, -1, "r_VI"), Among(u"hin", 11, 3), Among(u"siin", 11, -1, "r_VI"), Among(u"hon", 11, 4), Among(u"h\u00E4n", 11, 5), Among(u"h\u00F6n", 11, 6), Among(u"\u00E4", -1, 8), Among(u"ll\u00E4", 22, -1), Among(u"n\u00E4", 22, -1), 
Among(u"ss\u00E4", 22, -1), Among(u"t\u00E4", 22, -1), Among(u"lt\u00E4", 26, -1), Among(u"st\u00E4", 26, -1), Among(u"tt\u00E4", 26, 9) ] a_7 = [ Among(u"eja", -1, -1), Among(u"mma", -1, 1), Among(u"imma", 1, -1), Among(u"mpa", -1, 1), Among(u"impa", 3, -1), Among(u"mmi", -1, 1), Among(u"immi", 5, -1), Among(u"mpi", -1, 1), Among(u"impi", 7, -1), Among(u"ej\u00E4", -1, -1), Among(u"mm\u00E4", -1, 1), Among(u"imm\u00E4", 10, -1), Among(u"mp\u00E4", -1, 1), Among(u"imp\u00E4", 12, -1) ] a_8 = [ Among(u"i", -1, -1), Among(u"j", -1, -1) ] a_9 = [ Among(u"mma", -1, 1), Among(u"imma", 0, -1) ] g_AEI = [17, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8] g_V1 = [17, 65, 16, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 0, 32] g_V2 = [17, 65, 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 0, 32] g_particle_end = [17, 97, 24, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 0, 32] B_ending_removed = False S_x = "" I_p2 = 0 I_p1 = 0 def copy_from(self, other): self.B_ending_removed = other.B_ending_removed self.S_x = other.S_x self.I_p2 = other.I_p2 self.I_p1 = other.I_p1 super.copy_from(other) def r_mark_regions(self): # (, line 41 self.I_p1 = self.limit; self.I_p2 = self.limit; # goto, line 46 try: while True: v_1 = self.cursor try: if not self.in_grouping(FinnishStemmer.g_V1, 97, 246): raise lab1() self.cursor = v_1 raise lab0() except lab1: pass self.cursor = v_1 if self.cursor >= self.limit: return False self.cursor += 1 except lab0: pass # gopast, line 46 try: while True: try: if not self.out_grouping(FinnishStemmer.g_V1, 97, 246): raise lab3() raise lab2() except lab3: pass if self.cursor >= self.limit: return False self.cursor += 1 except lab2: pass # setmark p1, line 46 self.I_p1 = self.cursor # goto, line 47 try: while True: v_3 = self.cursor try: if not self.in_grouping(FinnishStemmer.g_V1, 97, 246): raise lab5() self.cursor = v_3 raise lab4() except lab5: pass self.cursor = v_3 if self.cursor >= self.limit: return False self.cursor += 1 except lab4: pass # gopast, line 47 
try: while True: try: if not self.out_grouping(FinnishStemmer.g_V1, 97, 246): raise lab7() raise lab6() except lab7: pass if self.cursor >= self.limit: return False self.cursor += 1 except lab6: pass # setmark p2, line 47 self.I_p2 = self.cursor return True def r_R2(self): if not self.I_p2 <= self.cursor: return False return True def r_particle_etc(self): # (, line 54 # setlimit, line 55 v_1 = self.limit - self.cursor # tomark, line 55 if self.cursor < self.I_p1: return False self.cursor = self.I_p1 v_2 = self.limit_backward self.limit_backward = self.cursor self.cursor = self.limit - v_1 # (, line 55 # [, line 55 self.ket = self.cursor # substring, line 55 among_var = self.find_among_b(FinnishStemmer.a_0, 10) if among_var == 0: self.limit_backward = v_2 return False # ], line 55 self.bra = self.cursor self.limit_backward = v_2 if among_var == 0: return False elif among_var == 1: # (, line 62 if not self.in_grouping_b(FinnishStemmer.g_particle_end, 97, 246): return False elif among_var == 2: # (, line 64 # call R2, line 64 if not self.r_R2(): return False # delete, line 66 if not self.slice_del(): return False return True def r_possessive(self): # (, line 68 # setlimit, line 69 v_1 = self.limit - self.cursor # tomark, line 69 if self.cursor < self.I_p1: return False self.cursor = self.I_p1 v_2 = self.limit_backward self.limit_backward = self.cursor self.cursor = self.limit - v_1 # (, line 69 # [, line 69 self.ket = self.cursor # substring, line 69 among_var = self.find_among_b(FinnishStemmer.a_4, 9) if among_var == 0: self.limit_backward = v_2 return False # ], line 69 self.bra = self.cursor self.limit_backward = v_2 if among_var == 0: return False elif among_var == 1: # (, line 72 # not, line 72 v_3 = self.limit - self.cursor try: # literal, line 72 if not self.eq_s_b(1, u"k"): raise lab0() return False except lab0: pass self.cursor = self.limit - v_3 # delete, line 72 if not self.slice_del(): return False elif among_var == 2: # (, line 74 # delete, line 74 if not 
self.slice_del(): return False # [, line 74 self.ket = self.cursor # literal, line 74 if not self.eq_s_b(3, u"kse"): return False # ], line 74 self.bra = self.cursor # <-, line 74 if not self.slice_from(u"ksi"): return False elif among_var == 3: # (, line 78 # delete, line 78 if not self.slice_del(): return False elif among_var == 4: # (, line 81 # among, line 81 if self.find_among_b(FinnishStemmer.a_1, 6) == 0: return False # delete, line 81 if not self.slice_del(): return False elif among_var == 5: # (, line 83 # among, line 83 if self.find_among_b(FinnishStemmer.a_2, 6) == 0: return False # delete, line 84 if not self.slice_del(): return False elif among_var == 6: # (, line 86 # among, line 86 if self.find_among_b(FinnishStemmer.a_3, 2) == 0: return False # delete, line 86 if not self.slice_del(): return False return True def r_LONG(self): # among, line 91 if self.find_among_b(FinnishStemmer.a_5, 7) == 0: return False return True def r_VI(self): # (, line 93 # literal, line 93 if not self.eq_s_b(1, u"i"): return False if not self.in_grouping_b(FinnishStemmer.g_V2, 97, 246): return False return True def r_case_ending(self): # (, line 95 # setlimit, line 96 v_1 = self.limit - self.cursor # tomark, line 96 if self.cursor < self.I_p1: return False self.cursor = self.I_p1 v_2 = self.limit_backward self.limit_backward = self.cursor self.cursor = self.limit - v_1 # (, line 96 # [, line 96 self.ket = self.cursor # substring, line 96 among_var = self.find_among_b(FinnishStemmer.a_6, 30) if among_var == 0: self.limit_backward = v_2 return False # ], line 96 self.bra = self.cursor self.limit_backward = v_2 if among_var == 0: return False elif among_var == 1: # (, line 98 # literal, line 98 if not self.eq_s_b(1, u"a"): return False elif among_var == 2: # (, line 99 # literal, line 99 if not self.eq_s_b(1, u"e"): return False elif among_var == 3: # (, line 100 # literal, line 100 if not self.eq_s_b(1, u"i"): return False elif among_var == 4: # (, line 101 # literal, line 101 
if not self.eq_s_b(1, u"o"): return False elif among_var == 5: # (, line 102 # literal, line 102 if not self.eq_s_b(1, u"\u00E4"): return False elif among_var == 6: # (, line 103 # literal, line 103 if not self.eq_s_b(1, u"\u00F6"): return False elif among_var == 7: # (, line 111 # try, line 111 v_3 = self.limit - self.cursor try: # (, line 111 # and, line 113 v_4 = self.limit - self.cursor # or, line 112 try: v_5 = self.limit - self.cursor try: # call LONG, line 111 if not self.r_LONG(): raise lab2() raise lab1() except lab2: pass self.cursor = self.limit - v_5 # literal, line 112 if not self.eq_s_b(2, u"ie"): self.cursor = self.limit - v_3 raise lab0() except lab1: pass self.cursor = self.limit - v_4 # next, line 113 if self.cursor <= self.limit_backward: self.cursor = self.limit - v_3 raise lab0() self.cursor -= 1 # ], line 113 self.bra = self.cursor except lab0: pass elif among_var == 8: # (, line 119 if not self.in_grouping_b(FinnishStemmer.g_V1, 97, 246): return False if not self.out_grouping_b(FinnishStemmer.g_V1, 97, 246): return False elif among_var == 9: # (, line 121 # literal, line 121 if not self.eq_s_b(1, u"e"): return False # delete, line 138 if not self.slice_del(): return False # set ending_removed, line 139 self.B_ending_removed = True return True def r_other_endings(self): # (, line 141 # setlimit, line 142 v_1 = self.limit - self.cursor # tomark, line 142 if self.cursor < self.I_p2: return False self.cursor = self.I_p2 v_2 = self.limit_backward self.limit_backward = self.cursor self.cursor = self.limit - v_1 # (, line 142 # [, line 142 self.ket = self.cursor # substring, line 142 among_var = self.find_among_b(FinnishStemmer.a_7, 14) if among_var == 0: self.limit_backward = v_2 return False # ], line 142 self.bra = self.cursor self.limit_backward = v_2 if among_var == 0: return False elif among_var == 1: # (, line 146 # not, line 146 v_3 = self.limit - self.cursor try: # literal, line 146 if not self.eq_s_b(2, u"po"): raise lab0() return False 
except lab0: pass self.cursor = self.limit - v_3 # delete, line 151 if not self.slice_del(): return False return True def r_i_plural(self): # (, line 153 # setlimit, line 154 v_1 = self.limit - self.cursor # tomark, line 154 if self.cursor < self.I_p1: return False self.cursor = self.I_p1 v_2 = self.limit_backward self.limit_backward = self.cursor self.cursor = self.limit - v_1 # (, line 154 # [, line 154 self.ket = self.cursor # substring, line 154 if self.find_among_b(FinnishStemmer.a_8, 2) == 0: self.limit_backward = v_2 return False # ], line 154 self.bra = self.cursor self.limit_backward = v_2 # delete, line 158 if not self.slice_del(): return False return True def r_t_plural(self): # (, line 160 # setlimit, line 161 v_1 = self.limit - self.cursor # tomark, line 161 if self.cursor < self.I_p1: return False self.cursor = self.I_p1 v_2 = self.limit_backward self.limit_backward = self.cursor self.cursor = self.limit - v_1 # (, line 161 # [, line 162 self.ket = self.cursor # literal, line 162 if not self.eq_s_b(1, u"t"): self.limit_backward = v_2 return False # ], line 162 self.bra = self.cursor # test, line 162 v_3 = self.limit - self.cursor if not self.in_grouping_b(FinnishStemmer.g_V1, 97, 246): self.limit_backward = v_2 return False self.cursor = self.limit - v_3 # delete, line 163 if not self.slice_del(): return False self.limit_backward = v_2 # setlimit, line 165 v_4 = self.limit - self.cursor # tomark, line 165 if self.cursor < self.I_p2: return False self.cursor = self.I_p2 v_5 = self.limit_backward self.limit_backward = self.cursor self.cursor = self.limit - v_4 # (, line 165 # [, line 165 self.ket = self.cursor # substring, line 165 among_var = self.find_among_b(FinnishStemmer.a_9, 2) if among_var == 0: self.limit_backward = v_5 return False # ], line 165 self.bra = self.cursor self.limit_backward = v_5 if among_var == 0: return False elif among_var == 1: # (, line 167 # not, line 167 v_6 = self.limit - self.cursor try: # literal, line 167 if not 
self.eq_s_b(2, u"po"): raise lab0() return False except lab0: pass self.cursor = self.limit - v_6 # delete, line 170 if not self.slice_del(): return False return True def r_tidy(self): # (, line 172 # setlimit, line 173 v_1 = self.limit - self.cursor # tomark, line 173 if self.cursor < self.I_p1: return False self.cursor = self.I_p1 v_2 = self.limit_backward self.limit_backward = self.cursor self.cursor = self.limit - v_1 # (, line 173 # do, line 174 v_3 = self.limit - self.cursor try: # (, line 174 # and, line 174 v_4 = self.limit - self.cursor # call LONG, line 174 if not self.r_LONG(): raise lab0() self.cursor = self.limit - v_4 # (, line 174 # [, line 174 self.ket = self.cursor # next, line 174 if self.cursor <= self.limit_backward: raise lab0() self.cursor -= 1 # ], line 174 self.bra = self.cursor # delete, line 174 if not self.slice_del(): return False except lab0: pass self.cursor = self.limit - v_3 # do, line 175 v_5 = self.limit - self.cursor try: # (, line 175 # [, line 175 self.ket = self.cursor if not self.in_grouping_b(FinnishStemmer.g_AEI, 97, 228): raise lab1() # ], line 175 self.bra = self.cursor if not self.out_grouping_b(FinnishStemmer.g_V1, 97, 246): raise lab1() # delete, line 175 if not self.slice_del(): return False except lab1: pass self.cursor = self.limit - v_5 # do, line 176 v_6 = self.limit - self.cursor try: # (, line 176 # [, line 176 self.ket = self.cursor # literal, line 176 if not self.eq_s_b(1, u"j"): raise lab2() # ], line 176 self.bra = self.cursor # or, line 176 try: v_7 = self.limit - self.cursor try: # literal, line 176 if not self.eq_s_b(1, u"o"): raise lab4() raise lab3() except lab4: pass self.cursor = self.limit - v_7 # literal, line 176 if not self.eq_s_b(1, u"u"): raise lab2() except lab3: pass # delete, line 176 if not self.slice_del(): return False except lab2: pass self.cursor = self.limit - v_6 # do, line 177 v_8 = self.limit - self.cursor try: # (, line 177 # [, line 177 self.ket = self.cursor # literal, line 177 if 
not self.eq_s_b(1, u"o"): raise lab5() # ], line 177 self.bra = self.cursor # literal, line 177 if not self.eq_s_b(1, u"j"): raise lab5() # delete, line 177 if not self.slice_del(): return False except lab5: pass self.cursor = self.limit - v_8 self.limit_backward = v_2 # goto, line 179 try: while True: v_9 = self.limit - self.cursor try: if not self.out_grouping_b(FinnishStemmer.g_V1, 97, 246): raise lab7() self.cursor = self.limit - v_9 raise lab6() except lab7: pass self.cursor = self.limit - v_9 if self.cursor <= self.limit_backward: return False self.cursor -= 1 except lab6: pass # [, line 179 self.ket = self.cursor # next, line 179 if self.cursor <= self.limit_backward: return False self.cursor -= 1 # ], line 179 self.bra = self.cursor # -> x, line 179 self.S_x = self.slice_to(self.S_x) if self.S_x == '': return False # name x, line 179 if not self.eq_v_b(self.S_x): return False # delete, line 179 if not self.slice_del(): return False return True def _stem(self): # (, line 183 # do, line 185 v_1 = self.cursor try: # call mark_regions, line 185 if not self.r_mark_regions(): raise lab0() except lab0: pass self.cursor = v_1 # unset ending_removed, line 186 self.B_ending_removed = False # backwards, line 187 self.limit_backward = self.cursor self.cursor = self.limit # (, line 187 # do, line 188 v_2 = self.limit - self.cursor try: # call particle_etc, line 188 if not self.r_particle_etc(): raise lab1() except lab1: pass self.cursor = self.limit - v_2 # do, line 189 v_3 = self.limit - self.cursor try: # call possessive, line 189 if not self.r_possessive(): raise lab2() except lab2: pass self.cursor = self.limit - v_3 # do, line 190 v_4 = self.limit - self.cursor try: # call case_ending, line 190 if not self.r_case_ending(): raise lab3() except lab3: pass self.cursor = self.limit - v_4 # do, line 191 v_5 = self.limit - self.cursor try: # call other_endings, line 191 if not self.r_other_endings(): raise lab4() except lab4: pass self.cursor = self.limit - v_5 # or, 
line 192 try: v_6 = self.limit - self.cursor try: # (, line 192 # Boolean test ending_removed, line 192 if not self.B_ending_removed: raise lab6() # do, line 192 v_7 = self.limit - self.cursor try: # call i_plural, line 192 if not self.r_i_plural(): raise lab7() except lab7: pass self.cursor = self.limit - v_7 raise lab5() except lab6: pass self.cursor = self.limit - v_6 # do, line 192 v_8 = self.limit - self.cursor try: # call t_plural, line 192 if not self.r_t_plural(): raise lab8() except lab8: pass self.cursor = self.limit - v_8 except lab5: pass # do, line 193 v_9 = self.limit - self.cursor try: # call tidy, line 193 if not self.r_tidy(): raise lab9() except lab9: pass self.cursor = self.limit - v_9 self.cursor = self.limit_backward return True def equals(self, o): return isinstance(o, FinnishStemmer) def hashCode(self): return hash("FinnishStemmer") class lab0(BaseException): pass class lab1(BaseException): pass class lab2(BaseException): pass class lab3(BaseException): pass class lab4(BaseException): pass class lab5(BaseException): pass class lab6(BaseException): pass class lab7(BaseException): pass class lab8(BaseException): pass class lab9(BaseException): pass
mit
dancingdan/tensorflow
tensorflow/python/framework/tensor_shape.py
6
31004
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Helper classes for tensor shape inference.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.core.framework import tensor_shape_pb2 from tensorflow.python.framework import dtypes from tensorflow.python.util import compat from tensorflow.python.util.tf_export import tf_export @tf_export("Dimension") class Dimension(object): """Represents the value of one dimension in a TensorShape.""" def __init__(self, value): """Creates a new Dimension with the given value.""" if value is None: self._value = None elif isinstance(value, dtypes.DType): raise TypeError("Cannot convert %s to Dimension" % value) else: self._value = int(value) if (not isinstance(value, compat.bytes_or_text_types) and self._value != value): raise ValueError("Ambiguous dimension: %s" % value) if self._value < 0: raise ValueError("Dimension %d must be >= 0" % self._value) def __repr__(self): return "Dimension(%s)" % repr(self._value) def __str__(self): value = self._value return "?" 
if value is None else str(value) def __eq__(self, other): """Returns true if `other` has the same known value as this Dimension.""" try: other = as_dimension(other) except (TypeError, ValueError): return NotImplemented if self._value is None or other.value is None: return None return self._value == other.value def __ne__(self, other): """Returns true if `other` has a different known value from `self`.""" try: other = as_dimension(other) except (TypeError, ValueError): return NotImplemented if self._value is None or other.value is None: return None return self._value != other.value def __int__(self): return self._value # This is needed for Windows. # See https://github.com/tensorflow/tensorflow/pull/9780 def __long__(self): return self._value def __index__(self): # Allow use in Python 3 range return self._value @property def value(self): """The value of this dimension, or None if it is unknown.""" return self._value def is_compatible_with(self, other): """Returns true if `other` is compatible with this Dimension. Two known Dimensions are compatible if they have the same value. An unknown Dimension is compatible with all other Dimensions. Args: other: Another Dimension. Returns: True if this Dimension and `other` are compatible. """ other = as_dimension(other) return (self._value is None or other.value is None or self._value == other.value) def assert_is_compatible_with(self, other): """Raises an exception if `other` is not compatible with this Dimension. Args: other: Another Dimension. Raises: ValueError: If `self` and `other` are not compatible (see is_compatible_with). """ if not self.is_compatible_with(other): raise ValueError("Dimensions %s and %s are not compatible" % (self, other)) def merge_with(self, other): """Returns a Dimension that combines the information in `self` and `other`. 
Dimensions are combined as follows: ```python tf.Dimension(n) .merge_with(tf.Dimension(n)) == tf.Dimension(n) tf.Dimension(n) .merge_with(tf.Dimension(None)) == tf.Dimension(n) tf.Dimension(None).merge_with(tf.Dimension(n)) == tf.Dimension(n) tf.Dimension(None).merge_with(tf.Dimension(None)) == tf.Dimension(None) tf.Dimension(n) .merge_with(tf.Dimension(m)) # raises ValueError for n != m ``` Args: other: Another Dimension. Returns: A Dimension containing the combined information of `self` and `other`. Raises: ValueError: If `self` and `other` are not compatible (see is_compatible_with). """ other = as_dimension(other) self.assert_is_compatible_with(other) if self._value is None: return Dimension(other.value) else: return Dimension(self._value) def __add__(self, other): """Returns the sum of `self` and `other`. Dimensions are summed as follows: ```python tf.Dimension(m) + tf.Dimension(n) == tf.Dimension(m + n) tf.Dimension(m) + tf.Dimension(None) == tf.Dimension(None) tf.Dimension(None) + tf.Dimension(n) == tf.Dimension(None) tf.Dimension(None) + tf.Dimension(None) == tf.Dimension(None) ``` Args: other: Another Dimension, or a value accepted by `as_dimension`. Returns: A Dimension whose value is the sum of `self` and `other`. """ other = as_dimension(other) if self._value is None or other.value is None: return Dimension(None) else: return Dimension(self._value + other.value) def __radd__(self, other): """Returns the sum of `other` and `self`. Args: other: Another Dimension, or a value accepted by `as_dimension`. Returns: A Dimension whose value is the sum of `self` and `other`. """ return self + other def __sub__(self, other): """Returns the subtraction of `other` from `self`. 
Dimensions are subtracted as follows: ```python tf.Dimension(m) - tf.Dimension(n) == tf.Dimension(m - n) tf.Dimension(m) - tf.Dimension(None) == tf.Dimension(None) tf.Dimension(None) - tf.Dimension(n) == tf.Dimension(None) tf.Dimension(None) - tf.Dimension(None) == tf.Dimension(None) ``` Args: other: Another Dimension, or a value accepted by `as_dimension`. Returns: A Dimension whose value is the subtraction of `other` from `self`. """ other = as_dimension(other) if self._value is None or other.value is None: return Dimension(None) else: return Dimension(self._value - other.value) def __rsub__(self, other): """Returns the subtraction of `self` from `other`. Args: other: Another Dimension, or a value accepted by `as_dimension`. Returns: A Dimension whose value is the subtraction of `self` from `other`. """ other = as_dimension(other) if self._value is None or other.value is None: return Dimension(None) else: return Dimension(other.value - self._value) def __mul__(self, other): """Returns the product of `self` and `other`. Dimensions are summed as follows: ```python tf.Dimension(m) * tf.Dimension(n) == tf.Dimension(m * n) tf.Dimension(m) * tf.Dimension(None) == tf.Dimension(None) tf.Dimension(None) * tf.Dimension(n) == tf.Dimension(None) tf.Dimension(None) * tf.Dimension(None) == tf.Dimension(None) ``` Args: other: Another Dimension, or a value accepted by `as_dimension`. Returns: A Dimension whose value is the product of `self` and `other`. """ try: other = as_dimension(other) except (TypeError, ValueError): return NotImplemented if self._value is None or other.value is None: return Dimension(None) else: return Dimension(self._value * other.value) def __rmul__(self, other): """Returns the product of `self` and `other`. Args: other: Another Dimension, or a value accepted by `as_dimension`. Returns: A Dimension whose value is the product of `self` and `other`. 
""" return self * other def __floordiv__(self, other): """Returns the quotient of `self` and `other` rounded down. Dimensions are divided as follows: ```python tf.Dimension(m) // tf.Dimension(n) == tf.Dimension(m // n) tf.Dimension(m) // tf.Dimension(None) == tf.Dimension(None) tf.Dimension(None) // tf.Dimension(n) == tf.Dimension(None) tf.Dimension(None) // tf.Dimension(None) == tf.Dimension(None) ``` Args: other: Another Dimension, or a value accepted by `as_dimension`. Returns: A `Dimension` whose value is the integer quotient of `self` and `other`. """ try: other = as_dimension(other) except (TypeError, ValueError): return NotImplemented if self._value is None or other.value is None: return Dimension(None) else: return Dimension(self._value // other.value) def __rfloordiv__(self, other): """Returns the quotient of `other` and `self` rounded down. Args: other: Another Dimension, or a value accepted by `as_dimension`. Returns: A `Dimension` whose value is the integer quotient of `self` and `other`. """ other = as_dimension(other) if self._value is None or other.value is None: return Dimension(None) else: return Dimension(other.value // self._value) def __div__(self, other): """DEPRECATED: Use `__floordiv__` via `x // y` instead. This function exists only for backwards compatibility purposes; new code should use `__floordiv__` via the syntax `x // y`. Using `x // y` communicates clearly that the result rounds down, and is forward compatible to Python 3. Args: other: Another `Dimension`. Returns: A `Dimension` whose value is the integer quotient of `self` and `other`. """ return self // other def __mod__(self, other): """Returns `self` modulo `other`. 
Dimension moduli are computed as follows: ```python tf.Dimension(m) % tf.Dimension(n) == tf.Dimension(m % n) tf.Dimension(m) % tf.Dimension(None) == tf.Dimension(None) tf.Dimension(None) % tf.Dimension(n) == tf.Dimension(None) tf.Dimension(None) % tf.Dimension(None) == tf.Dimension(None) ``` Args: other: Another Dimension, or a value accepted by `as_dimension`. Returns: A Dimension whose value is `self` modulo `other`. """ try: other = as_dimension(other) except (TypeError, ValueError): return NotImplemented if self._value is None or other.value is None: return Dimension(None) else: return Dimension(self._value % other.value) def __rmod__(self, other): """Returns `other` modulo `self`. Args: other: Another Dimension, or a value accepted by `as_dimension`. Returns: A Dimension whose value is `other` modulo `self`. """ try: other = as_dimension(other) except (TypeError, ValueError): return NotImplemented return other % self def __lt__(self, other): """Returns True if `self` is known to be less than `other`. Dimensions are compared as follows: ```python (tf.Dimension(m) < tf.Dimension(n)) == (m < n) (tf.Dimension(m) < tf.Dimension(None)) == None (tf.Dimension(None) < tf.Dimension(n)) == None (tf.Dimension(None) < tf.Dimension(None)) == None ``` Args: other: Another Dimension. Returns: The value of `self.value < other.value` if both are known, otherwise None. """ other = as_dimension(other) if self._value is None or other.value is None: return None else: return self._value < other.value def __le__(self, other): """Returns True if `self` is known to be less than or equal to `other`. Dimensions are compared as follows: ```python (tf.Dimension(m) <= tf.Dimension(n)) == (m <= n) (tf.Dimension(m) <= tf.Dimension(None)) == None (tf.Dimension(None) <= tf.Dimension(n)) == None (tf.Dimension(None) <= tf.Dimension(None)) == None ``` Args: other: Another Dimension. Returns: The value of `self.value <= other.value` if both are known, otherwise None. 
""" other = as_dimension(other) if self._value is None or other.value is None: return None else: return self._value <= other.value def __gt__(self, other): """Returns True if `self` is known to be greater than `other`. Dimensions are compared as follows: ```python (tf.Dimension(m) > tf.Dimension(n)) == (m > n) (tf.Dimension(m) > tf.Dimension(None)) == None (tf.Dimension(None) > tf.Dimension(n)) == None (tf.Dimension(None) > tf.Dimension(None)) == None ``` Args: other: Another Dimension. Returns: The value of `self.value > other.value` if both are known, otherwise None. """ other = as_dimension(other) if self._value is None or other.value is None: return None else: return self._value > other.value def __ge__(self, other): """Returns True if `self` is known to be greater than or equal to `other`. Dimensions are compared as follows: ```python (tf.Dimension(m) >= tf.Dimension(n)) == (m >= n) (tf.Dimension(m) >= tf.Dimension(None)) == None (tf.Dimension(None) >= tf.Dimension(n)) == None (tf.Dimension(None) >= tf.Dimension(None)) == None ``` Args: other: Another Dimension. Returns: The value of `self.value >= other.value` if both are known, otherwise None. """ other = as_dimension(other) if self._value is None or other.value is None: return None else: return self._value >= other.value def __reduce__(self): return Dimension, (self._value,) def as_dimension(value): """Converts the given value to a Dimension. A Dimension input will be returned unmodified. An input of `None` will be converted to an unknown Dimension. An integer input will be converted to a Dimension with that value. Args: value: The value to be converted. Returns: A Dimension corresponding to the given value. """ if isinstance(value, Dimension): return value else: return Dimension(value) @tf_export("TensorShape") class TensorShape(object): """Represents the shape of a `Tensor`. A `TensorShape` represents a possibly-partial shape specification for a `Tensor`. 
It may be one of the following: * *Fully-known shape:* has a known number of dimensions and a known size for each dimension. e.g. `TensorShape([16, 256])` * *Partially-known shape:* has a known number of dimensions, and an unknown size for one or more dimension. e.g. `TensorShape([None, 256])` * *Unknown shape:* has an unknown number of dimensions, and an unknown size in all dimensions. e.g. `TensorShape(None)` If a tensor is produced by an operation of type `"Foo"`, its shape may be inferred if there is a registered shape function for `"Foo"`. See [Shape functions](https://tensorflow.org/extend/adding_an_op#shape_functions_in_c) for details of shape functions and how to register them. Alternatively, the shape may be set explicitly using `tf.Tensor.set_shape`. """ def __init__(self, dims): """Creates a new TensorShape with the given dimensions. Args: dims: A list of Dimensions, or None if the shape is unspecified. DEPRECATED: A single integer is treated as a singleton list. Raises: TypeError: If dims cannot be converted to a list of dimensions. """ # TODO(irving): Eliminate the single integer special case. 
if dims is None: self._dims = None elif isinstance(dims, compat.bytes_or_text_types): raise TypeError("A string has ambiguous TensorShape, please wrap in a " "list or convert to an int: %s" % dims) elif isinstance(dims, tensor_shape_pb2.TensorShapeProto): if dims.unknown_rank: self._dims = None else: self._dims = [ # Protos store variable-size dimensions as -1 as_dimension(dim.size if dim.size != -1 else None) for dim in dims.dim ] elif isinstance(dims, TensorShape): self._dims = dims.dims else: try: dims_iter = iter(dims) except TypeError: # Treat as a singleton dimension self._dims = [as_dimension(dims)] else: # Got a list of dimensions self._dims = [as_dimension(d) for d in dims_iter] self._ndims = None def __repr__(self): return "TensorShape(%r)" % self._dims def __str__(self): if self.ndims is None: return "<unknown>" elif self.ndims == 1: return "(%s,)" % self._dims[0] else: return "(%s)" % ", ".join(str(d) for d in self._dims) @property def dims(self): """Returns a list of Dimensions, or None if the shape is unspecified.""" return self._dims @dims.setter def dims(self, dims): self._dims = dims self._ndims = None @property def ndims(self): """Returns the rank of this shape, or None if it is unspecified.""" if self._dims is None: return None else: if self._ndims is None: self._ndims = len(self._dims) return self._ndims def __len__(self): """Returns the rank of this shape, or raises ValueError if unspecified.""" if self._dims is None: raise ValueError("Cannot take the length of Shape with unknown rank.") return self.ndims def __bool__(self): """Returns True if this shape contains non-zero information.""" return self._dims is not None # Python 3 wants __bool__, Python 2.7 wants __nonzero__ __nonzero__ = __bool__ def __iter__(self): """Returns `self.dims` if the rank is known, otherwise raises ValueError.""" if self._dims is None: raise ValueError("Cannot iterate over a shape with unknown rank.") else: return iter(self._dims) def __getitem__(self, key): 
"""Returns the value of a dimension or a shape, depending on the key. Args: key: If `key` is an integer, returns the dimension at that index; otherwise if `key` is a slice, returns a TensorShape whose dimensions are those selected by the slice from `self`. Returns: A dimension if `key` is an integer, or a `TensorShape` if `key` is a slice. Raises: ValueError: If `key` is a slice and `self` is completely unknown and the step is set. """ if self._dims is not None: if isinstance(key, slice): return TensorShape(self._dims[key]) else: return self._dims[key] else: if isinstance(key, slice): start = key.start if key.start is not None else 0 stop = key.stop if key.step is not None: # TODO(mrry): Handle these maybe. raise ValueError("Steps are not yet handled") if stop is None: # NOTE(mrry): This implies that TensorShape(None) is compatible with # TensorShape(None)[1:], which is obviously not true. It would be # possible to track the number of dimensions symbolically, # and perhaps we should do that. return unknown_shape() elif start < 0 or stop < 0: # TODO(mrry): Handle this better, as it will be useful for handling # suffixes of otherwise unknown shapes. return unknown_shape() else: return unknown_shape(ndims=stop - start) else: return Dimension(None) def num_elements(self): """Returns the total number of elements, or none for incomplete shapes.""" if self.is_fully_defined(): size = 1 for dim in self._dims: size *= dim.value return size else: return None def merge_with(self, other): """Returns a `TensorShape` combining the information in `self` and `other`. The dimensions in `self` and `other` are merged elementwise, according to the rules defined for `Dimension.merge_with()`. Args: other: Another `TensorShape`. Returns: A `TensorShape` containing the combined information of `self` and `other`. Raises: ValueError: If `self` and `other` are not compatible. 
""" other = as_shape(other) if self._dims is None: return other else: try: self.assert_same_rank(other) new_dims = [] for i, dim in enumerate(self._dims): new_dims.append(dim.merge_with(other[i])) return TensorShape(new_dims) except ValueError: raise ValueError("Shapes %s and %s are not compatible" % (self, other)) def concatenate(self, other): """Returns the concatenation of the dimension in `self` and `other`. *N.B.* If either `self` or `other` is completely unknown, concatenation will discard information about the other shape. In future, we might support concatenation that preserves this information for use with slicing. Args: other: Another `TensorShape`. Returns: A `TensorShape` whose dimensions are the concatenation of the dimensions in `self` and `other`. """ # TODO(mrry): Handle the case where we concatenate a known shape with a # completely unknown shape, so that we can use the partial information. other = as_shape(other) if self._dims is None or other.dims is None: return unknown_shape() else: return TensorShape(self._dims + other.dims) def assert_same_rank(self, other): """Raises an exception if `self` and `other` do not have compatible ranks. Args: other: Another `TensorShape`. Raises: ValueError: If `self` and `other` do not represent shapes with the same rank. """ other = as_shape(other) if self.ndims is not None and other.ndims is not None: if self.ndims != other.ndims: raise ValueError("Shapes %s and %s must have the same rank" % (self, other)) def assert_has_rank(self, rank): """Raises an exception if `self` is not compatible with the given `rank`. Args: rank: An integer. Raises: ValueError: If `self` does not represent a shape with the given `rank`. """ if self.ndims not in (None, rank): raise ValueError("Shape %s must have rank %d" % (self, rank)) def with_rank(self, rank): """Returns a shape based on `self` with the given rank. This method promotes a completely unknown shape to one with a known rank. Args: rank: An integer. 
Returns: A shape that is at least as specific as `self` with the given rank. Raises: ValueError: If `self` does not represent a shape with the given `rank`. """ try: return self.merge_with(unknown_shape(ndims=rank)) except ValueError: raise ValueError("Shape %s must have rank %d" % (self, rank)) def with_rank_at_least(self, rank): """Returns a shape based on `self` with at least the given rank. Args: rank: An integer. Returns: A shape that is at least as specific as `self` with at least the given rank. Raises: ValueError: If `self` does not represent a shape with at least the given `rank`. """ if self.ndims is not None and self.ndims < rank: raise ValueError("Shape %s must have rank at least %d" % (self, rank)) else: return self def with_rank_at_most(self, rank): """Returns a shape based on `self` with at most the given rank. Args: rank: An integer. Returns: A shape that is at least as specific as `self` with at most the given rank. Raises: ValueError: If `self` does not represent a shape with at most the given `rank`. """ if self.ndims is not None and self.ndims > rank: raise ValueError("Shape %s must have rank at most %d" % (self, rank)) else: return self def is_compatible_with(self, other): """Returns True iff `self` is compatible with `other`. Two possibly-partially-defined shapes are compatible if there exists a fully-defined shape that both shapes can represent. Thus, compatibility allows the shape inference code to reason about partially-defined shapes. For example: * TensorShape(None) is compatible with all shapes. * TensorShape([None, None]) is compatible with all two-dimensional shapes, such as TensorShape([32, 784]), and also TensorShape(None). It is not compatible with, for example, TensorShape([None]) or TensorShape([None, None, None]). * TensorShape([32, None]) is compatible with all two-dimensional shapes with size 32 in the 0th dimension, and also TensorShape([None, None]) and TensorShape(None). 
It is not compatible with, for example, TensorShape([32]), TensorShape([32, None, 1]) or TensorShape([64, None]). * TensorShape([32, 784]) is compatible with itself, and also TensorShape([32, None]), TensorShape([None, 784]), TensorShape([None, None]) and TensorShape(None). It is not compatible with, for example, TensorShape([32, 1, 784]) or TensorShape([None]). The compatibility relation is reflexive and symmetric, but not transitive. For example, TensorShape([32, 784]) is compatible with TensorShape(None), and TensorShape(None) is compatible with TensorShape([4, 4]), but TensorShape([32, 784]) is not compatible with TensorShape([4, 4]). Args: other: Another TensorShape. Returns: True iff `self` is compatible with `other`. """ other = as_shape(other) if self._dims is not None and other.dims is not None: if self.ndims != other.ndims: return False for x_dim, y_dim in zip(self._dims, other.dims): if not x_dim.is_compatible_with(y_dim): return False return True def assert_is_compatible_with(self, other): """Raises exception if `self` and `other` do not represent the same shape. This method can be used to assert that there exists a shape that both `self` and `other` represent. Args: other: Another TensorShape. Raises: ValueError: If `self` and `other` do not represent the same shape. """ if not self.is_compatible_with(other): raise ValueError("Shapes %s and %s are incompatible" % (self, other)) def most_specific_compatible_shape(self, other): """Returns the most specific TensorShape compatible with `self` and `other`. * TensorShape([None, 1]) is the most specific TensorShape compatible with both TensorShape([2, 1]) and TensorShape([5, 1]). Note that TensorShape(None) is also compatible with above mentioned TensorShapes. * TensorShape([1, 2, 3]) is the most specific TensorShape compatible with both TensorShape([1, 2, 3]) and TensorShape([1, 2, 3]). There are more less specific TensorShapes compatible with above mentioned TensorShapes, e.g. 
TensorShape([1, 2, None]), TensorShape(None). Args: other: Another `TensorShape`. Returns: A `TensorShape` which is the most specific compatible shape of `self` and `other`. """ other = as_shape(other) if self._dims is None or other.dims is None or self.ndims != other.ndims: return unknown_shape() dims = [(Dimension(None))] * self.ndims for i, (d1, d2) in enumerate(zip(self._dims, other.dims)): if d1 is not None and d2 is not None and d1 == d2: dims[i] = d1 return TensorShape(dims) def is_fully_defined(self): """Returns True iff `self` is fully defined in every dimension.""" return (self._dims is not None and all(dim.value is not None for dim in self._dims)) def assert_is_fully_defined(self): """Raises an exception if `self` is not fully defined in every dimension. Raises: ValueError: If `self` does not have a known value for every dimension. """ if not self.is_fully_defined(): raise ValueError("Shape %s is not fully defined" % self) def as_list(self): """Returns a list of integers or `None` for each dimension. Returns: A list of integers or `None` for each dimension. Raises: ValueError: If `self` is an unknown shape with an unknown rank. 
""" if self._dims is None: raise ValueError("as_list() is not defined on an unknown TensorShape.") return [dim.value for dim in self._dims] def as_proto(self): """Returns this shape as a `TensorShapeProto`.""" if self._dims is None: return tensor_shape_pb2.TensorShapeProto(unknown_rank=True) else: return tensor_shape_pb2.TensorShapeProto(dim=[ tensor_shape_pb2.TensorShapeProto.Dim(size=-1 if d.value is None else d.value) for d in self._dims ]) def __eq__(self, other): """Returns True if `self` is equivalent to `other`.""" try: other = as_shape(other) except TypeError: return NotImplemented return self._dims == other.dims def __ne__(self, other): """Returns True if `self` is known to be different from `other`.""" try: other = as_shape(other) except TypeError: return NotImplemented if self.ndims is None or other.ndims is None: raise ValueError("The inequality of unknown TensorShapes is undefined.") if self.ndims != other.ndims: return True return self._dims != other.dims def __reduce__(self): return TensorShape, (self._dims,) def as_shape(shape): """Converts the given object to a TensorShape.""" if isinstance(shape, TensorShape): return shape else: return TensorShape(shape) def unknown_shape(ndims=None): """Returns an unknown TensorShape, optionally with a known rank. Args: ndims: (Optional) If specified, the number of dimensions in the shape. Returns: An unknown TensorShape. """ if ndims is None: return TensorShape(None) else: return TensorShape([Dimension(None)] * ndims) _SCALAR_SHAPE = TensorShape([]) def scalar(): """Returns a shape representing a scalar.""" return _SCALAR_SHAPE def vector(length): """Returns a shape representing a vector. Args: length: The length of the vector, which may be None if unknown. Returns: A TensorShape representing a vector of the given length. """ return TensorShape([length]) def matrix(rows, cols): """Returns a shape representing a matrix. Args: rows: The number of rows in the matrix, which may be None if unknown. 
cols: The number of columns in the matrix, which may be None if unknown. Returns: A TensorShape representing a matrix of the given size. """ return TensorShape([rows, cols])
apache-2.0
pinkavaj/gnuradio
gr-digital/python/digital/crc.py
59
1285
# # Copyright 2005,2007,2011 Free Software Foundation, Inc. # # This file is part of GNU Radio # # GNU Radio is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3, or (at your option) # any later version. # # GNU Radio is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with GNU Radio; see the file COPYING. If not, write to # the Free Software Foundation, Inc., 51 Franklin Street, # Boston, MA 02110-1301, USA. # from gnuradio import gru import digital_swig as digital import struct def gen_and_append_crc32(s): crc = digital.crc32(s) return s + struct.pack(">I", gru.hexint(crc) & 0xFFFFFFFF) def check_crc32(s): if len(s) < 4: return (False, '') msg = s[:-4] #print "msg = '%s'" % (msg,) actual = digital.crc32(msg) (expected,) = struct.unpack(">I", s[-4:]) # print "actual =", hex(actual), "expected =", hex(expected) return (actual == expected, msg)
gpl-3.0
chkir/django-cms
cms/models/pluginmodel.py
31
20652
# -*- coding: utf-8 -*- from datetime import date import json from operator import itemgetter import os import warnings from django.conf import settings from django.core.urlresolvers import NoReverseMatch from django.core.exceptions import ValidationError, ObjectDoesNotExist from django.db import models from django.db.models import signals, Model from django.db.models.base import model_unpickle, ModelBase from django.db.models.query_utils import DeferredAttribute from django.utils import six, timezone from django.utils.encoding import force_text, python_2_unicode_compatible from django.utils.safestring import mark_safe from django.utils.six.moves import filter from django.utils.translation import ugettext_lazy as _ from cms.exceptions import DontUsePageAttributeWarning from cms.models.placeholdermodel import Placeholder from cms.plugin_rendering import PluginContext, render_plugin from cms.utils import get_cms_setting from cms.utils.helpers import reversion_register from cms.utils.urlutils import admin_reverse from treebeard.mp_tree import MP_Node class BoundRenderMeta(object): def __init__(self, meta): self.index = 0 self.total = 1 self.text_enabled = getattr(meta, 'text_enabled', False) class PluginModelBase(ModelBase): """ Metaclass for all CMSPlugin subclasses. This class should not be used for any other type of models. 
""" def __new__(cls, name, bases, attrs): # remove RenderMeta from the plugin class attr_meta = attrs.pop('RenderMeta', None) # create a new class (using the super-metaclass) new_class = super(PluginModelBase, cls).__new__(cls, name, bases, attrs) # if there is a RenderMeta in attrs, use this one # else try to use the one from the superclass (if present) meta = attr_meta or getattr(new_class, '_render_meta', None) treebeard_view_fields = (f for f in new_class._meta.fields if f.name in ('depth', 'numchild', 'path')) for field in treebeard_view_fields: field.editable = False # set a new BoundRenderMeta to prevent leaking of state new_class._render_meta = BoundRenderMeta(meta) return new_class @python_2_unicode_compatible class CMSPlugin(six.with_metaclass(PluginModelBase, MP_Node)): ''' The base class for a CMS plugin model. When defining a new custom plugin, you should store plugin-instance specific information on a subclass of this class. An example for this would be to store the number of pictures to display in a galery. Two restrictions apply when subclassing this to use in your own models: 1. Subclasses of CMSPlugin *cannot be further subclassed* 2. Subclasses of CMSPlugin cannot define a "text" field. 
''' placeholder = models.ForeignKey(Placeholder, editable=False, null=True) parent = models.ForeignKey('self', blank=True, null=True, editable=False) position = models.PositiveSmallIntegerField(_("position"), blank=True, null=True, editable=False) language = models.CharField(_("language"), max_length=15, blank=False, db_index=True, editable=False) plugin_type = models.CharField(_("plugin_name"), max_length=50, db_index=True, editable=False) creation_date = models.DateTimeField(_("creation date"), editable=False, default=timezone.now) changed_date = models.DateTimeField(auto_now=True) child_plugin_instances = None translatable_content_excluded_fields = [] class Meta: app_label = 'cms' class RenderMeta: index = 0 total = 1 text_enabled = False def __reduce__(self): """ Provide pickling support. Normally, this just dispatches to Python's standard handling. However, for models with deferred field loading, we need to do things manually, as they're dynamically created classes and only module-level classes can be pickled by the default path. """ data = self.__dict__ # The obvious thing to do here is to invoke super().__reduce__() # for the non-deferred case. Don't do that. # On Python 2.4, there is something wierd with __reduce__, # and as a result, the super call will cause an infinite recursion. # See #10547 and #12121. 
deferred_fields = [f for f in self._meta.fields if isinstance(self.__class__.__dict__.get(f.attname), DeferredAttribute)] model = self._meta.proxy_for_model return (model_unpickle, (model, deferred_fields), data) def __str__(self): return force_text(self.pk) def get_plugin_name(self): from cms.plugin_pool import plugin_pool return plugin_pool.get_plugin(self.plugin_type).name def get_short_description(self): instance = self.get_plugin_instance()[0] if instance is not None: return force_text(instance) return _("<Empty>") def get_plugin_class(self): from cms.plugin_pool import plugin_pool return plugin_pool.get_plugin(self.plugin_type) def get_plugin_class_instance(self, admin=None): plugin_class = self.get_plugin_class() # needed so we have the same signature as the original ModelAdmin return plugin_class(plugin_class.model, admin) def get_plugin_instance(self, admin=None): ''' Given a plugin instance (usually as a CMSPluginBase), this method returns a tuple containing: instance - The instance AS THE APPROPRIATE SUBCLASS OF CMSPluginBase and not necessarily just 'self', which is often just a CMSPluginBase, plugin - the associated plugin class instance (subclass of CMSPlugin) ''' plugin = self.get_plugin_class_instance(admin) if hasattr(self, "_inst"): return self._inst, plugin if plugin.model != self.__class__: # and self.__class__ == CMSPlugin: # (if self is actually a subclass, getattr below would break) try: instance = plugin.model.objects.get(cmsplugin_ptr=self) instance._render_meta = self._render_meta except (AttributeError, ObjectDoesNotExist): instance = None else: instance = self self._inst = instance return self._inst, plugin def render_plugin(self, context=None, placeholder=None, admin=False, processors=None): instance, plugin = self.get_plugin_instance() if instance and not (admin and not plugin.admin_preview): if not placeholder or not isinstance(placeholder, Placeholder): placeholder = instance.placeholder placeholder_slot = placeholder.slot 
current_app = context.current_app if context else None context = PluginContext(context, instance, placeholder, current_app=current_app) context = plugin.render(context, instance, placeholder_slot) request = context.get('request', None) page = None if request: page = request.current_page plugin.cms_plugin_instance = instance context['allowed_child_classes'] = plugin.get_child_classes(placeholder_slot, page) if plugin.render_plugin: template = plugin._get_render_template(context, instance, placeholder) if not template: raise ValidationError("plugin has no render_template: %s" % plugin.__class__) else: template = None return render_plugin(context, instance, placeholder, template, processors, context.current_app) else: from cms.middleware.toolbar import toolbar_plugin_processor if processors and toolbar_plugin_processor in processors: if not placeholder: placeholder = self.placeholder current_app = context.current_app if context else None context = PluginContext(context, self, placeholder, current_app=current_app) template = None return render_plugin(context, self, placeholder, template, processors, context.current_app) return "" def get_media_path(self, filename): pages = self.placeholder.page_set.all() if pages.count(): return pages[0].get_media_path(filename) else: # django 1.0.2 compatibility today = date.today() return os.path.join(get_cms_setting('PAGE_MEDIA_PATH'), str(today.year), str(today.month), str(today.day), filename) @property def page(self): warnings.warn( "Don't use the page attribute on CMSPlugins! 
CMSPlugins are not " "guaranteed to have a page associated with them!", DontUsePageAttributeWarning) return self.placeholder.page if self.placeholder_id else None def get_instance_icon_src(self): """ Get src URL for instance's icon """ instance, plugin = self.get_plugin_instance() return plugin.icon_src(instance) if instance else u'' def get_instance_icon_alt(self): """ Get alt text for instance's icon """ instance, plugin = self.get_plugin_instance() return force_text(plugin.icon_alt(instance)) if instance else u'' def save(self, no_signals=False, *args, **kwargs): if not self.depth: if self.parent_id or self.parent: self.parent.add_child(instance=self) else: if not self.position and not self.position == 0: self.position = CMSPlugin.objects.filter(parent__isnull=True, language=self.language, placeholder_id=self.placeholder_id).count() self.add_root(instance=self) return super(CMSPlugin, self).save() def reload(self): return CMSPlugin.objects.get(pk=self.pk) def move(self, target, pos=None): super(CMSPlugin, self).move(target, pos) self = self.reload() try: new_pos = max(CMSPlugin.objects.filter(parent_id=self.parent_id, placeholder_id=self.placeholder_id, language=self.language).exclude(pk=self.pk).order_by('depth', 'path').values_list('position', flat=True)) + 1 except ValueError: # This is the first plugin in the set new_pos = 0 self.position = new_pos self.save() return self.reload() def set_base_attr(self, plugin): for attr in ['parent_id', 'placeholder', 'language', 'plugin_type', 'creation_date', 'depth', 'path', 'numchild', 'pk', 'position']: setattr(plugin, attr, getattr(self, attr)) def copy_plugin(self, target_placeholder, target_language, parent_cache, no_signals=False): """ Copy this plugin and return the new plugin. 
The logic of this method is the following: # get a new generic plugin instance # assign the position in the plugin tree # save it to let mptt/treebeard calculate the tree attributes # then get a copy of the current plugin instance # assign to it the id of the generic plugin instance above; this will effectively change the generic plugin created above into a concrete one # copy the tree related attributes from the generic plugin to the concrete one # save the concrete plugin # trigger the copy relations # return the generic plugin instance This copy logic is required because we don't know what the fields of the real plugin are. By getting another instance of it at step 4 and then overwriting its ID at step 5, the ORM will copy the custom fields for us. """ try: plugin_instance, cls = self.get_plugin_instance() except KeyError: # plugin type not found anymore return # set up some basic attributes on the new_plugin new_plugin = CMSPlugin() new_plugin.placeholder = target_placeholder # we assign a parent to our new plugin parent_cache[self.pk] = new_plugin if self.parent: parent = parent_cache[self.parent_id] parent = CMSPlugin.objects.get(pk=parent.pk) new_plugin.parent_id = parent.pk new_plugin.parent = parent new_plugin.language = target_language new_plugin.plugin_type = self.plugin_type if no_signals: from cms.signals import pre_save_plugins signals.pre_save.disconnect(pre_save_plugins, sender=CMSPlugin, dispatch_uid='cms_pre_save_plugin') signals.pre_save.disconnect(pre_save_plugins, sender=CMSPlugin) new_plugin._no_reorder = True new_plugin.save() if plugin_instance: # get a new instance so references do not get mixed up plugin_instance = plugin_instance.__class__.objects.get(pk=plugin_instance.pk) plugin_instance.pk = new_plugin.pk plugin_instance.id = new_plugin.pk plugin_instance.placeholder = target_placeholder plugin_instance.cmsplugin_ptr = new_plugin plugin_instance.language = target_language plugin_instance.parent = new_plugin.parent plugin_instance.depth 
= new_plugin.depth plugin_instance.path = new_plugin.path plugin_instance.numchild = new_plugin.numchild plugin_instance._no_reorder = True plugin_instance.save() old_instance = plugin_instance.__class__.objects.get(pk=self.pk) plugin_instance.copy_relations(old_instance) if no_signals: signals.pre_save.connect(pre_save_plugins, sender=CMSPlugin, dispatch_uid='cms_pre_save_plugin') return new_plugin @classmethod def fix_tree(cls, destructive=False): """ Fixes the plugin tree by first calling treebeard fix_tree and the recalculating the correct position property for each plugin. """ from cms.utils.plugins import reorder_plugins super(CMSPlugin, cls).fix_tree(destructive) for placeholder in Placeholder.objects.all(): for language, __ in settings.LANGUAGES: order = CMSPlugin.objects.filter(placeholder_id=placeholder.pk, language=language, parent_id__isnull=True ).values_list('pk', flat=True) reorder_plugins(placeholder, None, language, order) for plugin in CMSPlugin.objects.filter(placeholder_id=placeholder.pk, language=language).order_by('depth', 'path'): order = CMSPlugin.objects.filter(parent_id=plugin.pk).values_list('pk', flat=True) reorder_plugins(placeholder, plugin.pk, language, order) def post_copy(self, old_instance, new_old_ziplist): """ Handle more advanced cases (eg Text Plugins) after the original is copied """ pass def copy_relations(self, old_instance): """ Handle copying of any relations attached to this plugin. Custom plugins have to do this themselves! """ pass def has_change_permission(self, request): page = self.placeholder.page if self.placeholder else None if page: return page.has_change_permission(request) elif self.placeholder: return self.placeholder.has_change_permission(request) return False def get_position_in_placeholder(self): """ 1 based position! 
""" return self.position + 1 def get_breadcrumb(self): from cms.models import Page model = self.placeholder._get_attached_model() or Page breadcrumb = [] for parent in self.get_ancestors(): try: url = force_text( admin_reverse("%s_%s_edit_plugin" % (model._meta.app_label, model._meta.model_name), args=[parent.pk])) except NoReverseMatch: url = force_text( admin_reverse("%s_%s_edit_plugin" % (Page._meta.app_label, Page._meta.model_name), args=[parent.pk])) breadcrumb.append({'title': force_text(parent.get_plugin_name()), 'url': url}) try: url = force_text( admin_reverse("%s_%s_edit_plugin" % (model._meta.app_label, model._meta.model_name), args=[self.pk])) except NoReverseMatch: url = force_text( admin_reverse("%s_%s_edit_plugin" % (Page._meta.app_label, Page._meta.model_name), args=[self.pk])) breadcrumb.append({'title': force_text(self.get_plugin_name()), 'url': url}) return breadcrumb def get_breadcrumb_json(self): result = json.dumps(self.get_breadcrumb()) result = mark_safe(result) return result def num_children(self): return self.numchild def notify_on_autoadd(self, request, conf): """ Method called when we auto add this plugin via default_plugins in CMS_PLACEHOLDER_CONF. Some specific plugins may have some special stuff to do when they are auto added. """ pass def notify_on_autoadd_children(self, request, conf, children): """ Method called when we auto add children to this plugin via default_plugins/<plugin>/children in CMS_PLACEHOLDER_CONF. Some specific plugins may have some special stuff to do when we add children to them. ie : TextPlugin must update its content to add HTML tags to be able to see his children in WYSIWYG. 
""" pass def get_translatable_content(self): """ Returns {field_name: field_contents} for translatable fields, where field_contents > '' """ fields = (f for f in self._meta.fields if isinstance(f, (models.CharField, models.TextField)) and f.editable and not f.choices and f.name not in self.translatable_content_excluded_fields) return dict(filter(itemgetter(1), ((f.name, getattr(self, f.name)) for f in fields))) def set_translatable_content(self, fields): for field, value in fields.items(): setattr(self, field, value) self.save() return all(getattr(self, field) == value for field, value in fields.items()) def delete(self, no_mp=False, *args, **kwargs): if no_mp: Model.delete(self, *args, **kwargs) else: super(CMSPlugin, self).delete(*args, **kwargs) @property def add_url(self): """ Returns a custom url to add plugin instances """ return None @property def edit_url(self): """ Returns a custom url to edit plugin instances """ return None @property def move_url(self): """ Returns a custom url to move plugin instances """ return None @property def delete_url(self): """ Returns a custom url to delete plugin instances """ return None @property def copy_url(self): """ Returns a custom url to copy plugin instances """ return None reversion_register(CMSPlugin) def get_plugin_media_path(instance, filename): """ Django 1.7 requires that unbound function used in fields' definitions are defined outside the parent class (see https://docs.djangoproject.com/en/dev/topics/migrations/#serializing-values) This function is used withing field definition: file = models.FileField(_("file"), upload_to=get_plugin_media_path) and it invokes the bounded method on the given instance at runtime """ return instance.get_media_path(filename)
bsd-3-clause