repo_name
stringlengths
6
100
path
stringlengths
4
294
copies
stringlengths
1
5
size
stringlengths
4
6
content
stringlengths
606
896k
license
stringclasses
15 values
hrjn/scikit-learn
examples/applications/plot_model_complexity_influence.py
323
6372
""" ========================== Model Complexity Influence ========================== Demonstrate how model complexity influences both prediction accuracy and computational performance. The dataset is the Boston Housing dataset (resp. 20 Newsgroups) for regression (resp. classification). For each class of models we make the model complexity vary through the choice of relevant model parameters and measure the influence on both computational performance (latency) and predictive power (MSE or Hamming Loss). """ print(__doc__) # Author: Eustache Diemert <eustache@diemert.fr> # License: BSD 3 clause import time import numpy as np import matplotlib.pyplot as plt from mpl_toolkits.axes_grid1.parasite_axes import host_subplot from mpl_toolkits.axisartist.axislines import Axes from scipy.sparse.csr import csr_matrix from sklearn import datasets from sklearn.utils import shuffle from sklearn.metrics import mean_squared_error from sklearn.svm.classes import NuSVR from sklearn.ensemble.gradient_boosting import GradientBoostingRegressor from sklearn.linear_model.stochastic_gradient import SGDClassifier from sklearn.metrics import hamming_loss ############################################################################### # Routines # initialize random generator np.random.seed(0) def generate_data(case, sparse=False): """Generate regression/classification data.""" bunch = None if case == 'regression': bunch = datasets.load_boston() elif case == 'classification': bunch = datasets.fetch_20newsgroups_vectorized(subset='all') X, y = shuffle(bunch.data, bunch.target) offset = int(X.shape[0] * 0.8) X_train, y_train = X[:offset], y[:offset] X_test, y_test = X[offset:], y[offset:] if sparse: X_train = csr_matrix(X_train) X_test = csr_matrix(X_test) else: X_train = np.array(X_train) X_test = np.array(X_test) y_test = np.array(y_test) y_train = np.array(y_train) data = {'X_train': X_train, 'X_test': X_test, 'y_train': y_train, 'y_test': y_test} return data def benchmark_influence(conf): 
""" Benchmark influence of :changing_param: on both MSE and latency. """ prediction_times = [] prediction_powers = [] complexities = [] for param_value in conf['changing_param_values']: conf['tuned_params'][conf['changing_param']] = param_value estimator = conf['estimator'](**conf['tuned_params']) print("Benchmarking %s" % estimator) estimator.fit(conf['data']['X_train'], conf['data']['y_train']) conf['postfit_hook'](estimator) complexity = conf['complexity_computer'](estimator) complexities.append(complexity) start_time = time.time() for _ in range(conf['n_samples']): y_pred = estimator.predict(conf['data']['X_test']) elapsed_time = (time.time() - start_time) / float(conf['n_samples']) prediction_times.append(elapsed_time) pred_score = conf['prediction_performance_computer']( conf['data']['y_test'], y_pred) prediction_powers.append(pred_score) print("Complexity: %d | %s: %.4f | Pred. Time: %fs\n" % ( complexity, conf['prediction_performance_label'], pred_score, elapsed_time)) return prediction_powers, prediction_times, complexities def plot_influence(conf, mse_values, prediction_times, complexities): """ Plot influence of model complexity on both accuracy and latency. 
""" plt.figure(figsize=(12, 6)) host = host_subplot(111, axes_class=Axes) plt.subplots_adjust(right=0.75) par1 = host.twinx() host.set_xlabel('Model Complexity (%s)' % conf['complexity_label']) y1_label = conf['prediction_performance_label'] y2_label = "Time (s)" host.set_ylabel(y1_label) par1.set_ylabel(y2_label) p1, = host.plot(complexities, mse_values, 'b-', label="prediction error") p2, = par1.plot(complexities, prediction_times, 'r-', label="latency") host.legend(loc='upper right') host.axis["left"].label.set_color(p1.get_color()) par1.axis["right"].label.set_color(p2.get_color()) plt.title('Influence of Model Complexity - %s' % conf['estimator'].__name__) plt.show() def _count_nonzero_coefficients(estimator): a = estimator.coef_.toarray() return np.count_nonzero(a) ############################################################################### # main code regression_data = generate_data('regression') classification_data = generate_data('classification', sparse=True) configurations = [ {'estimator': SGDClassifier, 'tuned_params': {'penalty': 'elasticnet', 'alpha': 0.001, 'loss': 'modified_huber', 'fit_intercept': True}, 'changing_param': 'l1_ratio', 'changing_param_values': [0.25, 0.5, 0.75, 0.9], 'complexity_label': 'non_zero coefficients', 'complexity_computer': _count_nonzero_coefficients, 'prediction_performance_computer': hamming_loss, 'prediction_performance_label': 'Hamming Loss (Misclassification Ratio)', 'postfit_hook': lambda x: x.sparsify(), 'data': classification_data, 'n_samples': 30}, {'estimator': NuSVR, 'tuned_params': {'C': 1e3, 'gamma': 2 ** -15}, 'changing_param': 'nu', 'changing_param_values': [0.1, 0.25, 0.5, 0.75, 0.9], 'complexity_label': 'n_support_vectors', 'complexity_computer': lambda x: len(x.support_vectors_), 'data': regression_data, 'postfit_hook': lambda x: x, 'prediction_performance_computer': mean_squared_error, 'prediction_performance_label': 'MSE', 'n_samples': 30}, {'estimator': GradientBoostingRegressor, 'tuned_params': 
{'loss': 'ls'}, 'changing_param': 'n_estimators', 'changing_param_values': [10, 50, 100, 200, 500], 'complexity_label': 'n_trees', 'complexity_computer': lambda x: x.n_estimators, 'data': regression_data, 'postfit_hook': lambda x: x, 'prediction_performance_computer': mean_squared_error, 'prediction_performance_label': 'MSE', 'n_samples': 30}, ] for conf in configurations: prediction_performances, prediction_times, complexities = \ benchmark_influence(conf) plot_influence(conf, prediction_performances, prediction_times, complexities)
bsd-3-clause
rasbt/advent-of-code-2016
python_code/aoc_01_02.py
1
4237
import collections

"""
source: http://adventofcode.com/2016/day/1

DESCRIPTION

Santa's sleigh uses a very high-precision clock to guide its movements,
and the clock's oscillator is regulated by stars. Unfortunately, the stars
have been stolen... by the Easter Bunny. To save Christmas, Santa needs
you to retrieve all fifty stars by December 25th.

Collect stars by solving puzzles. Two puzzles will be made available on
each day in the advent calendar; the second puzzle is unlocked when you
complete the first. Each puzzle grants one star. Good luck!

You're airdropped near Easter Bunny Headquarters in a city somewhere.
"Near", unfortunately, is as close as you can get - the instructions on
the Easter Bunny Recruiting Document the Elves intercepted start here,
and nobody had time to work them out further.

The Document indicates that you should start at the given coordinates
(where you just landed) and face North. Then, follow the provided
sequence: either turn left (L) or right (R) 90 degrees, then walk forward
the given number of blocks, ending at a new intersection.

There's no time to follow such ridiculous instructions on foot, though,
so you take a moment and work out the destination. Given that you can
only walk on the street grid of the city, how far is the shortest path
to the destination?

For example:

Following R2, L3 leaves you 2 blocks East and 3 blocks North, or 5
blocks away.
R2, R2, R2 leaves you 2 blocks due South of your starting position,
which is 2 blocks away.
R5, L5, R5, R3 leaves you 12 blocks away.

How many blocks away is Easter Bunny HQ?"""

p_input = """R4, R3, R5, L3, L5, R2, L2, R5, L2, R5, R5, R5, R1, R3, L2,
L2, L1, R5, L3, R1, L2, R1, L3, L5, L1, R3, L4, R2, R4, L3, L1, R4, L4,
R3, L5, L3, R188, R4, L1, R48, L5, R4, R71, R3, L2, R188, L3, R2, L3,
R3, L5, L1, R1, L2, L4, L2, R5, L3, R3, R3, R4, L3, L4, R5, L4, L4, R3,
R4, L4, R1, L3, L1, L1, R4, R1, L4, R1, L1, L3, R2, L2, R2, L1, R5, R3,
R4, L5, R2, R5, L5, R1, R2, L1, L3, R3, R1, R3, L4, R4, L4, L1, R1, L2,
L2, L4, R1, L3, R4, L2, R3, L1, L5, R4, R5, R2, R5, R1, R5, R1, R3, L3,
L2, L2, L5, R2, L2, R5, R5, L2, R3, L5, R5, L2, R4, R2, L1, R3, L5, R3,
R2, R5, L1, R3, L2, R2, R1"""

"""
--- Part Two ---

Then, you notice the instructions continue on the back of the Recruiting
Document. Easter Bunny HQ is actually at the first location you visit
twice.

For example, if your instructions are R8, R4, R4, R8, the first location
you visit twice is 4 blocks away, due East.

How many blocks away is the first location you visit twice?
"""


def walk(input_string):
    """Follow turn-and-stride instructions on a street grid.

    Starts at (0, 0) facing North.

    Args:
        input_string: comma-separated instructions like "R8, L4".

    Returns:
        (end_position, first_pos_visited_twice) where end_position is the
        final [x, y] and first_pos_visited_twice is the first intersection
        stepped on twice (empty tuple if none is ever revisited; the start
        point itself is not counted as visited).
    """
    end_position = [0, 0]
    all_positions = set()
    first_pos_visited_twice = ()

    # headings[0] is the current compass heading; rotating the deque by -1
    # turns right, by +1 turns left.
    headings = collections.deque('NESW')
    # unit grid step per heading
    deltas = {'N': (0, 1), 'E': (1, 0), 'S': (0, -1), 'W': (-1, 0)}

    def record_visit():
        # Remember the first intersection stepped on twice; once found,
        # later revisits are ignored.
        nonlocal first_pos_visited_twice
        if not first_pos_visited_twice:
            curr_pos = tuple(end_position)
            if curr_pos in all_positions:
                first_pos_visited_twice = curr_pos
            else:
                all_positions.add(curr_pos)

    for instruction in input_string.split(','):
        instruction = instruction.strip()
        turn, strides = instruction[0], int(instruction[1:])
        if turn == 'R':
            headings.rotate(-1)
        else:
            headings.rotate()
        dx, dy = deltas[headings[0]]
        # Walk block by block so every crossed intersection is recorded.
        # (The original reused the outer loop variable `i` here, shadowing
        # the instruction token.)
        for _ in range(strides):
            end_position[0] += dx
            end_position[1] += dy
            record_visit()

    return end_position, first_pos_visited_twice


def compute_manhattan_dist(end_position):
    """Return the Manhattan (taxicab) distance of a position from origin."""
    return abs(end_position[0]) + abs(end_position[1])


def test_1():
    """Part-two example: R8, R4, R4, R8 revisits a point 4 blocks away."""
    test_input = "R8, R4, R4, R8"
    end_pos, first_pos_visited_twice = walk(test_input)
    mdist = compute_manhattan_dist(first_pos_visited_twice)
    assert mdist == 4


def quiz_solution_p2():
    """Solve part two for the puzzle input and print the distance."""
    end_pos, first_pos_visited_twice = walk(p_input)
    mdist = compute_manhattan_dist(first_pos_visited_twice)
    print('Quiz solution part 2:', mdist)


if __name__ == "__main__":
    test_1()
    quiz_solution_p2()
mit
loic/django
tests/gis_tests/maps/tests.py
322
2099
# -*- coding: utf-8 -*- from __future__ import unicode_literals from unittest import skipUnless from django.contrib.gis.geos import HAS_GEOS from django.test import SimpleTestCase from django.test.utils import modify_settings, override_settings from django.utils.encoding import force_text GOOGLE_MAPS_API_KEY = 'XXXX' @skipUnless(HAS_GEOS, 'Geos is required.') @modify_settings( INSTALLED_APPS={'append': 'django.contrib.gis'}, ) class GoogleMapsTest(SimpleTestCase): @override_settings(GOOGLE_MAPS_API_KEY=GOOGLE_MAPS_API_KEY) def test_google_map_scripts(self): """ Testing GoogleMap.scripts() output. See #20773. """ from django.contrib.gis.maps.google.gmap import GoogleMap google_map = GoogleMap() scripts = google_map.scripts self.assertIn(GOOGLE_MAPS_API_KEY, scripts) self.assertIn("new GMap2", scripts) @override_settings(GOOGLE_MAPS_API_KEY=GOOGLE_MAPS_API_KEY) def test_unicode_in_google_maps(self): """ Test that GoogleMap doesn't crash with non-ASCII content. """ from django.contrib.gis.geos import Point from django.contrib.gis.maps.google.gmap import GoogleMap, GMarker center = Point(6.146805, 46.227574) marker = GMarker(center, title='En français !') google_map = GoogleMap(center=center, zoom=18, markers=[marker]) self.assertIn("En français", google_map.scripts) def test_gevent_html_safe(self): from django.contrib.gis.maps.google.overlays import GEvent event = GEvent('click', 'function() {location.href = "http://www.google.com"}') self.assertTrue(hasattr(GEvent, '__html__')) self.assertEqual(force_text(event), event.__html__()) def test_goverlay_html_safe(self): from django.contrib.gis.maps.google.overlays import GOverlayBase overlay = GOverlayBase() overlay.js_params = '"foo", "bar"' self.assertTrue(hasattr(GOverlayBase, '__html__')) self.assertEqual(force_text(overlay), overlay.__html__())
bsd-3-clause
darren-wang/gl
glance/async/flows/introspect.py
6
3153
# Copyright 2015 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import logging from oslo_concurrency import processutils as putils from oslo_utils import excutils from taskflow.patterns import linear_flow as lf from glance.async import utils from glance import i18n _LE = i18n._LE _LI = i18n._LI LOG = logging.getLogger(__name__) class _Introspect(utils.OptionalTask): """Taskflow to pull the embedded metadata out of image file""" def __init__(self, task_id, task_type, image_repo): self.task_id = task_id self.task_type = task_type self.image_repo = image_repo super(_Introspect, self).__init__( name='%s-Introspect-%s' % (task_type, task_id)) def execute(self, image_id, file_path): """Does the actual introspection :param image_id: Glance image ID :param file_path: Path to the file being introspected """ try: stdout, stderr = putils.trycmd('qemu-img', 'info', '--output=json', file_path, log_errors=putils.LOG_ALL_ERRORS) except OSError as exc: # NOTE(flaper87): errno == 2 means the executable file # was not found. For now, log an error and move forward # until we have a better way to enable/disable optional # tasks. 
if exc.errno != 2: with excutils.save_and_reraise_exception(): msg = (_LE('Failed to execute introspection ' '%(task_id)s: %(exc)s') % {'task_id': self.task_id, 'exc': exc.message}) LOG.error(msg) return if stderr: raise RuntimeError(stderr) metadata = json.loads(stdout) new_image = self.image_repo.get(image_id) new_image.virtual_size = metadata.get('virtual-size', 0) new_image.disk_format = metadata.get('format') self.image_repo.save(new_image) LOG.debug("%(task_id)s: Introspection successful: %(file)s" % {'task_id': self.task_id, 'file': file_path}) return new_image def get_flow(**kwargs): task_id = kwargs.get('task_id') task_type = kwargs.get('task_type') image_repo = kwargs.get('image_repo') LOG.debug("Flow: %(task_type)s with ID %(id)s on %(repo)s" % {'task_type': task_type, 'id': task_id, 'repo': image_repo}) return lf.Flow(task_type).add( _Introspect(task_id, task_type, image_repo), )
apache-2.0
hiteshchoudhary/Airvengers
AirvengersGUI.py
1
7188
#!/usr/bin/env python
# ---------------------------------------------------------------------------
# GUI front-end for the Aircrack-ng set of tools, designed to run on
# Kali Linux.  Each button launches the matching per-tool helper script
# (Airmon-ng.py, Aircrack-ng.py, ...) in a subprocess.
#
# Designed by : Hitesh Choudhary
# Home page   : www.HiteshChoudhary.com
# Email       : hitesh@hiteshchoudhary.com
# Based on    : www.Aircrack-ng.org
# ---------------------------------------------------------------------------
from Canvas import Line
from Tkinter import *
from Tkinter import Frame, PhotoImage, Text, Label, Button
import subprocess
from textwrap import fill
from tkFont import Font
import tkFont
import tkMessageBox


class Feedback:
    # Main window: a white header frame with the logo and a content frame
    # holding one launcher button per aircrack-ng tool.

    def __init__(self, master):
        # min and max size of window (currently disabled)
        #master.minsize(width=410, height=700)
        #master.maxsize(width=410, height=700)
        # title of window
        master.title("Airvengers")
        # fonts used by the labels and buttons
        self.customFont = tkFont.Font(family="Helvetica", size=15)
        self.myfont = tkFont.Font(family="Helvetica", size=12)
        self.myfont2 = tkFont.Font(family="Helvetica", size=10)
        # header frame with the logo image
        self.frame_header = Frame(master, bg="white")
        self.frame_header.pack(fill=BOTH, side=TOP, expand=True)
        self.logo = PhotoImage(file = "logoair.gif")
        Label(self.frame_header, image = self.logo).grid(row = 0, column = 0, sticky='sw', columnspan=2)
        # content frame: credits row plus the tool-launcher button grid
        self.frame_content = Frame(master, bg="white")
        self.frame_content.pack(fill=BOTH, side=TOP, expand=True)
        Label(self.frame_content, text = 'Based on:' ,font=self.myfont, wraplength =200, bg="white").grid(row = 0, column = 0, padx = 5, sticky = 'sw')
        Label(self.frame_content, text = 'GUI by Hitesh:', font=self.myfont, wraplength =200,padx=0, bg="white").grid(row = 0, column = 1, padx = 5, sticky = 'sw')
        Label(self.frame_content, text = 'Aircrack-ng' ,font=self.myfont, wraplength =300, bg="white").grid(row = 1, column = 0, padx = 5, sticky = 'sw')
        Label(self.frame_content, text = 'hitesh@hiteshchoudhary.com', font=self.myfont2, wraplength =300,padx=0, bg="white").grid(row = 1, column = 1, padx = 5, sticky = 'sw')
        # Earlier feedback-form widgets, kept for reference:
        #Label(self.frame_content, text = 'Comments:').grid(row = 2, column = 0, padx = 5, sticky = 'sw')
        #self.entry_name = Entry(self.frame_content, width = 24)
        #self.entry_email = Entry(self.frame_content, width = 24)
        #self.text_comments = Text(self.frame_content, width = 50, height = 10)
        #self.entry_name.grid(row = 1, column = 0, padx = 5)
        #self.entry_email.grid(row = 1, column = 1, padx = 5)
        #self.text_comments.grid(row = 3, column = 0, columnspan = 2, padx = 5)
        # One launcher button per tool, two per row.
        Button(self.frame_content, text = 'airmon-ng', command =AirmonNg, height=2, width=15,
               font=self.customFont).grid(row = 4, column = 0, padx = 5, pady = 5)
        Button(self.frame_content, text = 'aircrack-ng', command=AircrackNg, height=2, width=15,
               font=self.customFont).grid(row = 4, column = 1, padx = 5, pady = 5 )
        Button(self.frame_content, text = 'airdecap-ng' , command = AirdecapNg, height=2, width=15,
               font=self.customFont).grid(row = 5, column = 0, padx = 5, pady = 5 )
        Button(self.frame_content, text = 'airdecloak-ng', command = AirdecloakNg, height=2, width=15,
               font=self.customFont).grid(row = 5, column = 1, padx = 5, pady = 5 )
        Button(self.frame_content, text = 'airdrop-ng', command = AirdropNg, height=2, width=15,
               font=self.customFont).grid(row = 6, column = 0, padx = 5, pady = 5 )
        Button(self.frame_content, text = 'aireplay-ng', command = AireplayNg, height=2, width=15,
               font=self.customFont).grid(row = 6, column = 1, padx = 5, pady = 5 )
        Button(self.frame_content, text = 'airgraph-ng', command = AirgraphNg, height=2, width=15,
               font=self.customFont).grid(row = 7, column = 0, padx = 5, pady = 5 )
        Button(self.frame_content, text = 'airbase-ng', command = AirbaseNg, height=2, width=15,
               font=self.customFont).grid(row = 7, column = 1, padx = 5, pady = 5 )
        Button(self.frame_content, text = 'airodump-ng', command = AirodumpNg, height=2, width=15,
               font=self.customFont).grid(row = 8, column = 0, padx = 5, pady = 5 )
        Button(self.frame_content, text = 'airolib-ng', command = AirolibNg, height=2, width=15,
               font=self.customFont).grid(row = 8, column = 1, padx = 5, pady = 5 )
        Button(self.frame_content, text = 'airserv-ng ', command = AirservNg, height=2, width=15,
               font=self.customFont).grid(row = 9, column = 0, padx = 5, pady = 5 )
        Button(self.frame_content, text = 'airtun-ng ', command = AirtunNg, height=2, width=15,
               font=self.customFont).grid(row = 9, column = 1, padx = 5, pady = 5)

    def clear(self):
        # NOTE(review): the entry/text widgets this method clears are
        # commented out in __init__ above, so calling clear() would raise
        # AttributeError -- either restore the widgets or remove this method.
        self.entry_name.delete(0, 'end')
        self.entry_email.delete(0, 'end')
        self.text_comments.delete(1.0, 'end')


def main():
    # Build the root window and hand it to the Feedback GUI.
    root = Tk()
    feedback = Feedback(root)
    root.mainloop()


# for open the next page (unused helper)
def callback():
    execfile("mygui3.py")
    return


# Each launcher runs the corresponding helper script in a child python
# process; the GUI stays responsive only after the child exits, since
# subprocess.call blocks.
def AirmonNg():
    subprocess.call(["python","Airmon-ng.py"])

def AirodumpNg():
    subprocess.call(["python","Airodump-ng.py"])

def AirbaseNg():
    subprocess.call(["python","Airbase-ng.py"])

def AircrackNg():
    subprocess.call(["python","Aircrack-ng.py"])

def AirdecapNg():
    subprocess.call(["python","Airdecap-ng.py"])

def AirdecloakNg():
    subprocess.call(["python","Airdecloak-ng.py"])

def AirdropNg():
    subprocess.call(["python","Airdrop-ng.py"])

def AireplayNg():
    subprocess.call(["python","Aireplay-ng.py"])

def AirgraphNg():
    # NOTE(review): "Aigraph-ng.py" looks like a typo for "Airgraph-ng.py"
    # (every other helper follows the Air*-ng.py pattern) -- verify the
    # actual helper script filename before changing it.
    subprocess.call(["python","Aigraph-ng.py"])

def AirolibNg():
    subprocess.call(["python","Airolib-ng.py"])

def AirservNg():
    subprocess.call(["python","Airserv-ng.py"])

def AirtunNg():
    subprocess.call(["python","Airtun-ng.py"])

# end
# def openFile1(self):
#     os.startfile("mygui2.py")

if __name__ == "__main__":
    main()
gpl-2.0
hinerm/ITK
Wrapping/Generators/SwigInterface/pygccxml-1.0.0/pygccxml/parser/etree_scanner.py
14
1665
# Copyright 2004-2008 Roman Yakovenko.
# Distributed under the Boost Software License, Version 1.0. (See
# accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)

import scanner

import xml.etree.cElementTree as ElementTree


class etree_saxifier_t(object):
    """Replays a parsed ElementTree as SAX-style events on a handler."""

    def __init__(self, etree, handler):
        """
        :param etree: parsed ElementTree to saxify
        :param handler: object receiving startDocument/startElement/
            endElement/endDocument calls (the scanner)
        """
        self.__root_elem = etree.getroot()
        self.__handler = handler

    def saxify(self):
        self.__handler.startDocument()
        self.__recursive_saxify( self.__root_elem )
        self.__handler.endDocument()

    def __recursive_saxify(self, element ):
        self.__handler.startElement( element.tag, element.attrib )
        # BUG FIX: the original used map() purely for its side effect.
        # Under Python 3, map() is lazy, so the children would silently
        # never be visited; an explicit loop behaves identically on
        # Python 2 and correctly on Python 3 (and avoids building a
        # throwaway list).
        for child in element:
            self.__recursive_saxify(child)
        self.__handler.endElement( element.tag )


class etree_scanner_t( scanner.scanner_t ):
    """Scanner that parses the whole GCC-XML file into a tree, then
    replays it through the saxifier."""

    def __init__(self, gccxml_file, decl_factory, *args ):
        scanner.scanner_t.__init__( self, gccxml_file, decl_factory, *args )

    def read( self ):
        tree = ElementTree.parse( self.gccxml_file )
        saxifier = etree_saxifier_t( tree, self )
        saxifier.saxify()


class ietree_scanner_t( scanner.scanner_t ):
    """Incremental scanner: streams events with iterparse and clears each
    element once processed, keeping peak memory low on large files."""

    def __init__(self, gccxml_file, decl_factory, *args ):
        scanner.scanner_t.__init__( self, gccxml_file, decl_factory, *args )

    def read( self ):
        context = ElementTree.iterparse( self.gccxml_file,
                                         events=("start", "end") )
        for event, elem in context:
            if event == 'start':
                self.startElement( elem.tag, elem.attrib )
            else:
                self.endElement( elem.tag )
                # free the element's children; we are done with them
                elem.clear()
        self.endDocument()


# The incremental implementation is the one actually used.
etree_scanner_t = ietree_scanner_t
apache-2.0
d53dave/csaopt
csaopt/model/__init__.py
2
3353
""" This module offers the core CSAOpt modelling component: the :class:`~model.Model` class. """ import json from enum import Enum from typing import Dict, Any class Precision(Enum): """Enum for available calculation precisions""" Float32 = 'float32' Float64 = 'float64' class RandomDistribution(Enum): """Enum for available distributions of random values used during optimization""" Normal = 'normal' Uniform = 'uniform' class RequiredFunctions(Enum): """Enum for required functions that a model has to provide""" Initialize = 'initialize' GenerateNext = 'generate_next' Cool = 'cool' Evaluate = 'evaluate' Acceptance = 'acceptance_func' EmptyState = 'empty_state' class Model: """Core class containing functions and parameters for optimization Args: name: Optimization name dimensions: Number of dimensions of optimization domain precision: Required precision distribution: Required distribution of random values that will be provided by CSAOpt to the optimization opt_globals: Global variables available during optimization functions: Functions modelling the domain Attributes: name: Optimization name dimensions: Number of dimensions of optimization domain precision: Required precision distribution: Required distribution of random values that will be provided by CSAOpt to the optimization opt_globals: Global variables available during optimization functions: Functions modelling the domain """ @staticmethod def from_dict(d: Dict[str, Any]): """ Create model object from a dictionary (i.e. 
the serialized form) Args: d: Serialized model dictionary Returns: Model: A model object """ assert 'distribution' in d assert 'precision' in d assert 'globals' in d assert 'functions' in d distribution: RandomDistribution = d['distribution'] precision: Precision = d['precision'] return Model(d['name'], d['dimensions'], precision.value, distribution.value, d.get('globals', {}), d.get('state_shape', 1), d['functions']) def __init__(self, name: str, dimensions: int, precision: Precision, distribution: RandomDistribution, opt_globals: str, state_shape: int, functions: Dict[str, str]) -> None: self.name: str = name self.dimensions: int = dimensions self.distribution: RandomDistribution = distribution self.precision: Precision = precision self.globals: str = opt_globals self.state_shape: int = state_shape self.functions: Dict[str, str] = functions def to_dict(self) -> Dict[str, Any]: """Serializes model to dictionary (e.g. for transmission to workers) Returns: Dictionary representation of model """ return { 'name': self.name, 'dimensions': self.dimensions, 'distribution': self.distribution.value, 'precision': self.precision.value, 'globals': self.globals, 'state_shape': self.state_shape, 'functions': self.functions } def __repr__(self) -> str: return json.dumps(self, indent=4)
mit
mitsuhiko/pip
pip/_vendor/requests/packages/charade/sbcharsetprober.py
2927
4793
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
#   Mark Pilgrim - port to Python
#   Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301  USA
######################### END LICENSE BLOCK #########################

import sys

from . import constants
from .charsetprober import CharSetProber
from .compat import wrap_ord

# Only the SAMPLE_SIZE most frequent characters of a language take part in
# the pair-frequency statistics below.
SAMPLE_SIZE = 64
# Minimum number of observed pairs before the shortcut thresholds apply.
SB_ENOUGH_REL_THRESHOLD = 1024
POSITIVE_SHORTCUT_THRESHOLD = 0.95
NEGATIVE_SHORTCUT_THRESHOLD = 0.05
# Orders >= SYMBOL_CAT_ORDER are symbols and excluded from the statistics.
SYMBOL_CAT_ORDER = 250
NUMBER_OF_SEQ_CAT = 4
POSITIVE_CAT = NUMBER_OF_SEQ_CAT - 1
#NEGATIVE_CAT = 0


class SingleByteCharSetProber(CharSetProber):
    """Charset prober driven by a per-language character-pair frequency
    model (the dicts passed in as `model`)."""

    # NOTE: the `reversed` parameter shadows the builtin of the same name;
    # kept for API compatibility with existing callers.
    def __init__(self, model, reversed=False, nameProber=None):
        CharSetProber.__init__(self)
        self._mModel = model
        # TRUE if we need to reverse every pair in the model lookup
        self._mReversed = reversed
        # Optional auxiliary prober for name decision
        self._mNameProber = nameProber
        self.reset()

    def reset(self):
        CharSetProber.reset(self)
        # char order of last character (255 = sentinel: no previous char)
        self._mLastOrder = 255
        self._mSeqCounters = [0] * NUMBER_OF_SEQ_CAT
        self._mTotalSeqs = 0
        self._mTotalChar = 0
        # characters that fall in our sampling range
        self._mFreqChar = 0

    def get_charset_name(self):
        # Delegate to the name prober when one was supplied.
        if self._mNameProber:
            return self._mNameProber.get_charset_name()
        else:
            return self._mModel['charsetName']

    def feed(self, aBuf):
        # Models trained without English letters need them stripped first.
        if not self._mModel['keepEnglishLetter']:
            aBuf = self.filter_without_english_letters(aBuf)
        aLen = len(aBuf)
        if not aLen:
            return self.get_state()
        for c in aBuf:
            # map the byte to its frequency order in this language model
            order = self._mModel['charToOrderMap'][wrap_ord(c)]
            if order < SYMBOL_CAT_ORDER:
                self._mTotalChar += 1
            if order < SAMPLE_SIZE:
                self._mFreqChar += 1
                # count the (previous, current) pair only when both chars
                # are inside the sampled frequency range
                if self._mLastOrder < SAMPLE_SIZE:
                    self._mTotalSeqs += 1
                    if not self._mReversed:
                        i = (self._mLastOrder * SAMPLE_SIZE) + order
                        model = self._mModel['precedenceMatrix'][i]
                    else:  # reverse the order of the letters in the lookup
                        i = (order * SAMPLE_SIZE) + self._mLastOrder
                        model = self._mModel['precedenceMatrix'][i]
                    # `model` is the likelihood category of this pair
                    self._mSeqCounters[model] += 1
            self._mLastOrder = order

        # Shortcut: once enough pairs are seen, commit to a verdict early.
        if self.get_state() == constants.eDetecting:
            if self._mTotalSeqs > SB_ENOUGH_REL_THRESHOLD:
                cf = self.get_confidence()
                if cf > POSITIVE_SHORTCUT_THRESHOLD:
                    if constants._debug:
                        sys.stderr.write('%s confidence = %s, we have a'
                                         'winner\n' %
                                         (self._mModel['charsetName'], cf))
                    self._mState = constants.eFoundIt
                elif cf < NEGATIVE_SHORTCUT_THRESHOLD:
                    if constants._debug:
                        sys.stderr.write('%s confidence = %s, below negative'
                                         'shortcut threshhold %s\n' %
                                         (self._mModel['charsetName'], cf,
                                          NEGATIVE_SHORTCUT_THRESHOLD))
                    self._mState = constants.eNotMe

        return self.get_state()

    def get_confidence(self):
        # Ratio of "positive" pairs relative to the language's typical
        # positive ratio, scaled by the sampled-character density;
        # clamped to [0.01, 0.99].
        r = 0.01
        if self._mTotalSeqs > 0:
            r = ((1.0 * self._mSeqCounters[POSITIVE_CAT]) / self._mTotalSeqs
                 / self._mModel['mTypicalPositiveRatio'])
            r = r * self._mFreqChar / self._mTotalChar
        if r >= 1.0:
            r = 0.99
        return r
mit
zhanqxun/cv_fish
PIL/GdImageFile.py
25
2182
# # The Python Imaging Library. # $Id$ # # GD file handling # # History: # 1996-04-12 fl Created # # Copyright (c) 1997 by Secret Labs AB. # Copyright (c) 1996 by Fredrik Lundh. # # See the README file for information on usage and redistribution. # # NOTE: This format cannot be automatically recognized, so the # class is not registered for use with Image.open(). To open a # gd file, use the GdImageFile.open() function instead. # THE GD FORMAT IS NOT DESIGNED FOR DATA INTERCHANGE. This # implementation is provided for convenience and demonstrational # purposes only. from PIL import ImageFile, ImagePalette, _binary from PIL._util import isPath __version__ = "0.1" try: import builtins except ImportError: import __builtin__ builtins = __builtin__ i16 = _binary.i16be ## # Image plugin for the GD uncompressed format. Note that this format # is not supported by the standard <b>Image.open</b> function. To use # this plugin, you have to import the <b>GdImageFile</b> module and # use the <b>GdImageFile.open</b> function. class GdImageFile(ImageFile.ImageFile): format = "GD" format_description = "GD uncompressed images" def _open(self): # Header s = self.fp.read(775) self.mode = "L" # FIXME: "P" self.size = i16(s[0:2]), i16(s[2:4]) # transparency index tindex = i16(s[5:7]) if tindex < 256: self.info["transparent"] = tindex self.palette = ImagePalette.raw("RGB", s[7:]) self.tile = [("raw", (0, 0)+self.size, 775, ("L", 0, -1))] ## # Load texture from a GD image file. # # @param filename GD file name, or an opened file handle. # @param mode Optional mode. In this version, if the mode argument # is given, it must be "r". # @return An image instance. # @exception IOError If the image could not be read. def open(fp, mode="r"): if mode != "r": raise ValueError("bad mode") if isPath(fp): filename = fp fp = builtins.open(fp, "rb") else: filename = "" try: return GdImageFile(fp, filename) except SyntaxError: raise IOError("cannot identify this image file")
apache-2.0
miguelparaiso/OdooAccessible
addons/account_analytic_plans/__openerp__.py
264
3114
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

# Addon manifest for the account_analytic_plans module; the 'description'
# value is reStructuredText rendered on the module page.
{
    'name': 'Multiple Analytic Plans',
    'version': '1.0',
    'category': 'Accounting & Finance',
    'description': """
This module allows to use several analytic plans according to the general journal.
==================================================================================

Here multiple analytic lines are created when the invoice or the entries
are confirmed.

For example, you can define the following analytic structure:
-------------------------------------------------------------
  * **Projects**
      * Project 1
          + SubProj 1.1
          + SubProj 1.2
      * Project 2
  * **Salesman**
      * Eric
      * Fabien

Here, we have two plans: Projects and Salesman. An invoice line must be
able to write analytic entries in the 2 plans: SubProj 1.1 and Fabien. The
amount can also be split.

The following example is for an invoice that touches the two subprojects and assigned to one salesman:
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
**Plan1:**

    * SubProject 1.1 : 50%
    * SubProject 1.2 : 50%

**Plan2:**
    Eric: 100%

So when this line of invoice will be confirmed, it will generate 3 analytic lines,for one account entry.
The analytic plan validates the minimum and maximum percentage at the time of creation of distribution models.
    """,
    'author': 'OpenERP SA',
    'website': 'https://www.odoo.com/page/accounting',
    'depends': ['account', 'account_analytic_default'],
    'data': [
        'security/account_analytic_plan_security.xml',
        'security/ir.model.access.csv',
        'account_analytic_plans_view.xml',
        'account_analytic_plans_report.xml',
        'wizard/analytic_plan_create_model_view.xml',
        'wizard/account_crossovered_analytic_view.xml',
        'views/report_crossoveredanalyticplans.xml',
        'views/account_analytic_plans.xml',
    ],
    'demo': [],
    # NOTE(review): "acount_analytic_plans_report.yml" looks like a typo
    # for "account_...", but the file in the repository may actually be
    # named this way -- verify before renaming.
    'test': ['test/acount_analytic_plans_report.yml'],
    'installable': True,
    'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
J4LP/eve-wspace
evewspace/SiteTracker/models.py
5
10670
# Eve W-Space # Copyright (C) 2013 Andrew Austin and other contributors # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. An additional term under section # 7 of the GPL is included in the LICENSE file. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from django.db import models from django.contrib.auth.models import User from Map.models import Map, System, MapSystem from core.utils import get_config from datetime import datetime import pytz # Create your models here. class Fleet(models.Model): """Represents a SiteTracker fleet.""" system = models.ForeignKey(System, related_name="stfleets") initial_boss = models.ForeignKey(User, related_name="bossfleets") current_boss = models.ForeignKey(User, related_name="currently_bossing") started = models.DateTimeField(auto_now_add=True) ended = models.DateTimeField(blank=True, null=True) roles_needed = models.ManyToManyField('SiteRole', related_name="fleets_need") class Meta: permissions = (("can_sitetracker", "Use the Site Tracker system."),) def __unicode__(self): return u"MapSystem: %s Boss: %s Started: %s Ended: %s" % (self.system.name, self.current_boss.username, self.started, self.ended) def __contains__(self, user): """ Allow for user in fleet syntax to determine both inactive and active user records for a fleet. 
""" if user is None: return False return self.members.filter(user=user).exists() def __iter__(self): """ Provide support for syntax: for User in fleet """ for member_rec in self.members.all(): yield member_rec.user def credit_site(self, site_type, system, boss): """ Credits a site. """ # Get the fleet member weighting variable and multiplier x = float(get_config("ST_SIZE_WEIGHT", None).value) n = self.members.count() if x > 1: weight_factor = x / float(n + (x - 1)) else: # If the factor is set to anything equal to or less than 1, # we will not weight the results by fleet size weight_factor = float(1) if SystemWeight.objects.filter(system=system).count(): weight_factor = weight_factor * system.st_weight.weight raw_points = SiteWeight.objects.get(site_type=site_type, sysclass=system.sysclass).raw_points site = SiteRecord(fleet=self, site_type=site_type, system=system, boss=boss, fleetsize=self.members.count(), raw_points=raw_points, weighted_points = raw_points * weight_factor) site.save() for user in self.members.filter(leavetime=None).all(): site.members.add(UserSite(site=site, user=user.user, pending=False)) return site def close_fleet(self): """ Closes the SiteTracker fleet. """ for member in self.members.filter(leavetime=None): member.leavetime = datetime.now(pytz.utc) member.save() self.ended = datetime.now(pytz.utc) self.save() def join_fleet(self, user): """ Adds user to fleet. """ if not self.members.filter(user=user, leavetime=None).count(): u = UserLog(fleet=self, user=user).save() else: u = self.members.get(user=user, leavetime=None) return u def active_members(self): """ Return a list of active members. """ return self.members.filter(leavetime=None) def leave_fleet(self, user): """ Removes user from fleet. """ if self.members.filter(leavetime=None).count() == 1: # We're the only member left, close the fleet. self.close_fleet() return None elif self.current_boss == user: # We're the boss, give it to the first schmuck we can. 
self.current_boss = self.members.exclude(user=user).filter( leavetime=None).all()[0].user self.save() UserLog.objects.filter(fleet=self, user=user, leavetime=None).update(leavetime=datetime.now(pytz.utc)) def make_boss(self, user): """ Change the current fleet boss. """ self.current_boss = user self.save() class SiteRole(models.Model): """Represents a role for a sitetracker fleet.""" short_name = models.CharField(max_length=32, unique=True) long_name = models.CharField(max_length=255, unique=True) class SiteType(models.Model): """Represents a type of site that can be credited.""" shortname = models.CharField(max_length=8, unique=True) longname = models.CharField(max_length=80, unique=True) # Defunct site types are maintained in the databse for relational purposes but can no longer be credited defunct = models.BooleanField() def __unicode__(self): return self.longname class SiteWeight(models.Model): """ Represents the raw points available for a site type / system class combo """ site_type = models.ForeignKey(SiteType, related_name='weights') sysclass = models.IntegerField(choices=[(1, "C1"), (2, "C2"), (3, "C3"), (4, "C4"), (5, "C5"), (6, "C6"), (7, "High Sec"), (8, "Low Sec"), (9, "Null Sec")]) raw_points = models.IntegerField() class SystemWeight(models.Model): """ Respresents a multiplier for site credit for a system. 
""" system = models.OneToOneField(System, primary_key=True, related_name='st_weight') weight = models.FloatField() class SiteRecord(models.Model): """Represents the record of a site run.""" fleet = models.ForeignKey(Fleet, related_name="sites") site_type = models.ForeignKey(SiteType, related_name="sitesrun") timestamp = models.DateTimeField(auto_now_add=True) system = models.ForeignKey(System, related_name="sitescompleted") boss = models.ForeignKey(User, related_name="sitescredited") fleetsize = models.IntegerField() raw_points = models.IntegerField() weighted_points = models.FloatField() def __unicode__(self): return u"System: %s Time: %s Type: %s" % (self.system.name, self.timestamp, self.type.shortname) def __contains__(self, user): """ Allow for if user in siterecord to determine if a user has an entry. """ if user is None: return False return self.members.filter(user=user).exists() def __iter__(self): """ Allow for syntax: for user in siterecord. """ for log in self.members.all(): yield log.user def is_pending(self, user): """ Return True if user's credit is pending. """ return self.members.get(user=user).pending class UserSite(models.Model): """Represents a user's credit for a site.""" site = models.ForeignKey(SiteRecord, related_name="members") user = models.ForeignKey(User, related_name="sites") pending = models.BooleanField() def approve(self): """ Mark the site approved. """ self.pending = False self.save() class UserLog(models.Model): """Represents a user's sitetracker log.""" fleet = models.ForeignKey(Fleet, related_name="members") user = models.ForeignKey(User, related_name="sitetrackerlogs") jointime = models.DateTimeField(auto_now_add=True) leavetime = models.DateTimeField(null=True, blank=True) def pending_sites(self): """ Returns a list of site records which are pending credit. 
""" pending_sites = [] for site in self.fleet.sites.all(): if UserSite.objects.filter(user=self.user, site=site, pending=True).exists(): pending_sites.append(site) return pending_sites class ClaimPeriod(models.Model): """Represents a claim period that Users can claim against.""" starttime = models.DateTimeField() endtime = models.DateTimeField() name = models.CharField(max_length = 80) closetime = models.DateTimeField(blank=True, null=True) loothauledby = models.ForeignKey(User, related_name="loothauled", null=True, blank=True) lootsoldby = models.ForeignKey(User, related_name="lootsold", null=True, blank=True) class Meta: permissions = (("can_close_claims", "Close the claims period early."), ("can_reopen_claims", "Reopen the claims period."), ("can_haul_loot", "Mark the claim period as hauled."), ("can_sell_loot", "Mark the claim period as sold."),) def __unicode__(self): return self.name class Claim(models.Model): """Represents a User's claim for a claim period.""" period = models.ForeignKey(ClaimPeriod, related_name="claims") user = models.ForeignKey(User, related_name="claims") shareclaimed = models.FloatField() description = models.TextField() bonus = models.FloatField(blank=True, null=True) class PayoutReport(models.Model): """Represents a payout report and contains general information about the payout period.""" period = models.ForeignKey(ClaimPeriod, related_name="reports") createdby = models.ForeignKey(User, related_name="payoutreports") grossprofit = models.BigIntegerField() datepaid = models.DateTimeField(blank=True, null=True) class PayoutEntry(models.Model): """Represents an entry in the payout report.""" report = models.ForeignKey(PayoutReport, related_name="entries") user = models.ForeignKey(User, related_name="payouts") claim = models.ForeignKey(Claim, related_name="payout") iskshare = models.BigIntegerField()
gpl-3.0
ychen820/microblog
y/google-cloud-sdk/platform/google_appengine/lib/django-1.5/django/contrib/admindocs/urls.py
317
1092
from django.conf.urls import patterns, url from django.contrib.admindocs import views urlpatterns = patterns('', url('^$', views.doc_index, name='django-admindocs-docroot' ), url('^bookmarklets/$', views.bookmarklets, name='django-admindocs-bookmarklets' ), url('^tags/$', views.template_tag_index, name='django-admindocs-tags' ), url('^filters/$', views.template_filter_index, name='django-admindocs-filters' ), url('^views/$', views.view_index, name='django-admindocs-views-index' ), url('^views/(?P<view>[^/]+)/$', views.view_detail, name='django-admindocs-views-detail' ), url('^models/$', views.model_index, name='django-admindocs-models-index' ), url('^models/(?P<app_label>[^\.]+)\.(?P<model_name>[^/]+)/$', views.model_detail, name='django-admindocs-models-detail' ), url('^templates/(?P<template>.*)/$', views.template_detail, name='django-admindocs-templates' ), )
bsd-3-clause
funbaker/astropy
astropy/table/tests/test_table.py
2
69207
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst import gc import sys import copy from io import StringIO from collections import OrderedDict import pytest import numpy as np from numpy.testing import assert_allclose from ...io import fits from ...tests.helper import (assert_follows_unicode_guidelines, ignore_warnings, catch_warnings) from ...utils.data import get_pkg_data_filename from ... import table from ... import units as u from .conftest import MaskedTable try: with ignore_warnings(DeprecationWarning): # Ignore DeprecationWarning on pandas import in Python 3.5--see # https://github.com/astropy/astropy/issues/4380 import pandas # pylint: disable=W0611 except ImportError: HAS_PANDAS = False else: HAS_PANDAS = True class SetupData: def _setup(self, table_types): self._table_type = table_types.Table self._column_type = table_types.Column @property def a(self): if self._column_type is not None: if not hasattr(self, '_a'): self._a = self._column_type( [1, 2, 3], name='a', format='%d', meta={'aa': [0, 1, 2, 3, 4]}) return self._a @property def b(self): if self._column_type is not None: if not hasattr(self, '_b'): self._b = self._column_type( [4, 5, 6], name='b', format='%d', meta={'aa': 1}) return self._b @property def c(self): if self._column_type is not None: if not hasattr(self, '_c'): self._c = self._column_type([7, 8, 9], 'c') return self._c @property def d(self): if self._column_type is not None: if not hasattr(self, '_d'): self._d = self._column_type([7, 8, 7], 'd') return self._d @property def obj(self): if self._column_type is not None: if not hasattr(self, '_obj'): self._obj = self._column_type([1, 'string', 3], 'obj', dtype='O') return self._obj @property def t(self): if self._table_type is not None: if not hasattr(self, '_t'): self._t = self._table_type([self.a, self.b]) return self._t @pytest.mark.usefixtures('table_types') class TestSetTableColumn(SetupData): def test_set_row(self, table_types): """Set a row from 
a tuple of values""" self._setup(table_types) t = table_types.Table([self.a, self.b]) t[1] = (20, 21) assert t['a'][0] == 1 assert t['a'][1] == 20 assert t['a'][2] == 3 assert t['b'][0] == 4 assert t['b'][1] == 21 assert t['b'][2] == 6 def test_set_row_existing(self, table_types): """Set a row from another existing row""" self._setup(table_types) t = table_types.Table([self.a, self.b]) t[0] = t[1] assert t[0][0] == 2 assert t[0][1] == 5 def test_set_row_fail_1(self, table_types): """Set a row from an incorrectly-sized or typed set of values""" self._setup(table_types) t = table_types.Table([self.a, self.b]) with pytest.raises(ValueError): t[1] = (20, 21, 22) with pytest.raises(ValueError): t[1] = 0 def test_set_row_fail_2(self, table_types): """Set a row from an incorrectly-typed tuple of values""" self._setup(table_types) t = table_types.Table([self.a, self.b]) with pytest.raises(ValueError): t[1] = ('abc', 'def') def test_set_new_col_new_table(self, table_types): """Create a new column in empty table using the item access syntax""" self._setup(table_types) t = table_types.Table() t['aa'] = self.a # Test that the new column name is 'aa' and that the values match assert np.all(t['aa'] == self.a) assert t.colnames == ['aa'] def test_set_new_col_new_table_quantity(self, table_types): """Create a new column (from a quantity) in empty table using the item access syntax""" self._setup(table_types) t = table_types.Table() t['aa'] = np.array([1, 2, 3]) * u.m assert np.all(t['aa'] == np.array([1, 2, 3])) assert t['aa'].unit == u.m t['bb'] = 3 * u.m assert np.all(t['bb'] == 3) assert t['bb'].unit == u.m def test_set_new_col_existing_table(self, table_types): """Create a new column in an existing table using the item access syntax""" self._setup(table_types) t = table_types.Table([self.a]) # Add a column t['bb'] = self.b assert np.all(t['bb'] == self.b) assert t.colnames == ['a', 'bb'] assert t['bb'].meta == self.b.meta assert t['bb'].format == self.b.format # Add another 
column t['c'] = t['a'] assert np.all(t['c'] == t['a']) assert t.colnames == ['a', 'bb', 'c'] assert t['c'].meta == t['a'].meta assert t['c'].format == t['a'].format # Add a multi-dimensional column t['d'] = table_types.Column(np.arange(12).reshape(3, 2, 2)) assert t['d'].shape == (3, 2, 2) assert t['d'][0, 0, 1] == 1 # Add column from a list t['e'] = ['hello', 'the', 'world'] assert np.all(t['e'] == np.array(['hello', 'the', 'world'])) # Make sure setting existing column still works t['e'] = ['world', 'hello', 'the'] assert np.all(t['e'] == np.array(['world', 'hello', 'the'])) # Add a column via broadcasting t['f'] = 10 assert np.all(t['f'] == 10) # Add a column from a Quantity t['g'] = np.array([1, 2, 3]) * u.m assert np.all(t['g'].data == np.array([1, 2, 3])) assert t['g'].unit == u.m # Add a column from a (scalar) Quantity t['g'] = 3 * u.m assert np.all(t['g'].data == 3) assert t['g'].unit == u.m def test_set_new_unmasked_col_existing_table(self, table_types): """Create a new column in an existing table using the item access syntax""" self._setup(table_types) t = table_types.Table([self.a]) # masked or unmasked b = table.Column(name='b', data=[1, 2, 3]) # unmasked t['b'] = b assert np.all(t['b'] == b) def test_set_new_masked_col_existing_table(self, table_types): """Create a new column in an existing table using the item access syntax""" self._setup(table_types) t = table_types.Table([self.a]) # masked or unmasked b = table.MaskedColumn(name='b', data=[1, 2, 3]) # masked t['b'] = b assert np.all(t['b'] == b) def test_set_new_col_existing_table_fail(self, table_types): """Generate failure when creating a new column using the item access syntax""" self._setup(table_types) t = table_types.Table([self.a]) # Wrong size with pytest.raises(ValueError): t['b'] = [1, 2] @pytest.mark.usefixtures('table_types') class TestEmptyData(): def test_1(self, table_types): t = table_types.Table() t.add_column(table_types.Column(name='a', dtype=int, length=100)) assert len(t['a']) 
== 100 def test_2(self, table_types): t = table_types.Table() t.add_column(table_types.Column(name='a', dtype=int, shape=(3, ), length=100)) assert len(t['a']) == 100 def test_3(self, table_types): t = table_types.Table() # length is not given t.add_column(table_types.Column(name='a', dtype=int)) assert len(t['a']) == 0 def test_4(self, table_types): t = table_types.Table() # length is not given t.add_column(table_types.Column(name='a', dtype=int, shape=(3, 4))) assert len(t['a']) == 0 def test_5(self, table_types): t = table_types.Table() t.add_column(table_types.Column(name='a')) # dtype is not specified assert len(t['a']) == 0 def test_add_via_setitem_and_slice(self, table_types): """Test related to #3023 where a MaskedColumn is created with name=None and then gets changed to name='a'. After PR #2790 this test fails without the #3023 fix.""" t = table_types.Table() t['a'] = table_types.Column([1, 2, 3]) t2 = t[:] assert t2.colnames == t.colnames @pytest.mark.usefixtures('table_types') class TestNewFromColumns(): def test_simple(self, table_types): cols = [table_types.Column(name='a', data=[1, 2, 3]), table_types.Column(name='b', data=[4, 5, 6], dtype=np.float32)] t = table_types.Table(cols) assert np.all(t['a'].data == np.array([1, 2, 3])) assert np.all(t['b'].data == np.array([4, 5, 6], dtype=np.float32)) assert type(t['b'][1]) is np.float32 def test_from_np_array(self, table_types): cols = [table_types.Column(name='a', data=np.array([1, 2, 3], dtype=np.int64), dtype=np.float64), table_types.Column(name='b', data=np.array([4, 5, 6], dtype=np.float32))] t = table_types.Table(cols) assert np.all(t['a'] == np.array([1, 2, 3], dtype=np.float64)) assert np.all(t['b'] == np.array([4, 5, 6], dtype=np.float32)) assert type(t['a'][1]) is np.float64 assert type(t['b'][1]) is np.float32 def test_size_mismatch(self, table_types): cols = [table_types.Column(name='a', data=[1, 2, 3]), table_types.Column(name='b', data=[4, 5, 6, 7])] with pytest.raises(ValueError): 
table_types.Table(cols) def test_name_none(self, table_types): """Column with name=None can init a table whether or not names are supplied""" c = table_types.Column(data=[1, 2], name='c') d = table_types.Column(data=[3, 4]) t = table_types.Table([c, d], names=(None, 'd')) assert t.colnames == ['c', 'd'] t = table_types.Table([c, d]) assert t.colnames == ['c', 'col1'] @pytest.mark.usefixtures('table_types') class TestReverse(): def test_reverse(self, table_types): t = table_types.Table([[1, 2, 3], ['a', 'b', 'cc']]) t.reverse() assert np.all(t['col0'] == np.array([3, 2, 1])) assert np.all(t['col1'] == np.array(['cc', 'b', 'a'])) t2 = table_types.Table(t, copy=False) assert np.all(t2['col0'] == np.array([3, 2, 1])) assert np.all(t2['col1'] == np.array(['cc', 'b', 'a'])) t2 = table_types.Table(t, copy=True) assert np.all(t2['col0'] == np.array([3, 2, 1])) assert np.all(t2['col1'] == np.array(['cc', 'b', 'a'])) t2.sort('col0') assert np.all(t2['col0'] == np.array([1, 2, 3])) assert np.all(t2['col1'] == np.array(['a', 'b', 'cc'])) def test_reverse_big(self, table_types): x = np.arange(10000) y = x + 1 t = table_types.Table([x, y], names=('x', 'y')) t.reverse() assert np.all(t['x'] == x[::-1]) assert np.all(t['y'] == y[::-1]) @pytest.mark.usefixtures('table_types') class TestColumnAccess(): def test_1(self, table_types): t = table_types.Table() with pytest.raises(KeyError): t['a'] def test_2(self, table_types): t = table_types.Table() t.add_column(table_types.Column(name='a', data=[1, 2, 3])) assert np.all(t['a'] == np.array([1, 2, 3])) with pytest.raises(KeyError): t['b'] # column does not exist def test_itercols(self, table_types): names = ['a', 'b', 'c'] t = table_types.Table([[1], [2], [3]], names=names) for name, col in zip(names, t.itercols()): assert name == col.name assert isinstance(col, table_types.Column) @pytest.mark.usefixtures('table_types') class TestAddLength(SetupData): def test_right_length(self, table_types): self._setup(table_types) t = 
table_types.Table([self.a]) t.add_column(self.b) def test_too_long(self, table_types): self._setup(table_types) t = table_types.Table([self.a]) with pytest.raises(ValueError): t.add_column(table_types.Column(name='b', data=[4, 5, 6, 7])) # data too long def test_too_short(self, table_types): self._setup(table_types) t = table_types.Table([self.a]) with pytest.raises(ValueError): t.add_column(table_types.Column(name='b', data=[4, 5])) # data too short @pytest.mark.usefixtures('table_types') class TestAddPosition(SetupData): def test_1(self, table_types): self._setup(table_types) t = table_types.Table() t.add_column(self.a, 0) def test_2(self, table_types): self._setup(table_types) t = table_types.Table() t.add_column(self.a, 1) def test_3(self, table_types): self._setup(table_types) t = table_types.Table() t.add_column(self.a, -1) def test_5(self, table_types): self._setup(table_types) t = table_types.Table() with pytest.raises(ValueError): t.index_column('b') def test_6(self, table_types): self._setup(table_types) t = table_types.Table() t.add_column(self.a) t.add_column(self.b) assert t.columns.keys() == ['a', 'b'] def test_7(self, table_types): self._setup(table_types) t = table_types.Table([self.a]) t.add_column(self.b, t.index_column('a')) assert t.columns.keys() == ['b', 'a'] def test_8(self, table_types): self._setup(table_types) t = table_types.Table([self.a]) t.add_column(self.b, t.index_column('a') + 1) assert t.columns.keys() == ['a', 'b'] def test_9(self, table_types): self._setup(table_types) t = table_types.Table() t.add_column(self.a) t.add_column(self.b, t.index_column('a') + 1) t.add_column(self.c, t.index_column('b')) assert t.columns.keys() == ['a', 'c', 'b'] def test_10(self, table_types): self._setup(table_types) t = table_types.Table() t.add_column(self.a) ia = t.index_column('a') t.add_column(self.b, ia + 1) t.add_column(self.c, ia) assert t.columns.keys() == ['c', 'a', 'b'] @pytest.mark.usefixtures('table_types') class TestAddName(SetupData): 
def test_override_name(self, table_types): self._setup(table_types) t = table_types.Table() # Check that we can override the name of the input column in the Table t.add_column(self.a, name='b') t.add_column(self.b, name='a') assert t.columns.keys() == ['b', 'a'] # Check that we did not change the name of the input column assert self.a.info.name == 'a' assert self.b.info.name == 'b' # Now test with an input column from another table t2 = table_types.Table() t2.add_column(t['a'], name='c') assert t2.columns.keys() == ['c'] # Check that we did not change the name of the input column assert t.columns.keys() == ['b', 'a'] # Check that we can give a name if none was present col = table_types.Column([1, 2, 3]) t.add_column(col, name='c') assert t.columns.keys() == ['b', 'a', 'c'] def test_default_name(self, table_types): t = table_types.Table() col = table_types.Column([1, 2, 3]) t.add_column(col) assert t.columns.keys() == ['col0'] @pytest.mark.usefixtures('table_types') class TestInitFromTable(SetupData): def test_from_table_cols(self, table_types): """Ensure that using cols from an existing table gives a clean copy. 
""" self._setup(table_types) t = self.t cols = t.columns # Construct Table with cols via Table._new_from_cols t2a = table_types.Table([cols['a'], cols['b'], self.c]) # Construct with add_column t2b = table_types.Table() t2b.add_column(cols['a']) t2b.add_column(cols['b']) t2b.add_column(self.c) t['a'][1] = 20 t['b'][1] = 21 for t2 in [t2a, t2b]: t2['a'][2] = 10 t2['b'][2] = 11 t2['c'][2] = 12 t2.columns['a'].meta['aa'][3] = 10 assert np.all(t['a'] == np.array([1, 20, 3])) assert np.all(t['b'] == np.array([4, 21, 6])) assert np.all(t2['a'] == np.array([1, 2, 10])) assert np.all(t2['b'] == np.array([4, 5, 11])) assert np.all(t2['c'] == np.array([7, 8, 12])) assert t2['a'].name == 'a' assert t2.columns['a'].meta['aa'][3] == 10 assert t.columns['a'].meta['aa'][3] == 3 @pytest.mark.usefixtures('table_types') class TestAddColumns(SetupData): def test_add_columns1(self, table_types): self._setup(table_types) t = table_types.Table() t.add_columns([self.a, self.b, self.c]) assert t.colnames == ['a', 'b', 'c'] def test_add_columns2(self, table_types): self._setup(table_types) t = table_types.Table([self.a, self.b]) t.add_columns([self.c, self.d]) assert t.colnames == ['a', 'b', 'c', 'd'] assert np.all(t['c'] == np.array([7, 8, 9])) def test_add_columns3(self, table_types): self._setup(table_types) t = table_types.Table([self.a, self.b]) t.add_columns([self.c, self.d], indexes=[1, 0]) assert t.colnames == ['d', 'a', 'c', 'b'] def test_add_columns4(self, table_types): self._setup(table_types) t = table_types.Table([self.a, self.b]) t.add_columns([self.c, self.d], indexes=[0, 0]) assert t.colnames == ['c', 'd', 'a', 'b'] def test_add_columns5(self, table_types): self._setup(table_types) t = table_types.Table([self.a, self.b]) t.add_columns([self.c, self.d], indexes=[2, 2]) assert t.colnames == ['a', 'b', 'c', 'd'] def test_add_columns6(self, table_types): """Check that we can override column names.""" self._setup(table_types) t = table_types.Table() t.add_columns([self.a, 
self.b, self.c], names=['b', 'c', 'a']) assert t.colnames == ['b', 'c', 'a'] def test_add_columns7(self, table_types): """Check that default names are used when appropriate.""" t = table_types.Table() col0 = table_types.Column([1, 2, 3]) col1 = table_types.Column([4, 5, 3]) t.add_columns([col0, col1]) assert t.colnames == ['col0', 'col1'] def test_add_duplicate_column(self, table_types): self._setup(table_types) t = table_types.Table() t.add_column(self.a) with pytest.raises(ValueError): t.add_column(table_types.Column(name='a', data=[0, 1, 2])) t.add_column(table_types.Column(name='a', data=[0, 1, 2]), rename_duplicate=True) t.add_column(self.b) t.add_column(self.c) assert t.colnames == ['a', 'a_1', 'b', 'c'] t.add_column(table_types.Column(name='a', data=[0, 1, 2]), rename_duplicate=True) assert t.colnames == ['a', 'a_1', 'b', 'c', 'a_2'] # test adding column from a separate Table t1 = table_types.Table() t1.add_column(self.a) with pytest.raises(ValueError): t.add_column(t1['a']) t.add_column(t1['a'], rename_duplicate=True) t1['a'][0] = 100 # Change original column assert t.colnames == ['a', 'a_1', 'b', 'c', 'a_2', 'a_3'] assert t1.colnames == ['a'] # Check new column didn't change (since name conflict forced a copy) assert t['a_3'][0] == self.a[0] def test_add_duplicate_columns(self, table_types): self._setup(table_types) t = table_types.Table([self.a, self.b, self.c]) with pytest.raises(ValueError): t.add_columns([table_types.Column(name='a', data=[0, 1, 2]), table_types.Column(name='b', data=[0, 1, 2])]) t.add_columns([table_types.Column(name='a', data=[0, 1, 2]), table_types.Column(name='b', data=[0, 1, 2])], rename_duplicate=True) t.add_column(self.d) assert t.colnames == ['a', 'b', 'c', 'a_1', 'b_1', 'd'] @pytest.mark.usefixtures('table_types') class TestAddRow(SetupData): @property def b(self): if self._column_type is not None: if not hasattr(self, '_b'): self._b = self._column_type(name='b', data=[4.0, 5.1, 6.2]) return self._b @property def c(self): if 
self._column_type is not None: if not hasattr(self, '_c'): self._c = self._column_type(name='c', data=['7', '8', '9']) return self._c @property def d(self): if self._column_type is not None: if not hasattr(self, '_d'): self._d = self._column_type(name='d', data=[[1, 2], [3, 4], [5, 6]]) return self._d @property def t(self): if self._table_type is not None: if not hasattr(self, '_t'): self._t = self._table_type([self.a, self.b, self.c]) return self._t def test_add_none_to_empty_table(self, table_types): self._setup(table_types) t = table_types.Table(names=('a', 'b', 'c'), dtype=('(2,)i', 'S4', 'O')) t.add_row() assert np.all(t['a'][0] == [0, 0]) assert t['b'][0] == '' assert t['c'][0] == 0 t.add_row() assert np.all(t['a'][1] == [0, 0]) assert t['b'][1] == '' assert t['c'][1] == 0 def test_add_stuff_to_empty_table(self, table_types): self._setup(table_types) t = table_types.Table(names=('a', 'b', 'obj'), dtype=('(2,)i', 'S8', 'O')) t.add_row([[1, 2], 'hello', 'world']) assert np.all(t['a'][0] == [1, 2]) assert t['b'][0] == 'hello' assert t['obj'][0] == 'world' # Make sure it is not repeating last row but instead # adding zeros (as documented) t.add_row() assert np.all(t['a'][1] == [0, 0]) assert t['b'][1] == '' assert t['obj'][1] == 0 def test_add_table_row(self, table_types): self._setup(table_types) t = self.t t['d'] = self.d t2 = table_types.Table([self.a, self.b, self.c, self.d]) t.add_row(t2[0]) assert len(t) == 4 assert np.all(t['a'] == np.array([1, 2, 3, 1])) assert np.allclose(t['b'], np.array([4.0, 5.1, 6.2, 4.0])) assert np.all(t['c'] == np.array(['7', '8', '9', '7'])) assert np.all(t['d'] == np.array([[1, 2], [3, 4], [5, 6], [1, 2]])) def test_add_table_row_obj(self, table_types): self._setup(table_types) t = table_types.Table([self.a, self.b, self.obj]) t.add_row([1, 4.0, [10]]) assert len(t) == 4 assert np.all(t['a'] == np.array([1, 2, 3, 1])) assert np.allclose(t['b'], np.array([4.0, 5.1, 6.2, 4.0])) assert np.all(t['obj'] == np.array([1, 'string', 3, 
[10]], dtype='O')) def test_add_qtable_row_multidimensional(self): q = [[1, 2], [3, 4]] * u.m qt = table.QTable([q]) qt.add_row(([5, 6] * u.km,)) assert np.all(qt['col0'] == [[1, 2], [3, 4], [5000, 6000]] * u.m) def test_add_with_tuple(self, table_types): self._setup(table_types) t = self.t t.add_row((4, 7.2, '1')) assert len(t) == 4 assert np.all(t['a'] == np.array([1, 2, 3, 4])) assert np.allclose(t['b'], np.array([4.0, 5.1, 6.2, 7.2])) assert np.all(t['c'] == np.array(['7', '8', '9', '1'])) def test_add_with_list(self, table_types): self._setup(table_types) t = self.t t.add_row([4, 7.2, '10']) assert len(t) == 4 assert np.all(t['a'] == np.array([1, 2, 3, 4])) assert np.allclose(t['b'], np.array([4.0, 5.1, 6.2, 7.2])) assert np.all(t['c'] == np.array(['7', '8', '9', '1'])) def test_add_with_dict(self, table_types): self._setup(table_types) t = self.t t.add_row({'a': 4, 'b': 7.2}) assert len(t) == 4 assert np.all(t['a'] == np.array([1, 2, 3, 4])) assert np.allclose(t['b'], np.array([4.0, 5.1, 6.2, 7.2])) if t.masked: assert np.all(t['c'] == np.array(['7', '8', '9', '7'])) else: assert np.all(t['c'] == np.array(['7', '8', '9', ''])) def test_add_with_none(self, table_types): self._setup(table_types) t = self.t t.add_row() assert len(t) == 4 assert np.all(t['a'].data == np.array([1, 2, 3, 0])) assert np.allclose(t['b'], np.array([4.0, 5.1, 6.2, 0.0])) assert np.all(t['c'].data == np.array(['7', '8', '9', ''])) def test_add_missing_column(self, table_types): self._setup(table_types) t = self.t with pytest.raises(ValueError): t.add_row({'bad_column': 1}) def test_wrong_size_tuple(self, table_types): self._setup(table_types) t = self.t with pytest.raises(ValueError): t.add_row((1, 2)) def test_wrong_vals_type(self, table_types): self._setup(table_types) t = self.t with pytest.raises(TypeError): t.add_row(1) def test_add_row_failures(self, table_types): self._setup(table_types) t = self.t t_copy = table_types.Table(t, copy=True) # Wrong number of columns try: 
t.add_row([1, 2, 3, 4]) except ValueError: pass assert len(t) == 3 assert np.all(t.as_array() == t_copy.as_array()) # Wrong data type try: t.add_row(['one', 2, 3]) except ValueError: pass assert len(t) == 3 assert np.all(t.as_array() == t_copy.as_array()) def test_insert_table_row(self, table_types): """ Light testing of Table.insert_row() method. The deep testing is done via the add_row() tests which calls insert_row(index=len(self), ...), so here just test that the added index parameter is handled correctly. """ self._setup(table_types) row = (10, 40.0, 'x', [10, 20]) for index in range(-3, 4): indices = np.insert(np.arange(3), index, 3) t = table_types.Table([self.a, self.b, self.c, self.d]) t2 = t.copy() t.add_row(row) # By now we know this works t2.insert_row(index, row) for name in t.colnames: if t[name].dtype.kind == 'f': assert np.allclose(t[name][indices], t2[name]) else: assert np.all(t[name][indices] == t2[name]) for index in (-4, 4): t = table_types.Table([self.a, self.b, self.c, self.d]) with pytest.raises(IndexError): t.insert_row(index, row) @pytest.mark.usefixtures('table_types') class TestTableColumn(SetupData): def test_column_view(self, table_types): self._setup(table_types) t = self.t a = t.columns['a'] a[2] = 10 assert t['a'][2] == 10 @pytest.mark.usefixtures('table_types') class TestArrayColumns(SetupData): def test_1d(self, table_types): self._setup(table_types) b = table_types.Column(name='b', dtype=int, shape=(2, ), length=3) t = table_types.Table([self.a]) t.add_column(b) assert t['b'].shape == (3, 2) assert t['b'][0].shape == (2, ) def test_2d(self, table_types): self._setup(table_types) b = table_types.Column(name='b', dtype=int, shape=(2, 4), length=3) t = table_types.Table([self.a]) t.add_column(b) assert t['b'].shape == (3, 2, 4) assert t['b'][0].shape == (2, 4) def test_3d(self, table_types): self._setup(table_types) t = table_types.Table([self.a]) b = table_types.Column(name='b', dtype=int, shape=(2, 4, 6), length=3) t.add_column(b) 
assert t['b'].shape == (3, 2, 4, 6) assert t['b'][0].shape == (2, 4, 6) @pytest.mark.usefixtures('table_types') class TestRemove(SetupData): @property def t(self): if self._table_type is not None: if not hasattr(self, '_t'): self._t = self._table_type([self.a]) return self._t @property def t2(self): if self._table_type is not None: if not hasattr(self, '_t2'): self._t2 = self._table_type([self.a, self.b, self.c]) return self._t2 def test_1(self, table_types): self._setup(table_types) self.t.remove_columns('a') assert self.t.columns.keys() == [] assert self.t.as_array() is None def test_2(self, table_types): self._setup(table_types) self.t.add_column(self.b) self.t.remove_columns('a') assert self.t.columns.keys() == ['b'] assert self.t.dtype.names == ('b',) assert np.all(self.t['b'] == np.array([4, 5, 6])) def test_3(self, table_types): """Check remove_columns works for a single column with a name of more than one character. Regression test against #2699""" self._setup(table_types) self.t['new_column'] = self.t['a'] assert 'new_column' in self.t.columns.keys() self.t.remove_columns('new_column') assert 'new_column' not in self.t.columns.keys() def test_remove_nonexistent_row(self, table_types): self._setup(table_types) with pytest.raises(IndexError): self.t.remove_row(4) def test_remove_row_0(self, table_types): self._setup(table_types) self.t.add_column(self.b) self.t.add_column(self.c) self.t.remove_row(0) assert self.t.colnames == ['a', 'b', 'c'] assert np.all(self.t['b'] == np.array([5, 6])) def test_remove_row_1(self, table_types): self._setup(table_types) self.t.add_column(self.b) self.t.add_column(self.c) self.t.remove_row(1) assert self.t.colnames == ['a', 'b', 'c'] assert np.all(self.t['a'] == np.array([1, 3])) def test_remove_row_2(self, table_types): self._setup(table_types) self.t.add_column(self.b) self.t.add_column(self.c) self.t.remove_row(2) assert self.t.colnames == ['a', 'b', 'c'] assert np.all(self.t['c'] == np.array([7, 8])) def 
test_remove_row_slice(self, table_types): self._setup(table_types) self.t.add_column(self.b) self.t.add_column(self.c) self.t.remove_rows(slice(0, 2, 1)) assert self.t.colnames == ['a', 'b', 'c'] assert np.all(self.t['c'] == np.array([9])) def test_remove_row_list(self, table_types): self._setup(table_types) self.t.add_column(self.b) self.t.add_column(self.c) self.t.remove_rows([0, 2]) assert self.t.colnames == ['a', 'b', 'c'] assert np.all(self.t['c'] == np.array([8])) def test_remove_row_preserves_meta(self, table_types): self._setup(table_types) self.t.add_column(self.b) self.t.remove_rows([0, 2]) assert self.t['a'].meta == {'aa': [0, 1, 2, 3, 4]} assert self.t.dtype == np.dtype([(str('a'), 'int'), (str('b'), 'int')]) def test_delitem_row(self, table_types): self._setup(table_types) self.t.add_column(self.b) self.t.add_column(self.c) del self.t[1] assert self.t.colnames == ['a', 'b', 'c'] assert np.all(self.t['a'] == np.array([1, 3])) @pytest.mark.parametrize("idx", [[0, 2], np.array([0, 2])]) def test_delitem_row_list(self, table_types, idx): self._setup(table_types) self.t.add_column(self.b) self.t.add_column(self.c) del self.t[idx] assert self.t.colnames == ['a', 'b', 'c'] assert np.all(self.t['c'] == np.array([8])) def test_delitem_row_slice(self, table_types): self._setup(table_types) self.t.add_column(self.b) self.t.add_column(self.c) del self.t[0:2] assert self.t.colnames == ['a', 'b', 'c'] assert np.all(self.t['c'] == np.array([9])) def test_delitem_row_fail(self, table_types): self._setup(table_types) with pytest.raises(IndexError): del self.t[4] def test_delitem_row_float(self, table_types): self._setup(table_types) with pytest.raises(IndexError): del self.t[1.] 
def test_delitem1(self, table_types): self._setup(table_types) del self.t['a'] assert self.t.columns.keys() == [] assert self.t.as_array() is None def test_delitem2(self, table_types): self._setup(table_types) del self.t2['b'] assert self.t2.colnames == ['a', 'c'] def test_delitems(self, table_types): self._setup(table_types) del self.t2['a', 'b'] assert self.t2.colnames == ['c'] def test_delitem_fail(self, table_types): self._setup(table_types) with pytest.raises(KeyError): del self.t['d'] @pytest.mark.usefixtures('table_types') class TestKeep(SetupData): def test_1(self, table_types): self._setup(table_types) t = table_types.Table([self.a, self.b]) t.keep_columns([]) assert t.columns.keys() == [] assert t.as_array() is None def test_2(self, table_types): self._setup(table_types) t = table_types.Table([self.a, self.b]) t.keep_columns('b') assert t.columns.keys() == ['b'] assert t.dtype.names == ('b',) assert np.all(t['b'] == np.array([4, 5, 6])) @pytest.mark.usefixtures('table_types') class TestRename(SetupData): def test_1(self, table_types): self._setup(table_types) t = table_types.Table([self.a]) t.rename_column('a', 'b') assert t.columns.keys() == ['b'] assert t.dtype.names == ('b',) assert np.all(t['b'] == np.array([1, 2, 3])) def test_2(self, table_types): self._setup(table_types) t = table_types.Table([self.a, self.b]) t.rename_column('a', 'c') t.rename_column('b', 'a') assert t.columns.keys() == ['c', 'a'] assert t.dtype.names == ('c', 'a') if t.masked: assert t.mask.dtype.names == ('c', 'a') assert np.all(t['c'] == np.array([1, 2, 3])) assert np.all(t['a'] == np.array([4, 5, 6])) def test_rename_by_attr(self, table_types): self._setup(table_types) t = table_types.Table([self.a, self.b]) t['a'].name = 'c' t['b'].name = 'a' assert t.columns.keys() == ['c', 'a'] assert t.dtype.names == ('c', 'a') assert np.all(t['c'] == np.array([1, 2, 3])) assert np.all(t['a'] == np.array([4, 5, 6])) @pytest.mark.usefixtures('table_types') class TestSort(): def 
test_single(self, table_types): t = table_types.Table() t.add_column(table_types.Column(name='a', data=[2, 1, 3])) t.add_column(table_types.Column(name='b', data=[6, 5, 4])) t.add_column(table_types.Column(name='c', data=[(1, 2), (3, 4), (4, 5)])) assert np.all(t['a'] == np.array([2, 1, 3])) assert np.all(t['b'] == np.array([6, 5, 4])) t.sort('a') assert np.all(t['a'] == np.array([1, 2, 3])) assert np.all(t['b'] == np.array([5, 6, 4])) assert np.all(t['c'] == np.array([[3, 4], [1, 2], [4, 5]])) t.sort('b') assert np.all(t['a'] == np.array([3, 1, 2])) assert np.all(t['b'] == np.array([4, 5, 6])) assert np.all(t['c'] == np.array([[4, 5], [3, 4], [1, 2]])) def test_single_big(self, table_types): """Sort a big-ish table with a non-trivial sort order""" x = np.arange(10000) y = np.sin(x) t = table_types.Table([x, y], names=('x', 'y')) t.sort('y') idx = np.argsort(y) assert np.all(t['x'] == x[idx]) assert np.all(t['y'] == y[idx]) def test_empty(self, table_types): t = table_types.Table([[], []], dtype=['f4', 'U1']) t.sort('col1') def test_multiple(self, table_types): t = table_types.Table() t.add_column(table_types.Column(name='a', data=[2, 1, 3, 2, 3, 1])) t.add_column(table_types.Column(name='b', data=[6, 5, 4, 3, 5, 4])) assert np.all(t['a'] == np.array([2, 1, 3, 2, 3, 1])) assert np.all(t['b'] == np.array([6, 5, 4, 3, 5, 4])) t.sort(['a', 'b']) assert np.all(t['a'] == np.array([1, 1, 2, 2, 3, 3])) assert np.all(t['b'] == np.array([4, 5, 3, 6, 4, 5])) t.sort(['b', 'a']) assert np.all(t['a'] == np.array([2, 1, 3, 1, 3, 2])) assert np.all(t['b'] == np.array([3, 4, 4, 5, 5, 6])) t.sort(('a', 'b')) assert np.all(t['a'] == np.array([1, 1, 2, 2, 3, 3])) assert np.all(t['b'] == np.array([4, 5, 3, 6, 4, 5])) def test_multiple_with_bytes(self, table_types): t = table_types.Table() t.add_column(table_types.Column(name='firstname', data=[b"Max", b"Jo", b"John"])) t.add_column(table_types.Column(name='name', data=[b"Miller", b"Miller", b"Jackson"])) 
t.add_column(table_types.Column(name='tel', data=[12, 15, 19])) t.sort(['name', 'firstname']) assert np.all([t['firstname'] == np.array([b"John", b"Jo", b"Max"])]) assert np.all([t['name'] == np.array([b"Jackson", b"Miller", b"Miller"])]) assert np.all([t['tel'] == np.array([19, 15, 12])]) def test_multiple_with_unicode(self, table_types): # Before Numpy 1.6.2, sorting with multiple column names # failed when a unicode column was present. t = table_types.Table() t.add_column(table_types.Column( name='firstname', data=[str(x) for x in ["Max", "Jo", "John"]])) t.add_column(table_types.Column( name='name', data=[str(x) for x in ["Miller", "Miller", "Jackson"]])) t.add_column(table_types.Column(name='tel', data=[12, 15, 19])) t.sort(['name', 'firstname']) assert np.all([t['firstname'] == np.array( [str(x) for x in ["John", "Jo", "Max"]])]) assert np.all([t['name'] == np.array( [str(x) for x in ["Jackson", "Miller", "Miller"]])]) assert np.all([t['tel'] == np.array([19, 15, 12])]) def test_argsort(self, table_types): t = table_types.Table() t.add_column(table_types.Column(name='a', data=[2, 1, 3, 2, 3, 1])) t.add_column(table_types.Column(name='b', data=[6, 5, 4, 3, 5, 4])) assert np.all(t.argsort() == t.as_array().argsort()) i0 = t.argsort('a') i1 = t.as_array().argsort(order=['a']) assert np.all(t['a'][i0] == t['a'][i1]) i0 = t.argsort(['a', 'b']) i1 = t.as_array().argsort(order=['a', 'b']) assert np.all(t['a'][i0] == t['a'][i1]) assert np.all(t['b'][i0] == t['b'][i1]) def test_argsort_bytes(self, table_types): t = table_types.Table() t.add_column(table_types.Column(name='firstname', data=[b"Max", b"Jo", b"John"])) t.add_column(table_types.Column(name='name', data=[b"Miller", b"Miller", b"Jackson"])) t.add_column(table_types.Column(name='tel', data=[12, 15, 19])) assert np.all(t.argsort(['name', 'firstname']) == np.array([2, 1, 0])) def test_argsort_unicode(self, table_types): # Before Numpy 1.6.2, sorting with multiple column names # failed when a unicode column was 
present. t = table_types.Table() t.add_column(table_types.Column( name='firstname', data=[str(x) for x in ["Max", "Jo", "John"]])) t.add_column(table_types.Column( name='name', data=[str(x) for x in ["Miller", "Miller", "Jackson"]])) t.add_column(table_types.Column(name='tel', data=[12, 15, 19])) assert np.all(t.argsort(['name', 'firstname']) == np.array([2, 1, 0])) def test_rebuild_column_view_then_rename(self, table_types): """ Issue #2039 where renaming fails after any method that calls _rebuild_table_column_view (this includes sort and add_row). """ t = table_types.Table([[1]], names=('a',)) assert t.colnames == ['a'] assert t.dtype.names == ('a',) t.add_row((2,)) assert t.colnames == ['a'] assert t.dtype.names == ('a',) t.rename_column('a', 'b') assert t.colnames == ['b'] assert t.dtype.names == ('b',) t.sort('b') assert t.colnames == ['b'] assert t.dtype.names == ('b',) t.rename_column('b', 'c') assert t.colnames == ['c'] assert t.dtype.names == ('c',) @pytest.mark.usefixtures('table_types') class TestIterator(): def test_iterator(self, table_types): d = np.array([(2, 1), (3, 6), (4, 5)], dtype=[(str('a'), 'i4'), (str('b'), 'i4')]) t = table_types.Table(d) if t.masked: with pytest.raises(ValueError): t[0] == d[0] else: for row, np_row in zip(t, d): assert np.all(row == np_row) @pytest.mark.usefixtures('table_types') class TestSetMeta(): def test_set_meta(self, table_types): d = table_types.Table(names=('a', 'b')) d.meta['a'] = 1 d.meta['b'] = 1 d.meta['c'] = 1 d.meta['d'] = 1 assert list(d.meta.keys()) == ['a', 'b', 'c', 'd'] @pytest.mark.usefixtures('table_types') class TestConvertNumpyArray(): def test_convert_numpy_array(self, table_types): d = table_types.Table([[1, 2], [3, 4]], names=('a', 'b')) np_data = np.array(d) if table_types.Table is not MaskedTable: assert np.all(np_data == d.as_array()) assert np_data is not d.as_array() assert d.colnames == list(np_data.dtype.names) np_data = np.array(d, copy=False) if table_types.Table is not MaskedTable: 
assert np.all(np_data == d.as_array()) assert d.colnames == list(np_data.dtype.names) with pytest.raises(ValueError): np_data = np.array(d, dtype=[(str('c'), 'i8'), (str('d'), 'i8')]) def test_as_array_byteswap(self, table_types): """Test for https://github.com/astropy/astropy/pull/4080""" byte_orders = ('>', '<') native_order = byte_orders[sys.byteorder == 'little'] for order in byte_orders: col = table_types.Column([1.0, 2.0], name='a', dtype=order + 'f8') t = table_types.Table([col]) arr = t.as_array() assert arr['a'].dtype.byteorder in (native_order, '=') arr = t.as_array(keep_byteorder=True) if order == native_order: assert arr['a'].dtype.byteorder in (order, '=') else: assert arr['a'].dtype.byteorder == order def test_byteswap_fits_array(self, table_types): """ Test for https://github.com/astropy/astropy/pull/4080, demonstrating that FITS tables are converted to native byte order. """ non_native_order = ('>', '<')[sys.byteorder != 'little'] filename = get_pkg_data_filename('data/tb.fits', 'astropy.io.fits.tests') t = table_types.Table.read(filename) arr = t.as_array() for idx in range(len(arr.dtype)): assert arr.dtype[idx].byteorder != non_native_order with fits.open(filename, character_as_bytes=True) as hdul: data = hdul[1].data for colname in data.columns.names: assert np.all(data[colname] == arr[colname]) arr2 = t.as_array(keep_byteorder=True) for colname in data.columns.names: assert (data[colname].dtype.byteorder == arr2[colname].dtype.byteorder) def _assert_copies(t, t2, deep=True): assert t.colnames == t2.colnames np.testing.assert_array_equal(t.as_array(), t2.as_array()) assert t.meta == t2.meta for col, col2 in zip(t.columns.values(), t2.columns.values()): if deep: assert not np.may_share_memory(col, col2) else: assert np.may_share_memory(col, col2) def test_copy(): t = table.Table([[1, 2, 3], [2, 3, 4]], names=['x', 'y']) t2 = t.copy() _assert_copies(t, t2) def test_copy_masked(): t = table.Table([[1, 2, 3], [2, 3, 4]], names=['x', 'y'], 
masked=True, meta={'name': 'test'}) t['x'].mask == [True, False, True] t2 = t.copy() _assert_copies(t, t2) def test_copy_protocol(): t = table.Table([[1, 2, 3], [2, 3, 4]], names=['x', 'y']) t2 = copy.copy(t) t3 = copy.deepcopy(t) _assert_copies(t, t2, deep=False) _assert_copies(t, t3) def test_disallow_inequality_comparisons(): """ Regression test for #828 - disallow comparison operators on whole Table """ t = table.Table() with pytest.raises(TypeError): t > 2 with pytest.raises(TypeError): t < 1.1 with pytest.raises(TypeError): t >= 5.5 with pytest.raises(TypeError): t <= -1.1 def test_equality(): t = table.Table.read([' a b c d', ' 2 c 7.0 0', ' 2 b 5.0 1', ' 2 b 6.0 2', ' 2 a 4.0 3', ' 0 a 0.0 4', ' 1 b 3.0 5', ' 1 a 2.0 6', ' 1 a 1.0 7', ], format='ascii') # All rows are equal assert np.all(t == t) # Assert no rows are different assert not np.any(t != t) # Check equality result for a given row assert np.all((t == t[3]) == np.array([0, 0, 0, 1, 0, 0, 0, 0], dtype=bool)) # Check inequality result for a given row assert np.all((t != t[3]) == np.array([1, 1, 1, 0, 1, 1, 1, 1], dtype=bool)) t2 = table.Table.read([' a b c d', ' 2 c 7.0 0', ' 2 b 5.0 1', ' 3 b 6.0 2', ' 2 a 4.0 3', ' 0 a 1.0 4', ' 1 b 3.0 5', ' 1 c 2.0 6', ' 1 a 1.0 7', ], format='ascii') # In the above cases, Row.__eq__ gets called, but now need to make sure # Table.__eq__ also gets called. 
assert np.all((t == t2) == np.array([1, 1, 0, 1, 0, 1, 0, 1], dtype=bool)) assert np.all((t != t2) == np.array([0, 0, 1, 0, 1, 0, 1, 0], dtype=bool)) # Check that comparing to a structured array works assert np.all((t == t2.as_array()) == np.array([1, 1, 0, 1, 0, 1, 0, 1], dtype=bool)) assert np.all((t.as_array() == t2) == np.array([1, 1, 0, 1, 0, 1, 0, 1], dtype=bool)) def test_equality_masked(): t = table.Table.read([' a b c d', ' 2 c 7.0 0', ' 2 b 5.0 1', ' 2 b 6.0 2', ' 2 a 4.0 3', ' 0 a 0.0 4', ' 1 b 3.0 5', ' 1 a 2.0 6', ' 1 a 1.0 7', ], format='ascii') # Make into masked table t = table.Table(t, masked=True) # All rows are equal assert np.all(t == t) # Assert no rows are different assert not np.any(t != t) # Check equality result for a given row assert np.all((t == t[3]) == np.array([0, 0, 0, 1, 0, 0, 0, 0], dtype=bool)) # Check inequality result for a given row assert np.all((t != t[3]) == np.array([1, 1, 1, 0, 1, 1, 1, 1], dtype=bool)) t2 = table.Table.read([' a b c d', ' 2 c 7.0 0', ' 2 b 5.0 1', ' 3 b 6.0 2', ' 2 a 4.0 3', ' 0 a 1.0 4', ' 1 b 3.0 5', ' 1 c 2.0 6', ' 1 a 1.0 7', ], format='ascii') # In the above cases, Row.__eq__ gets called, but now need to make sure # Table.__eq__ also gets called. assert np.all((t == t2) == np.array([1, 1, 0, 1, 0, 1, 0, 1], dtype=bool)) assert np.all((t != t2) == np.array([0, 0, 1, 0, 1, 0, 1, 0], dtype=bool)) # Check that masking a value causes the row to differ t.mask['a'][0] = True assert np.all((t == t2) == np.array([0, 1, 0, 1, 0, 1, 0, 1], dtype=bool)) assert np.all((t != t2) == np.array([1, 0, 1, 0, 1, 0, 1, 0], dtype=bool)) # Check that comparing to a structured array works assert np.all((t == t2.as_array()) == np.array([0, 1, 0, 1, 0, 1, 0, 1], dtype=bool)) @pytest.mark.xfail def test_equality_masked_bug(): """ This highlights a Numpy bug. Once it works, it can be moved into the test_equality_masked test. 
Related Numpy bug report: https://github.com/numpy/numpy/issues/3840 """ t = table.Table.read([' a b c d', ' 2 c 7.0 0', ' 2 b 5.0 1', ' 2 b 6.0 2', ' 2 a 4.0 3', ' 0 a 0.0 4', ' 1 b 3.0 5', ' 1 a 2.0 6', ' 1 a 1.0 7', ], format='ascii') t = table.Table(t, masked=True) t2 = table.Table.read([' a b c d', ' 2 c 7.0 0', ' 2 b 5.0 1', ' 3 b 6.0 2', ' 2 a 4.0 3', ' 0 a 1.0 4', ' 1 b 3.0 5', ' 1 c 2.0 6', ' 1 a 1.0 7', ], format='ascii') assert np.all((t.as_array() == t2) == np.array([0, 1, 0, 1, 0, 1, 0, 1], dtype=bool)) # Check that the meta descriptor is working as expected. The MetaBaseTest class # takes care of defining all the tests, and we simply have to define the class # and any minimal set of args to pass. from ...utils.tests.test_metadata import MetaBaseTest class TestMetaTable(MetaBaseTest): test_class = table.Table args = () def test_unicode_content(): # If we don't have unicode literals then return if isinstance('', bytes): return # Define unicode literals string_a = 'астрономическая питона' string_b = 'миллиарды световых лет' a = table.Table( [[string_a, 2], [string_b, 3]], names=('a', 'b')) assert string_a in str(a) # This only works because the coding of this file is utf-8, which # matches the default encoding of Table.__str__ assert string_a.encode('utf-8') in bytes(a) def test_unicode_policy(): t = table.Table.read([' a b c d', ' 2 c 7.0 0', ' 2 b 5.0 1', ' 2 b 6.0 2', ' 2 a 4.0 3', ' 0 a 0.0 4', ' 1 b 3.0 5', ' 1 a 2.0 6', ' 1 a 1.0 7', ], format='ascii') assert_follows_unicode_guidelines(t) def test_unicode_bytestring_conversion(table_types): t = table_types.Table([['abc'], ['def'], [1]], dtype=('S', 'U', 'i')) assert t['col0'].dtype.kind == 'S' assert t['col1'].dtype.kind == 'U' assert t['col2'].dtype.kind == 'i' t1 = t.copy() t1.convert_unicode_to_bytestring() assert t1['col0'].dtype.kind == 'S' assert t1['col1'].dtype.kind == 'S' assert t1['col2'].dtype.kind == 'i' assert t1['col0'][0] == 'abc' assert t1['col1'][0] == 'def' assert t1['col2'][0] == 
1 t1 = t.copy() t1.convert_bytestring_to_unicode() assert t1['col0'].dtype.kind == 'U' assert t1['col1'].dtype.kind == 'U' assert t1['col2'].dtype.kind == 'i' assert t1['col0'][0] == str('abc') assert t1['col1'][0] == str('def') assert t1['col2'][0] == 1 def test_table_deletion(): """ Regression test for the reference cycle discussed in https://github.com/astropy/astropy/issues/2877 """ deleted = set() # A special table subclass which leaves a record when it is finalized class TestTable(table.Table): def __del__(self): deleted.add(id(self)) t = TestTable({'a': [1, 2, 3]}) the_id = id(t) assert t['a'].parent_table is t del t # Cleanup gc.collect() assert the_id in deleted def test_nested_iteration(): """ Regression test for issue 3358 where nested iteration over a single table fails. """ t = table.Table([[0, 1]], names=['a']) out = [] for r1 in t: for r2 in t: out.append((r1['a'], r2['a'])) assert out == [(0, 0), (0, 1), (1, 0), (1, 1)] def test_table_init_from_degenerate_arrays(table_types): t = table_types.Table(np.array([])) assert len(t.columns) == 0 with pytest.raises(ValueError): t = table_types.Table(np.array(0)) t = table_types.Table(np.array([1, 2, 3])) assert len(t.columns) == 3 @pytest.mark.skipif('not HAS_PANDAS') class TestPandas: def test_simple(self): t = table.Table() for endian in ['<', '>']: for kind in ['f', 'i']: for byte in ['2', '4', '8']: dtype = np.dtype(endian + kind + byte) x = np.array([1, 2, 3], dtype=dtype) t[endian + kind + byte] = x t['u'] = ['a', 'b', 'c'] t['s'] = ['a', 'b', 'c'] d = t.to_pandas() for column in t.columns: if column == 'u': assert np.all(t['u'] == np.array(['a', 'b', 'c'])) assert d[column].dtype == np.dtype("O") # upstream feature of pandas elif column == 's': assert np.all(t['s'] == np.array(['a', 'b', 'c'])) assert d[column].dtype == np.dtype("O") # upstream feature of pandas else: # We should be able to compare exact values here assert np.all(t[column] == d[column]) if t[column].dtype.byteorder in ('=', '|'): 
assert d[column].dtype == t[column].dtype else: assert d[column].dtype == t[column].byteswap().newbyteorder().dtype # Regression test for astropy/astropy#1156 - the following code gave a # ValueError: Big-endian buffer not supported on little-endian # compiler. We now automatically swap the endian-ness to native order # upon adding the arrays to the data frame. d[['<i4', '>i4']] d[['<f4', '>f4']] t2 = table.Table.from_pandas(d) for column in t.columns: if column in ('u', 's'): assert np.all(t[column] == t2[column]) else: assert_allclose(t[column], t2[column]) if t[column].dtype.byteorder in ('=', '|'): assert t[column].dtype == t2[column].dtype else: assert t[column].byteswap().newbyteorder().dtype == t2[column].dtype def test_2d(self): t = table.Table() t['a'] = [1, 2, 3] t['b'] = np.ones((3, 2)) with pytest.raises(ValueError) as exc: t.to_pandas() assert exc.value.args[0] == "Cannot convert a table with multi-dimensional columns to a pandas DataFrame" def test_mixin(self): from ...coordinates import SkyCoord t = table.Table() t['c'] = SkyCoord([1, 2, 3], [4, 5, 6], unit='deg') with pytest.raises(ValueError) as exc: t.to_pandas() assert exc.value.args[0] == "Cannot convert a table with mixin columns to a pandas DataFrame" def test_masking(self): t = table.Table(masked=True) t['a'] = [1, 2, 3] t['a'].mask = [True, False, True] t['b'] = [1., 2., 3.] t['b'].mask = [False, False, True] t['u'] = ['a', 'b', 'c'] t['u'].mask = [False, True, False] t['s'] = ['a', 'b', 'c'] t['s'].mask = [False, True, False] d = t.to_pandas() t2 = table.Table.from_pandas(d) for name, column in t.columns.items(): assert np.all(column.data == t2[name].data) assert np.all(column.mask == t2[name].mask) # Masked integer type comes back as float. Nothing we can do about this. 
if column.dtype.kind == 'i': assert t2[name].dtype.kind == 'f' else: if column.dtype.byteorder in ('=', '|'): assert column.dtype == t2[name].dtype else: assert column.byteswap().newbyteorder().dtype == t2[name].dtype @pytest.mark.usefixtures('table_types') class TestReplaceColumn(SetupData): def test_fail_replace_column(self, table_types): """Raise exception when trying to replace column via table.columns object""" self._setup(table_types) t = table_types.Table([self.a, self.b]) with pytest.raises(ValueError): t.columns['a'] = [1, 2, 3] with pytest.raises(ValueError): t.replace_column('not there', [1, 2, 3]) def test_replace_column(self, table_types): """Replace existing column with a new column""" self._setup(table_types) t = table_types.Table([self.a, self.b]) ta = t['a'] tb = t['b'] vals = [1.2, 3.4, 5.6] for col in (vals, table_types.Column(vals), table_types.Column(vals, name='a'), table_types.Column(vals, name='b')): t.replace_column('a', col) assert np.all(t['a'] == vals) assert t['a'] is not ta # New a column assert t['b'] is tb # Original b column unchanged assert t.colnames == ['a', 'b'] assert t['a'].meta == {} assert t['a'].format is None def test_replace_index_column(self, table_types): """Replace index column and generate expected exception""" self._setup(table_types) t = table_types.Table([self.a, self.b]) t.add_index('a') with pytest.raises(ValueError) as err: t.replace_column('a', [1, 2, 3]) assert err.value.args[0] == 'cannot replace a table index column' class Test__Astropy_Table__(): """ Test initializing a Table subclass from a table-like object that implements the __astropy_table__ interface method. 
""" class SimpleTable: def __init__(self): self.columns = [[1, 2, 3], [4, 5, 6], [7, 8, 9] * u.m] self.names = ['a', 'b', 'c'] self.meta = OrderedDict([('a', 1), ('b', 2)]) def __astropy_table__(self, cls, copy, **kwargs): a, b, c = self.columns c.info.name = 'c' cols = [table.Column(a, name='a'), table.MaskedColumn(b, name='b'), c] names = [col.info.name for col in cols] return cls(cols, names=names, copy=copy, meta=kwargs or self.meta) def test_simple_1(self): """Make a SimpleTable and convert to Table, QTable with copy=False, True""" for table_cls in (table.Table, table.QTable): col_c_class = u.Quantity if table_cls is table.QTable else table.MaskedColumn for cpy in (False, True): st = self.SimpleTable() # Test putting in a non-native kwarg `extra_meta` to Table initializer t = table_cls(st, copy=cpy, extra_meta='extra!') assert t.colnames == ['a', 'b', 'c'] assert t.meta == {'extra_meta': 'extra!'} assert np.all(t['a'] == st.columns[0]) assert np.all(t['b'] == st.columns[1]) vals = t['c'].value if table_cls is table.QTable else t['c'] assert np.all(st.columns[2].value == vals) assert isinstance(t['a'], table.MaskedColumn) assert isinstance(t['b'], table.MaskedColumn) assert isinstance(t['c'], col_c_class) assert t['c'].unit is u.m assert type(t) is table_cls # Copy being respected? t['a'][0] = 10 assert st.columns[0][0] == 1 if cpy else 10 def test_simple_2(self): """Test converting a SimpleTable and changing column names and types""" st = self.SimpleTable() dtypes = [np.int32, np.float32, np.float16] names = ['a', 'b', 'c'] t = table.Table(st, dtype=dtypes, names=names, meta=OrderedDict([('c', 3)])) assert t.colnames == names assert all(col.dtype.type is dtype for col, dtype in zip(t.columns.values(), dtypes)) # The supplied meta is ignored. This is consistent with current # behavior when initializing from an existing astropy Table. 
assert t.meta == st.meta def test_kwargs_exception(self): """If extra kwargs provided but without initializing with a table-like object, exception is raised""" with pytest.raises(TypeError) as err: table.Table([[1]], extra_meta='extra!') assert '__init__() got unexpected keyword argument' in str(err) def test_replace_column_qtable(): """Replace existing Quantity column with a new column in a QTable""" a = [1, 2, 3] * u.m b = [4, 5, 6] t = table.QTable([a, b], names=['a', 'b']) ta = t['a'] tb = t['b'] ta.info.meta = {'aa': [0, 1, 2, 3, 4]} ta.info.format = '%f' t.replace_column('a', a.to('cm')) assert np.all(t['a'] == ta) assert t['a'] is not ta # New a column assert t['b'] is tb # Original b column unchanged assert t.colnames == ['a', 'b'] assert t['a'].info.meta is None assert t['a'].info.format is None def test_replace_update_column_via_setitem(): """ Test table update like ``t['a'] = value``. This leverages off the already well-tested ``replace_column`` and in-place update ``t['a'][:] = value``, so this testing is fairly light. """ a = [1, 2] * u.m b = [3, 4] t = table.QTable([a, b], names=['a', 'b']) assert isinstance(t['a'], u.Quantity) # Inplace update ta = t['a'] t['a'] = 5 * u.m assert np.all(t['a'] == [5, 5] * u.m) assert t['a'] is ta # Replace t['a'] = [5, 6] assert np.all(t['a'] == [5, 6]) assert isinstance(t['a'], table.Column) assert t['a'] is not ta def test_replace_update_column_via_setitem_warnings_normal(): """ Test warnings related to table replace change in #5556: Normal warning-free replace """ t = table.Table([[1, 2, 3], [4, 5, 6]], names=['a', 'b']) with catch_warnings() as w: with table.conf.set_temp('replace_warnings', ['refcount', 'attributes', 'slice']): t['a'] = 0 # in-place update assert len(w) == 0 t['a'] = [10, 20, 30] # replace column assert len(w) == 0 def test_replace_update_column_via_setitem_warnings_slice(): """ Test warnings related to table replace change in #5556: Replace a slice, one warning. 
""" t = table.Table([[1, 2, 3], [4, 5, 6]], names=['a', 'b']) with catch_warnings() as w: with table.conf.set_temp('replace_warnings', ['refcount', 'attributes', 'slice']): t2 = t[:2] t2['a'] = 0 # in-place slice update assert np.all(t['a'] == [0, 0, 3]) assert len(w) == 0 t2['a'] = [10, 20] # replace slice assert len(w) == 1 assert "replaced column 'a' which looks like an array slice" in str(w[0].message) def test_replace_update_column_via_setitem_warnings_attributes(): """ Test warnings related to table replace change in #5556: Lost attributes. """ t = table.Table([[1, 2, 3], [4, 5, 6]], names=['a', 'b']) t['a'].unit = 'm' with catch_warnings() as w: with table.conf.set_temp('replace_warnings', ['refcount', 'attributes', 'slice']): t['a'] = [10, 20, 30] assert len(w) == 1 assert "replaced column 'a' and column attributes ['unit']" in str(w[0].message) def test_replace_update_column_via_setitem_warnings_refcount(): """ Test warnings related to table replace change in #5556: Reference count changes. """ t = table.Table([[1, 2, 3], [4, 5, 6]], names=['a', 'b']) ta = t['a'] # Generate an extra reference to original column with catch_warnings() as w: with table.conf.set_temp('replace_warnings', ['refcount', 'attributes', 'slice']): t['a'] = [10, 20, 30] assert len(w) == 1 assert "replaced column 'a' and the number of references" in str(w[0].message) def test_replace_update_column_via_setitem_warnings_always(): """ Test warnings related to table replace change in #5556: Test 'always' setting that raises warning for any replace. 
""" t = table.Table([[1, 2, 3], [4, 5, 6]], names=['a', 'b']) with catch_warnings() as w: with table.conf.set_temp('replace_warnings', ['always']): t['a'] = 0 # in-place slice update assert len(w) == 0 from inspect import currentframe, getframeinfo frameinfo = getframeinfo(currentframe()) t['a'] = [10, 20, 30] # replace column assert len(w) == 1 assert "replaced column 'a'" == str(w[0].message) # Make sure the warning points back to the user code line assert w[0].lineno == frameinfo.lineno + 1 assert w[0].category is table.TableReplaceWarning assert 'test_table' in w[0].filename def test_replace_update_column_via_setitem_replace_inplace(): """ Test the replace_inplace config option related to #5556. In this case no replace is done. """ t = table.Table([[1, 2, 3], [4, 5, 6]], names=['a', 'b']) ta = t['a'] t['a'].unit = 'm' with catch_warnings() as w: with table.conf.set_temp('replace_inplace', True): with table.conf.set_temp('replace_warnings', ['always', 'refcount', 'attributes', 'slice']): t['a'] = 0 # in-place update assert len(w) == 0 assert ta is t['a'] t['a'] = [10, 20, 30] # normally replaces column, but not now assert len(w) == 0 assert ta is t['a'] assert np.all(t['a'] == [10, 20, 30]) def test_primary_key_is_inherited(): """Test whether a new Table inherits the primary_key attribute from its parent Table. 
Issue #4672""" t = table.Table([(2, 3, 2, 1), (8, 7, 6, 5)], names=('a', 'b')) t.add_index('a') original_key = t.primary_key # can't test if tuples are equal, so just check content assert original_key[0] is 'a' t2 = t[:] t3 = t.copy() t4 = table.Table(t) # test whether the reference is the same in the following assert original_key == t2.primary_key assert original_key == t3.primary_key assert original_key == t4.primary_key # just test one element, assume rest are equal if assert passes assert t.loc[1] == t2.loc[1] assert t.loc[1] == t3.loc[1] assert t.loc[1] == t4.loc[1] def test_qtable_read_for_ipac_table_with_char_columns(): '''Test that a char column of a QTable is assigned no unit and not a dimensionless unit, otherwise conversion of reader output to QTable fails.''' t1 = table.QTable([["A"]], names="B") out = StringIO() t1.write(out, format="ascii.ipac") t2 = table.QTable.read(out.getvalue(), format="ascii.ipac", guess=False) assert t2["B"].unit is None
bsd-3-clause
jeffery-do/Vizdoombot
doom/lib/python3.5/site-packages/theano/sparse/tests/test_basic.py
4
119562
from itertools import product

import time
import unittest

from nose.plugins.skip import SkipTest
import numpy
from six.moves import xrange

try:
    import scipy.sparse as sp
    import scipy.sparse
    from scipy.sparse import csr_matrix
except ImportError:
    pass  # The variable enable_sparse will be used to disable the test file.

import theano
from theano import tensor
from theano import sparse
from theano import compile, config, gof
from theano.sparse import enable_sparse
from theano.tensor.basic import _allclose
from theano.tests.unittest_tools import attr

if not enable_sparse:
    raise SkipTest('Optional package SciPy not installed')

from theano.sparse.basic import _is_dense, _is_sparse, _mtypes
from theano.sparse.basic import _is_dense_variable, _is_sparse_variable
from theano.sparse import (
    verify_grad_sparse, as_sparse_variable,
    CSC, CSM, CSMProperties, csm_properties,
    SparseType, CSMGrad,
    StructuredDot,
    StructuredDotGradCSC, StructuredDotGradCSR,
    AddSS, AddSD, MulSS, MulSD, Transpose, Neg, Remove0,
    add, mul, structured_dot, transpose,
    csc_from_dense, csr_from_dense, dense_from_sparse,
    Dot, Usmm, sp_ones_like, GetItemScalar, GetItemList, GetItem2Lists,
    SparseFromDense,
    Cast, cast, HStack, VStack, AddSSData, add_s_s_data,
    structured_minimum, structured_maximum, structured_add,
    mul_s_v, structured_add_s_v,
    SamplingDot, sampling_dot,
    Diag, diag, SquareDiagonal, square_diagonal,
    EnsureSortedIndices, ensure_sorted_indices, clean,
    ConstructSparseFromList, construct_sparse_from_list,
    TrueDot, true_dot, eq, neq, le, ge, gt, lt)

# Probability distributions are currently tested in test_sp2.py
# from theano.sparse import (
#     Poisson, poisson, Binomial, Multinomial, multinomial)

from theano.sparse.opt import (StructuredDotCSC, UsmmCscDense, CSMGradC)

from theano.tests import unittest_tools as utt


def as_sparse_format(data, format):
    """Convert `data` to the scipy sparse matrix class named by `format`
    ('csc' or 'csr')."""
    if format == 'csc':
        return scipy.sparse.csc_matrix(data)
    elif format == 'csr':
        return scipy.sparse.csr_matrix(data)
    else:
        raise NotImplementedError()


def eval_outputs(outputs):
    """Compile a no-input Theano function for `outputs` and return the
    value of the first output."""
    return compile.function([], outputs)()[0]


# scipy 0.17 will return sparse values in all cases while previous
# version sometimes wouldn't.  This will make everything dense so that
# we can use assert_allclose.
def as_ndarray(val):
    """Densify `val` if it is a scipy sparse matrix; pass through otherwise."""
    if hasattr(val, 'toarray'):
        return val.toarray()
    return val


def random_lil(shape, dtype, nnz):
    """Return a random scipy lil_matrix of the given `shape` and `dtype`
    with (up to) `nnz` non-zero entries at random positions."""
    rval = sp.lil_matrix(shape, dtype=dtype)
    huge = 2 ** 30
    for k in range(nnz):
        # set non-zeros in random locations (row x, col y).
        # numpy.random.random_integers is deprecated (removed in recent
        # NumPy); randint with an exclusive upper bound of huge + 1
        # reproduces the old inclusive [1, huge] range.
        idx = numpy.random.randint(1, huge + 1, size=2) % shape
        value = numpy.random.rand()
        # if dtype *int*, value will always be zeros!
        if "int" in dtype:
            value = int(value * 100)
        # The call to tuple is needed as scipy 0.13.1 do not support
        # ndarray with length 2 as idx tuple.
        rval.__setitem__(
            tuple(idx),
            value)
    return rval


def sparse_random_inputs(format, shape, n=1, out_dtype=None, p=0.5, gap=None,
                         explicit_zero=False, unsorted_indices=False):
    """Return a tuple containing everything needed to perform a test.

    If `out_dtype` is `None`, theano.config.floatX is used.

    :param format: Sparse format.
    :param shape: Shape of data.
    :param n: Number of variable.
    :param out_dtype: dtype of output.
    :param p: Sparsity proportion.
    :param gap: Tuple for the range of the random sample. When
                length is 1, it is assumed to be the exclusive
                max, when `gap` = (`a`, `b`) it provide a sample
                from [a, b[. If `None` is used, it provide [0, 1]
                for float dtypes and [0, 50[ for integer dtypes.
    :param explicit_zero: When True, we add explicit zero in the
                          returned sparse matrix
    :param unsorted_indices: when True, we make sure there is
                             unsorted indices in the returned sparse
                             matrix.
    :return: (variable, data) where both `variable` and `data` are list.

    :note: explicit_zero and unsorted_indices was added in Theano 0.6rc4
    """
    if out_dtype is None:
        out_dtype = theano.config.floatX

    assert 0 <= p <= 1
    assert len(shape) == 2
    assert out_dtype in sparse.all_dtypes
    assert gap is None or isinstance(gap, (tuple, list))
    if gap is not None and out_dtype.startswith('u'):
        assert gap[0] >= 0

    def _rand():
        # Bernoulli mask selecting which entries are non-zero, times a
        # random value drawn according to dtype / gap.
        where = numpy.random.binomial(1, p, size=shape).astype('int8')

        if out_dtype in sparse.discrete_dtypes:
            if not gap:
                value = numpy.random.randint(50, size=shape)
            elif len(gap) == 2:
                value = numpy.random.randint(gap[0], gap[1], size=shape)
            else:
                value = numpy.random.randint(gap[0], size=shape)
        else:
            if not gap:
                value = numpy.random.random(shape)
            elif len(gap) == 2:
                a, b = gap
                value = a + numpy.random.random(shape) * (b - a)
            else:
                value = numpy.random.random(shape) * gap[0]
        return (where * value).astype(out_dtype)

    variable = [getattr(theano.sparse, format + '_matrix')(dtype=out_dtype)
                for k in range(n)]
    data = [getattr(scipy.sparse, format + '_matrix')(_rand(),
                                                      dtype=out_dtype)
            for k in range(n)]
    if unsorted_indices:
        for idx in range(n):
            d = data[idx]
            # Fancy row indexing leaves the indices unsorted.
            d = d[list(range(d.shape[0]))]
            assert not d.has_sorted_indices
            data[idx] = d
    if explicit_zero:
        for idx in range(n):
            assert data[idx].nnz > 1, (
                "can't make a sparse matrix with explicit 0")
            d_idx = numpy.random.randint(data[idx].nnz)
            data[idx].data[d_idx] = 0

    # numpy 1.5.0 with scipy 0.9.0 have scipy.sparse.XXX_matrix return
    # typenum 10(ulonglong) instead of 8(uint64) event if they are the same!
    # Theano don't like ulonglong type_num
    dtype = numpy.dtype(out_dtype)  # Convert into dtype object.
    # Normalize the type number when the dtype is logically the same
    # (see the ulonglong vs uint64 note above) so Theano accepts it.
    if data[0].dtype.num != dtype.num and dtype.str == data[0].dtype.str:
        data[0].data = theano._asarray(data[0].data, out_dtype)
    assert data[0].dtype.num == dtype.num
    return (variable, data)


class T_verify_grad_sparse(unittest.TestCase):
    """Check that verify_grad_sparse detects an Op with a wrong gradient."""

    class FailOp(gof.op.Op):
        # Deliberately broken Op: perform() negates its input while
        # grad() returns the unmodified upstream gradient, so gradient
        # verification must fail for it.
        def __init__(self, structured):
            self.structured = structured

        def __eq__(self, other):
            return (type(self) == type(other)) and \
                self.structured == other.structured

        def __hash__(self):
            return hash(type(self)) ^ hash(self.structured)

        def make_node(self, x):
            x = as_sparse_variable(x)
            return gof.Apply(self, [x], [x.type()])

        def perform(self, node, inputs, outputs):
            (x,) = inputs
            (out,) = outputs
            assert _is_sparse(x)
            out[0] = -x

        def grad(self, inputs, gout):
            # Wrong on purpose: does not account for the negation done
            # in perform().
            (x,) = inputs
            (gz,) = gout
            assert _is_sparse_variable(x) and _is_sparse_variable(gz)
            if self.structured:
                return sp_ones_like(x) * dense_from_sparse(gz),
            else:
                return gz,

        def infer_shape(self, node, shapes):
            return [shapes[0]]

    def test_grad_fail(self):
        # Both the structured and unstructured variants must be flagged.
        self.assertRaises(verify_grad_sparse.E_grad,
                          verify_grad_sparse,
                          self.FailOp(structured=False),
                          [sp.csr_matrix(random_lil((10, 40),
                                                    config.floatX, 3))])

        self.assertRaises(verify_grad_sparse.E_grad,
                          verify_grad_sparse,
                          self.FailOp(structured=True),
                          [sp.csr_matrix(random_lil((10, 40),
                                                    config.floatX, 3))])


class T_transpose(unittest.TestCase):
    """Transposing a sparse variable must swap shape and flip csc<->csr."""

    def setUp(self):
        utt.seed_rng()

    def test_transpose_csc(self):
        # NOTE(review): this local `sp` shadows the module-level alias
        # `sp` (scipy.sparse) for the rest of this method.
        sp = scipy.sparse.csc_matrix(scipy.sparse.eye(5, 3))
        a = as_sparse_variable(sp)
        # as_sparse_variable copies constant data rather than aliasing it.
        self.assertFalse(a.data is sp)
        self.assertTrue(a.data.shape == (5, 3))
        self.assertTrue(a.type.dtype == 'float64', a.type.dtype)
        self.assertTrue(a.type.format == 'csc', a.type.format)
        ta = transpose(a)
        self.assertTrue(ta.type.dtype == 'float64', ta.type.dtype)
        self.assertTrue(ta.type.format == 'csr', ta.type.format)

        vta = eval_outputs([ta])
        self.assertTrue(vta.shape == (3, 5))

    def test_transpose_csr(self):
        a = as_sparse_variable(scipy.sparse.csr_matrix(scipy.sparse.eye(5, 3)))
        self.assertTrue(a.data.shape == (5, 3))
self.assertTrue(a.type.dtype == 'float64') self.assertTrue(a.type.format == 'csr') ta = transpose(a) self.assertTrue(ta.type.dtype == 'float64', ta.type.dtype) self.assertTrue(ta.type.format == 'csc', ta.type.format) vta = eval_outputs([ta]) self.assertTrue(vta.shape == (3, 5)) class SparseInferShapeTester(utt.InferShapeTester): def test_getitem_2d(self): raise SkipTest('infer_shape not implemented for GetItem2d yet') def test_getitem_scalar(self): x = SparseType('csr', dtype=config.floatX)() self._compile_and_check([x], [x[2, 2]], [sp.csr_matrix(random_lil((10, 40), config.floatX, 3))], GetItemScalar) def test_csm(self): for sparsetype in ('csr', 'csc'): x = tensor.vector() y = tensor.ivector() z = tensor.ivector() s = tensor.ivector() call = getattr(sp, sparsetype + '_matrix') spm = call(random_lil((300, 400), config.floatX, 5)) out = CSM(sparsetype)(x, y, z, s) self._compile_and_check([x, y, z, s], [out], [spm.data, spm.indices, spm.indptr, spm.shape], CSM ) def test_csm_grad(self): for sparsetype in ('csr', 'csc'): x = tensor.vector() y = tensor.ivector() z = tensor.ivector() s = tensor.ivector() call = getattr(sp, sparsetype + '_matrix') spm = call(random_lil((300, 400), config.floatX, 5)) out = tensor.grad(dense_from_sparse( CSM(sparsetype)(x, y, z, s) ).sum(), x) self._compile_and_check([x, y, z, s], [out], [spm.data, spm.indices, spm.indptr, spm.shape], (CSMGrad, CSMGradC) ) def test_transpose(self): x = SparseType('csr', dtype=config.floatX)() self._compile_and_check([x], [x.T], [sp.csr_matrix(random_lil((10, 40), config.floatX, 3))], Transpose) def test_neg(self): x = SparseType('csr', dtype=config.floatX)() self._compile_and_check([x], [-x], [sp.csr_matrix(random_lil((10, 40), config.floatX, 3))], Neg) def test_add_ss(self): x = SparseType('csr', dtype=config.floatX)() y = SparseType('csr', dtype=config.floatX)() self._compile_and_check([x, y], [x + y], [sp.csr_matrix(random_lil((10, 40), config.floatX, 3)), sp.csr_matrix(random_lil((10, 40), 
config.floatX, 3))], AddSS) def test_add_sd(self): x = SparseType('csr', dtype=config.floatX)() y = tensor.matrix() self._compile_and_check( [x, y], [x + y], [sp.csr_matrix(random_lil((10, 40), config.floatX, 3)), numpy.random.randn(10, 40).astype(config.floatX)], (AddSD, sparse.opt.AddSD_ccode)) def test_mul_ss(self): x = SparseType('csr', dtype=config.floatX)() y = SparseType('csr', dtype=config.floatX)() self._compile_and_check([x, y], [x * y], [sp.csr_matrix(random_lil((10, 40), config.floatX, 3)), ] * 2, MulSS) def test_mul_sd(self): x = SparseType('csr', dtype=config.floatX)() y = tensor.matrix() self._compile_and_check( [x, y], [x * y], [sp.csr_matrix(random_lil((10, 40), config.floatX, 3)), numpy.random.randn(10, 40).astype(config.floatX)], MulSD, excluding=["local_mul_s_d"]) def test_remove0(self): x = SparseType('csr', dtype=config.floatX)() self._compile_and_check([x], [Remove0()(x)], [sp.csr_matrix(random_lil((10, 40), config.floatX, 3))], Remove0) def test_dot(self): x = SparseType('csc', dtype=config.floatX)() y = SparseType('csc', dtype=config.floatX)() self._compile_and_check( [x, y], [Dot()(x, y)], [sp.csc_matrix(random_lil((4, 5), config.floatX, 3)), sp.csc_matrix(random_lil((5, 3), config.floatX, 3))], Dot) def test_structured_dot(self): x = SparseType('csc', dtype=config.floatX)() y = SparseType('csc', dtype=config.floatX)() self._compile_and_check( [x, y], [structured_dot(x, y)], [sp.csc_matrix(random_lil((4, 5), config.floatX, 3)), sp.csc_matrix(random_lil((5, 3), config.floatX, 3))], StructuredDot) def test_structured_dot_grad(self): # We also need the grad of CSM to be implemetned. 
raise SkipTest('infer_shape not implemented for the grad' ' of structured_dot') for format, op in [('csc', StructuredDotGradCSC), ('csr', StructuredDotGradCSR)]: x = SparseType(format, dtype=config.floatX)() y = SparseType(format, dtype=config.floatX)() grads = tensor.grad(dense_from_sparse(structured_dot(x, y)).sum(), [x, y]) self._compile_and_check( [x, y], [grads[0]], [as_sparse_format(random_lil((4, 5), config.floatX, 3), format), as_sparse_format(random_lil((5, 3), config.floatX, 3), format)], op) self._compile_and_check( [x, y], [grads[1]], [as_sparse_format(random_lil((4, 5), config.floatX, 3), format), as_sparse_format(random_lil((5, 3), config.floatX, 3), format)], op) def test_dense_from_sparse(self): x = SparseType('csr', dtype=config.floatX)() self._compile_and_check([x], [dense_from_sparse(x)], [sp.csr_matrix(random_lil((10, 40), config.floatX, 3))], dense_from_sparse.__class__) def test_sparse_from_dense(self): x = tensor.matrix() self._compile_and_check([x], [csc_from_dense(x)], [numpy.random.randn(10, 40).astype( config.floatX)], csc_from_dense.__class__) def test_sparse_from_list(self): x = tensor.matrix('x') vals = tensor.matrix('vals') ilist = tensor.lvector('ilist') out = construct_sparse_from_list(x, vals, ilist) self._compile_and_check( [x, vals, ilist], [out], [numpy.zeros((40, 10), dtype=config.floatX), numpy.random.randn(12, 10).astype(config.floatX), numpy.random.randint(low=0, high=40, size=(12,))], ConstructSparseFromList ) class TestConstructSparseFromList(unittest.TestCase): def test_adv_sub1_sparse_grad(self): v = theano.tensor.ivector() # Assert we don't create a sparse grad by default m = theano.tensor.matrix() sub = m[v] g = theano.grad(sub.sum(), m) assert isinstance(g.owner.op, tensor.AdvancedIncSubtensor1) # Test that we create a sparse grad when asked # USER INTERFACE m = theano.tensor.matrix() v = theano.tensor.ivector() sub = theano.sparse_grad(m[v]) g = theano.grad(sub.sum(), m) assert isinstance(g.owner.op, 
ConstructSparseFromList) # Test that we create a sparse grad when asked # Op INTERFACE m = theano.tensor.matrix() v = theano.tensor.ivector() sub = theano.tensor.AdvancedSubtensor1(sparse_grad=True)(m, v) g = theano.grad(sub.sum(), m) assert isinstance(g.owner.op, ConstructSparseFromList) # Test the sparse grad valm = numpy.random.rand(5, 4).astype(config.floatX) valv = numpy.random.random_integers(0, 4, 10) m = theano.tensor.matrix() shared_v = theano.shared(valv) def fn(m): return theano.sparse_grad(m[shared_v]) verify_grad_sparse(fn, [valm]) def test_err(self): for ndim in [1, 3]: t = theano.tensor.TensorType(dtype=config.floatX, broadcastable=(False,) * ndim)() v = theano.tensor.ivector() sub = t[v] # Assert we don't create a sparse grad by default g = theano.grad(sub.sum(), t) assert isinstance(g.owner.op, tensor.AdvancedIncSubtensor1) # Test that we raise an error, as we can't create a sparse # grad from tensors that don't have 2 dimensions. sub = theano.sparse_grad(sub) self.assertRaises(TypeError, theano.grad, sub.sum(), t) class T_AddMul(unittest.TestCase): def testAddSS(self): self._testSS(add) def testAddSD(self): self._testSD(add) def testAddDS(self): self._testDS(add) def testMulSS(self): self._testSS(mul, numpy.array([[1., 0], [3, 0], [0, 6]]), numpy.array([[1., 2], [3, 0], [0, 6]])) def testMulSD(self): self._testSD(mul, numpy.array([[1., 0], [3, 0], [0, 6]]), numpy.array([[1., 2], [3, 0], [0, 6]])) def testMulDS(self): self._testDS(mul, numpy.array([[1., 0], [3, 0], [0, 6]]), numpy.array([[1., 2], [3, 0], [0, 6]])) def _testSS(self, op, array1=numpy.array([[1., 0], [3, 0], [0, 6]]), array2=numpy.asarray([[0, 2.], [0, 4], [5, 0]])): for mtype1, mtype2 in product(_mtypes, _mtypes): for dtype1, dtype2 in [('float64', 'int8'), ('int8', 'float64'), ('float64', 'float64'), ]: a = mtype1(array1).astype(dtype1) aR = as_sparse_variable(a) self.assertFalse(aR.data is a) self.assertTrue(_is_sparse(a)) self.assertTrue(_is_sparse_variable(aR)) b = 
mtype2(array2).astype(dtype2) bR = as_sparse_variable(b) self.assertFalse(bR.data is b) self.assertTrue(_is_sparse(b)) self.assertTrue(_is_sparse_variable(bR)) apb = op(aR, bR) self.assertTrue(_is_sparse_variable(apb)) self.assertTrue(apb.type.format == aR.type.format, apb.type.format) val = eval_outputs([apb]) self.assertTrue(val.shape == (3, 2)) if op is add: self.assertTrue(numpy.all(val.todense() == (array1 + array2))) if dtype1.startswith('float') and dtype2.startswith('float'): verify_grad_sparse(op, [a, b], structured=False) elif op is mul: self.assertTrue(numpy.all(val.todense() == (array1 * array2))) if dtype1.startswith('float') and dtype2.startswith('float'): verify_grad_sparse(op, [a, b], structured=False) def _testSD(self, op, array1=numpy.array([[1., 0], [3, 0], [0, 6]]), array2=numpy.asarray([[0, 2.], [0, 4], [5, 0]])): for mtype in _mtypes: for a in [numpy.array(array1), tensor.as_tensor_variable(array1), theano.shared(array1)]: for dtype1, dtype2 in [('float64', 'int8'), ('int8', 'float64'), # Needed to test the grad ('float32', 'float64'), ]: a = a.astype(dtype1) b = mtype(array2).astype(dtype2) bR = as_sparse_variable(b) self.assertFalse(bR.data is b) # constants are copied self.assertTrue(_is_sparse(b)) self.assertTrue(_is_sparse_variable(bR)) apb = op(a, bR) val = eval_outputs([apb]) self.assertTrue(val.shape == (3, 2)) if op is add: self.assertTrue(_is_dense_variable(apb)) self.assertTrue(numpy.all(val == (array1 + b))) ans = numpy.array([[1., 2], [3, 4], [5, 6]]) self.assertTrue(numpy.all(val == ans)) if isinstance(a, theano.Constant): a = a.data if getattr(a, 'owner', None): continue if dtype1.startswith('float') and dtype2.startswith('float'): verify_grad_sparse(op, [a, b], structured=True) elif op is mul: self.assertTrue(_is_sparse_variable(apb)) self.assertTrue(numpy.all(val.todense() == (b.multiply(array1)))) self.assertTrue(numpy.all(val.todense() == numpy.array( [[1, 0], [9, 0], [0, 36]]))) if isinstance(a, theano.Constant): a = a.data 
if getattr(a, 'owner', None): continue if dtype1.startswith('float') and dtype2.startswith('float'): verify_grad_sparse(op, [a, b], structured=False) def _testDS(self, op, array1=numpy.array([[1., 0], [3, 0], [0, 6]]), array2=numpy.asarray([[0, 2.], [0, 4], [5, 0]])): for mtype in _mtypes: for b in [numpy.asarray(array2), tensor.as_tensor_variable(array2), theano.shared(array2)]: for dtype1, dtype2 in [('float64', 'int8'), ('int8', 'float64'), ]: a = mtype(array1).astype(dtype1) aR = as_sparse_variable(a) self.assertFalse(aR.data is a) self.assertTrue(_is_sparse(a)) self.assertTrue(_is_sparse_variable(aR)) b = b.astype(dtype2) apb = op(aR, b) val = eval_outputs([apb]) self.assertTrue(val.shape == (3, 2)) if op is add: self.assertTrue(_is_dense_variable(apb)) self.assertTrue(numpy.all(val == (a + array2))) ans = numpy.array([[1., 2], [3, 4], [5, 6]]) self.assertTrue(numpy.all(val == ans)) if isinstance(b, theano.Constant): b = b.data if dtype1.startswith('float') and dtype2.startswith('float'): verify_grad_sparse(op, [a, b], structured=True) elif op is mul: self.assertTrue(_is_sparse_variable(apb)) ans = numpy.array([[1, 0], [9, 0], [0, 36]]) self.assertTrue(numpy.all(val.todense() == (a.multiply(array2)))) self.assertTrue(numpy.all(val.todense() == ans)) if isinstance(b, theano.Constant): b = b.data if dtype1.startswith('float') and dtype2.startswith('float'): verify_grad_sparse(op, [a, b], structured=False) class test_comparison(unittest.TestCase): def setUp(self): utt.seed_rng() # took from tensor basic_test.py def _rand_ranged(self, min, max, shape): return numpy.asarray(numpy.random.rand(*shape) * (max - min) + min, dtype=config.floatX) tests = [lambda x, y: x > y, lambda x, y: x < y, lambda x, y: x >= y, lambda x, y: x <= y] testsDic = {gt: lambda x, y: x > y, lt: lambda x, y: x < y, ge: lambda x, y: x >= y, le: lambda x, y: x <= y} def __generalized_ss_test(self, theanop, symbolicType, testOp, scipyType): scipy_ver = [int(n) for n in 
scipy.__version__.split('.')[:2]] if (bool(scipy_ver < [0, 13])): raise SkipTest("comparison operators need newer release of scipy") x = symbolicType() y = symbolicType() op = theanop(x, y) f = theano.function([x, y], op) m1 = scipyType(random_lil((10, 40), config.floatX, 3)) m2 = scipyType(random_lil((10, 40), config.floatX, 3)) self.assertTrue(numpy.array_equal(f(m1, m2).data, testOp(m1, m2).data)) def __generalized_sd_test(self, theanop, symbolicType, testOp, scipyType): scipy_ver = [int(n) for n in scipy.__version__.split('.')[:2]] if (bool(scipy_ver < [0, 13])): raise SkipTest("comparison operators need newer release of scipy") x = symbolicType() y = theano.tensor.matrix() op = theanop(x, y) f = theano.function([x, y], op) m1 = scipyType(random_lil((10, 40), config.floatX, 3)) m2 = self._rand_ranged(1000, -1000, [10, 40]) self.assertTrue(numpy.array_equal(f(m1, m2).data, testOp(m1, m2).data)) def __generalized_ds_test(self, theanop, symbolicType, testOp, scipyType): scipy_ver = [int(n) for n in scipy.__version__.split('.')[:2]] if (bool(scipy_ver < [0, 13])): raise SkipTest("comparison operators need newer release of scipy") x = symbolicType() y = theano.tensor.matrix() op = theanop(y, x) f = theano.function([y, x], op) m1 = scipyType(random_lil((10, 40), config.floatX, 3)) m2 = self._rand_ranged(1000, -1000, [10, 40]) self.assertTrue(numpy.array_equal(f(m2, m1).data, testOp(m2, m1).data)) def test_ss_csr_comparison(self): for op in self.tests: self.__generalized_ss_test(op, sparse.csr_matrix, op, sp.csr_matrix) def test_ss_csc_comparison(self): for op in self.tests: self.__generalized_ss_test(op, sparse.csc_matrix, op, sp.csc_matrix) def test_sd_csr_comparison(self): for op in self.tests: self.__generalized_sd_test(op, sparse.csr_matrix, op, sp.csr_matrix) def test_sd_csc_comparison(self): for op in self.tests: self.__generalized_sd_test(op, sparse.csc_matrix, op, sp.csc_matrix) def test_ds_csc_comparison(self): for op in self.testsDic: 
self.__generalized_ds_test(op, sparse.csc_matrix, self.testsDic[op], sp.csc_matrix) def test_ds_csr_comparison(self): for op in self.testsDic: self.__generalized_ds_test(op, sparse.csr_matrix, self.testsDic[op], sp.csr_matrix) def test_equality_case(self): """ Test assuring normal behaviour when values in the matrices are equal """ scipy_ver = [int(n) for n in scipy.__version__.split('.')[:2]] if (bool(scipy_ver < [0, 13])): raise SkipTest("comparison operators need newer release of scipy") x = sparse.csc_matrix() y = theano.tensor.matrix() m1 = sp.csc_matrix((2, 2), dtype=theano.config.floatX) m2 = numpy.asarray([[0, 0], [0, 0]], dtype=theano.config.floatX) for func in self.testsDic: op = func(y, x) f = theano.function([y, x], op) self.assertTrue(numpy.array_equal(f(m2, m1), self.testsDic[func](m2, m1))) class T_conversion(unittest.TestCase): def setUp(self): utt.seed_rng() if 0: def test0(self): a = tensor.as_tensor_variable(numpy.random.rand(5)) s = csc_from_dense(a) val = eval_outputs([s]) self.assertTrue(str(val.dtype) == 'float64') self.assertTrue(val.format == 'csc') if 0: def test1(self): a = tensor.as_tensor_variable(numpy.random.rand(5)) s = csr_from_dense(a) val = eval_outputs([s]) self.assertTrue(str(val.dtype) == 'float64') self.assertTrue(val.format == 'csr') def test_dense_from_sparse(self): # call dense_from_sparse for t in _mtypes: s = t(scipy.sparse.identity(5)) s = as_sparse_variable(s) d = dense_from_sparse(s) val = eval_outputs([d]) self.assertTrue(str(val.dtype) == s.dtype) self.assertTrue(numpy.all(val[0] == [1, 0, 0, 0, 0])) def test_todense(self): # call sparse_var.todense() for t in _mtypes: s = t(scipy.sparse.identity(5)) s = as_sparse_variable(s) d = s.toarray() val = eval_outputs([d]) self.assertTrue(str(val.dtype) == s.dtype) self.assertTrue(numpy.all(val[0] == [1, 0, 0, 0, 0])) @staticmethod def check_format_ndim(format, ndim): x = tensor.tensor( dtype=config.floatX, broadcastable=([False] * ndim), name='x') s = 
SparseFromDense(format)(x) s_m = - s d = dense_from_sparse(s_m) c = d.sum() g = tensor.grad(c, x) f = theano.function([x], [s, g]) f(numpy.array(0, dtype=config.floatX, ndmin=ndim)) f(numpy.array(7, dtype=config.floatX, ndmin=ndim)) def test_format_ndim(self): for format in 'csc', 'csr': for ndim in 0, 1, 2: self.check_format_ndim(format, ndim) self.assertRaises(TypeError, self.check_format_ndim, format, 3) self.assertRaises(TypeError, self.check_format_ndim, format, 4) class test_csm_properties(unittest.TestCase): def setUp(self): utt.seed_rng() def test_csm_properties_grad(self): sp_types = {'csc': sp.csc_matrix, 'csr': sp.csr_matrix} for format in ['csc', 'csr']: for dtype in ['float32', 'float64']: spmat = sp_types[format](random_lil((4, 3), dtype, 3)) verify_grad_sparse(lambda *x: CSMProperties()(*x)[0], [spmat], structured=True) verify_grad_sparse(lambda *x: CSMProperties()(*x)[1], [spmat], structured=True) verify_grad_sparse(lambda *x: CSMProperties()(*x)[2], [spmat], structured=True) verify_grad_sparse(lambda *x: CSMProperties()(*x)[2], [spmat], structured=True) def test_csm_properties(self): sp_types = {'csc': sp.csc_matrix, 'csr': sp.csr_matrix} for format in ['csc', 'csr']: for dtype in ['float32', 'float64']: x = SparseType(format, dtype=dtype)() f = theano.function([x], csm_properties(x)) spmat = sp_types[format](random_lil((4, 3), dtype, 3)) data, indices, indptr, shape = f(spmat) assert numpy.all(data == spmat.data) assert numpy.all(indices == spmat.indices) assert numpy.all(indptr == spmat.indptr) assert numpy.all(shape == spmat.shape) class test_csm(unittest.TestCase): def setUp(self): utt.seed_rng() def test_csm_grad(self): sp_types = {'csc': sp.csc_matrix, 'csr': sp.csr_matrix} for format in ['csc', 'csr']: for dtype in ['float32', 'float64']: spmat = sp_types[format](random_lil((4, 3), dtype, 3)) verify_grad_sparse(lambda x: CSM(format)(x, spmat.indices, spmat.indptr, numpy.asarray(spmat.shape, 'int32')), [spmat.data], structured=True) def 
test_csm_sparser(self): """ Test support for gradients sparser than the input. """ sp_types = {'csc': sp.csc_matrix, 'csr': sp.csr_matrix} for format in ['csc', 'csr']: for dtype in ['float32', 'float64']: x = tensor.tensor(dtype=dtype, broadcastable=(False,)) y = tensor.ivector() z = tensor.ivector() s = tensor.ivector() a = as_sparse_variable(sp_types[format](random_lil((4, 3), dtype, 1))) f = theano.function([x, y, z, s], tensor.grad(dense_from_sparse( a * CSM(format)(x, y, z, s)).sum(), x)) spmat = sp_types[format](random_lil((4, 3), dtype, 3)) res = f(spmat.data, spmat.indices, spmat.indptr, numpy.asarray(spmat.shape, 'int32')) assert len(spmat.data) == len(res) def test_csm_unsorted(self): """ Test support for gradients of unsorted inputs. """ sp_types = {'csc': sp.csc_matrix, 'csr': sp.csr_matrix} for format in ['csr', 'csc', ]: for dtype in ['float32', 'float64']: x = tensor.tensor(dtype=dtype, broadcastable=(False,)) y = tensor.ivector() z = tensor.ivector() s = tensor.ivector() # Sparse advanced indexing produces unsorted sparse matrices a = sparse_random_inputs(format, (4, 3), out_dtype=dtype, unsorted_indices=True)[1][0] # Make sure it's unsorted assert not a.has_sorted_indices def my_op(x): y = tensor.constant(a.indices) z = tensor.constant(a.indptr) s = tensor.constant(a.shape) return tensor.sum( dense_from_sparse(CSM(format)(x, y, z, s) * a)) verify_grad_sparse(my_op, [a.data]) def test_csm(self): sp_types = {'csc': sp.csc_matrix, 'csr': sp.csr_matrix} for format in ['csc', 'csr']: for dtype in ['float32', 'float64']: x = tensor.tensor(dtype=dtype, broadcastable=(False,)) y = tensor.ivector() z = tensor.ivector() s = tensor.ivector() f = theano.function([x, y, z, s], CSM(format)(x, y, z, s)) spmat = sp_types[format](random_lil((4, 3), dtype, 3)) res = f(spmat.data, spmat.indices, spmat.indptr, numpy.asarray(spmat.shape, 'int32')) assert numpy.all(res.data == spmat.data) assert numpy.all(res.indices == spmat.indices) assert numpy.all(res.indptr == 
spmat.indptr) assert numpy.all(res.shape == spmat.shape) class test_structureddot(unittest.TestCase): def setUp(self): utt.seed_rng() def test_structureddot_csc_grad(self): # shortcut: testing csc in float32, testing csr in float64 # allocate a random sparse matrix spmat = sp.csc_matrix(random_lil((4, 3), 'float32', 3)) mat = numpy.asarray(numpy.random.randn(3, 2), 'float32') verify_grad_sparse(structured_dot, [spmat, mat], structured=True) def buildgraph_T(spmat, mat): return structured_dot(mat.T, spmat.T) verify_grad_sparse(buildgraph_T, [spmat, mat], structured=True) def test_structureddot_csr_grad(self): # shortcut: testing csc in float32, testing csr in float64 # allocate a random sparse matrix spmat = sp.csr_matrix(random_lil((4, 3), 'float64', 3)) mat = numpy.asarray(numpy.random.randn(3, 2), 'float64') verify_grad_sparse(structured_dot, [spmat, mat], structured=True) def buildgraph_T(spmat, mat): return structured_dot(mat.T, spmat.T) verify_grad_sparse(buildgraph_T, [spmat, mat], structured=True) def test_upcast(self): typenames = ('float32', 'int64', 'int8', 'int32', 'int16', 'float64', 'complex64', 'complex128') for dense_dtype in typenames: for sparse_dtype in typenames: correct_dtype = theano.scalar.upcast(sparse_dtype, dense_dtype) a = SparseType('csc', dtype=sparse_dtype)() b = tensor.matrix(dtype=dense_dtype) d = structured_dot(a, b) assert d.type.dtype == correct_dtype # compile and run a function f = theano.function([a, b], d) M, N, K, nnz = (4, 3, 5, 3) spmat = sp.csc_matrix(random_lil((M, N), sparse_dtype, nnz)) # the following madness is necessary to workaround # an intc vs. int32 bug. # The lil makes an intc on my computer when sparse_dtype # is int32. 
spmat.dtype = numpy.dtype(sparse_dtype) mat = numpy.asarray(numpy.random.randn(N, K) * 9, dtype=dense_dtype) # print 'DTYPES', sparse_dtype, dense_dtype # print 'sym types', a.type, b.type # print 'dtype strings', spmat.dtype, mat.dtype # print 'numpy dtype num', mat.dtype.num # print 'scipy dtype num', spmat.data.dtype.num theano_result = f(spmat, mat) scipy_result = spmat * mat assert theano_result.shape == scipy_result.shape assert theano_result.dtype == scipy_result.dtype utt.assert_allclose(scipy_result, theano_result) def test_opt_unpack(self): # # Test that a graph involving # structured_dot(assembled_csc_matrix) is optimized to be just # a structured_dot_csc Op and no assembly of a csc_matrix. # # The optimization from structured_dot -> structured_dot_csc # is currently disabled, So this test is not expected to pass return # kerns = tensor.Tensor(dtype='int64', broadcastable=[False])('kerns') spmat = sp.lil_matrix((4, 6), dtype='int64') for i in range(5): # set non-zeros in random locations (row x, col y) x = numpy.floor(numpy.random.rand() * spmat.shape[0]) y = numpy.floor(numpy.random.rand() * spmat.shape[1]) spmat[x, y] = numpy.random.rand() * 10 spmat = sp.csc_matrix(spmat) images = tensor.Tensor(dtype='float32', broadcastable=[False, False])('images') cscmat = CSC(kerns, spmat.indices[:spmat.size], spmat.indptr, spmat.shape) f = theano.function([kerns, images], structured_dot(cscmat, images.T)) sdcscpresent = False for node in f.maker.fgraph.toposort(): # print node.op assert not isinstance(node.op, CSM) assert not isinstance(node.op, CSMProperties) if isinstance(f.maker.fgraph.toposort()[1].op, StructuredDotCSC): sdcscpresent = True assert sdcscpresent kernvals = numpy.array(spmat.data[:spmat.size]) # print 'kdtype', kernvals.dtype, kernvals.shape, # print kernvals.ndim, kernvals.dtype.num # print 'type of kernvals = ', kernvals.dtype bsize = 3 imvals = 1.0 * numpy.array(numpy.arange(bsize * spmat.shape[1]).\ reshape(bsize, spmat.shape[1]), 
dtype='float32') outvals = f(kernvals, imvals) # print outvals def test_dot_sparse_sparse(self): # test dot for 2 input sparse matrix sparse_dtype = 'float64' sp_mat = {'csc': sp.csc_matrix, 'csr': sp.csr_matrix, 'bsr': sp.csr_matrix} for sparse_format_a in ['csc', 'csr', 'bsr']: for sparse_format_b in ['csc', 'csr', 'bsr']: a = SparseType(sparse_format_a, dtype=sparse_dtype)() b = SparseType(sparse_format_b, dtype=sparse_dtype)() d = theano.dot(a, b) f = theano.function([a, b], theano.Out(d, borrow=True)) topo = f.maker.fgraph.toposort() for M, N, K, nnz in [(4, 3, 2, 3), (40, 30, 20, 3), (40, 30, 20, 30), (400, 3000, 200, 6000), ]: a_val = sp_mat[sparse_format_a]( random_lil((M, N), sparse_dtype, nnz)) b_val = sp_mat[sparse_format_b]( random_lil((N, K), sparse_dtype, nnz)) f(a_val, b_val) def test_csc_correct_output_faster_than_scipy(self): sparse_dtype = 'float64' dense_dtype = 'float64' a = SparseType('csc', dtype=sparse_dtype)() b = tensor.matrix(dtype=dense_dtype) d = theano.dot(a, b) f = theano.function([a, b], theano.Out(d, borrow=True)) for M, N, K, nnz in [(4, 3, 2, 3), (40, 30, 20, 3), (40, 30, 20, 30), (400, 3000, 200, 6000), ]: spmat = sp.csc_matrix(random_lil((M, N), sparse_dtype, nnz)) mat = numpy.asarray(numpy.random.randn(N, K), dense_dtype) theano_times = [] scipy_times = [] for i in xrange(5): t0 = time.time() theano_result = f(spmat, mat) t1 = time.time() scipy_result = spmat * mat t2 = time.time() theano_times.append(t1 - t0) scipy_times.append(t2 - t1) theano_time = numpy.min(theano_times) scipy_time = numpy.min(scipy_times) speedup = scipy_time / theano_time # print scipy_times # print theano_times # print ('M=%(M)s N=%(N)s K=%(K)s nnz=%(nnz)s theano_time' # '=%(theano_time)s speedup=%(speedup)s') % locals() # fail if Theano is slower than scipy by more than a certain amount overhead_tol = 0.003 # seconds overall overhead_rtol = 1.2 # times as long utt.assert_allclose(scipy_result, theano_result) if not theano.config.mode in ["DebugMode", 
"DEBUG_MODE"]: self.assertFalse(theano_time > overhead_rtol * scipy_time + overhead_tol) def test_csr_correct_output_faster_than_scipy(self): # contrast with test_grad, we put csr in float32, csc in float64 sparse_dtype = 'float32' dense_dtype = 'float32' a = SparseType('csr', dtype=sparse_dtype)() b = tensor.matrix(dtype=dense_dtype) d = theano.dot(a, b) f = theano.function([a, b], d) for M, N, K, nnz in [(4, 3, 2, 3), (40, 30, 20, 3), (40, 30, 20, 30), (400, 3000, 200, 6000), ]: spmat = sp.csr_matrix(random_lil((M, N), sparse_dtype, nnz)) mat = numpy.asarray(numpy.random.randn(N, K), dense_dtype) t0 = time.time() theano_result = f(spmat, mat) t1 = time.time() scipy_result = spmat * mat t2 = time.time() theano_time = t1 - t0 scipy_time = t2 - t1 # print 'theano took', theano_time, # print 'scipy took', scipy_time overhead_tol = 0.002 # seconds overhead_rtol = 1.1 # times as long utt.assert_allclose(scipy_result, theano_result) if (not theano.config.mode in ["DebugMode", "DEBUG_MODE"] and theano.config.cxx): self.assertFalse( theano_time > overhead_rtol * scipy_time + overhead_tol, (theano_time, overhead_rtol * scipy_time + overhead_tol, scipy_time, overhead_rtol, overhead_tol)) class DotTests(utt.InferShapeTester): def setUp(self): super(DotTests, self).setUp() x_size = (10, 100) y_size = (100, 1000) utt.seed_rng() self.x_csr = scipy.sparse.csr_matrix( numpy.random.binomial(1, 0.5, x_size), dtype=theano.config.floatX) self.x_csc = scipy.sparse.csc_matrix( numpy.random.binomial(1, 0.5, x_size), dtype=theano.config.floatX) self.y = numpy.asarray(numpy.random.uniform(-1, 1, y_size), dtype=theano.config.floatX) self.y_csr = scipy.sparse.csr_matrix( numpy.random.binomial(1, 0.5, y_size), dtype=theano.config.floatX) self.y_csc = scipy.sparse.csc_matrix( numpy.random.binomial(1, 0.5, y_size), dtype=theano.config.floatX) self.v_10 = numpy.asarray(numpy.random.uniform(-1, 1, 10), dtype=theano.config.floatX) self.v_100 = numpy.asarray(numpy.random.uniform(-1, 1, 100), 
dtype=theano.config.floatX) def test_csr_dense(self): x = theano.sparse.csr_matrix('x') y = theano.tensor.matrix('y') v = theano.tensor.vector('v') for (x, y, x_v, y_v) in [(x, y, self.x_csr, self.y), (x, v, self.x_csr, self.v_100), (v, x, self.v_10, self.x_csr)]: f_a = theano.function([x, y], theano.sparse.dot(x, y)) f_b = lambda x, y: x * y utt.assert_allclose(f_a(x_v, y_v), f_b(x_v, y_v)) # Test infer_shape self._compile_and_check([x, y], [theano.sparse.dot(x, y)], [x_v, y_v], (Dot, Usmm, UsmmCscDense)) def test_csc_dense(self): x = theano.sparse.csc_matrix('x') y = theano.tensor.matrix('y') v = theano.tensor.vector('v') for (x, y, x_v, y_v) in [(x, y, self.x_csc, self.y), (x, v, self.x_csc, self.v_100), (v, x, self.v_10, self.x_csc)]: f_a = theano.function([x, y], theano.sparse.dot(x, y)) f_b = lambda x, y: x * y utt.assert_allclose(f_a(x_v, y_v), f_b(x_v, y_v)) # Test infer_shape self._compile_and_check([x, y], [theano.sparse.dot(x, y)], [x_v, y_v], (Dot, Usmm, UsmmCscDense)) def test_sparse_sparse(self): for d1, d2 in [('float32', 'float32'), ('float32', 'float64'), ('float64', 'float32'), ('float64', 'float64'), ('float32', 'int16'), ('float32', 'complex64'), ]: for x_f, y_f in [('csc', 'csc'), ('csc', 'csr'), ('csr', 'csc'), ('csr', 'csr'), ]: x = theano.sparse.SparseType(format=x_f, dtype=d1)('x') y = theano.sparse.SparseType(format=x_f, dtype=d2)('x') f_a = lambda x, y: x * y f_b = theano.function([x, y], theano.sparse.dot(x, y)) vx = getattr(self, 'x_' + x_f).astype(d1) vy = getattr(self, 'y_' + y_f).astype(d2) utt.assert_allclose(f_a(vx, vy).toarray(), f_b(vx, vy)) # Test infer_shape f_a = theano.function([x, y], theano.sparse.dot(x, y).shape) f_b = lambda x, y: (x * y).shape assert numpy.all(f_a(vx, vy) == f_b(vx, vy)) topo = f_a.maker.fgraph.toposort() if theano.config.mode != 'FAST_COMPILE': nb = 0 else: nb = 1 assert sum([isinstance(node.op, (Dot, Usmm, UsmmCscDense)) for node in topo]) == nb def test_cuda(self): import theano.sandbox.cuda as cuda 
if not cuda.cuda_available: raise SkipTest("Optional package cuda not available") a = sparse.csr_matrix('a', dtype='float32') b = cuda.float32_shared_constructor( numpy.random.rand(3, 4).astype('float32')) d = sparse.dot(a, b) f = theano.function([a], d) a_val = scipy.sparse.csr_matrix(random_lil((5, 3), 'float32', 5)) d_theano = f(a_val) d_numpy = a_val * b.get_value() utt.assert_allclose(d_numpy, d_theano) def test_int32_dtype(self): # Reported on the theano-user mailing-list: # https://groups.google.com/d/msg/theano-users/MT9ui8LtTsY/rwatwEF9zWAJ size = 9 intX = 'int32' C = tensor.matrix('C', dtype=intX) I = tensor.matrix('I', dtype=intX) fI = I.flatten() data = tensor.ones_like(fI) indptr = tensor.arange(data.shape[0] + 1, dtype='int32') m1 = sparse.CSR(data, fI, indptr, (8, size)) m2 = sparse.dot(m1, C) y = m2.reshape(shape=(2, 4, 9), ndim=3) f = theano.function(inputs=[I, C], outputs=y) i = numpy.asarray([[4, 3, 7, 7], [2, 8, 4, 5]], dtype=intX) a = numpy.asarray(numpy.random.randint(0, 100, (size, size)), dtype=intX) f(i, a) def test_csr_dense_grad(self): # shortcut: testing csc in float32, testing csr in float64 # allocate a random sparse matrix spmat = sp.csr_matrix(random_lil((4, 3), 'float64', 3)) mat = numpy.asarray(numpy.random.randn(2, 4), 'float64') def buildgraph_T(mat): return Dot()(mat, spmat) theano.tests.unittest_tools.verify_grad(buildgraph_T, [mat]) class UsmmTests(unittest.TestCase): """ Test the Usmm and UsmmCscDense class and related optimization """ def setUp(self): x_size = (10, 100) y_size = (100, 200) z_size = (x_size[0], y_size[1]) self.rng = numpy.random.RandomState(seed=utt.fetch_seed()) self.x = numpy.asarray(self.rng.binomial(1, 0.5, x_size), dtype=theano.config.floatX) self.y = numpy.asarray(self.rng.uniform(-1, 1, y_size), dtype=theano.config.floatX) self.z = numpy.asarray(self.rng.uniform(-1, 1, z_size), dtype=theano.config.floatX) # this is slow, but it's the only test for the op. 
def test(self): def mat(format, name, dtype): if format == 'dense': return theano.tensor.matrix(name, dtype=dtype) else: return theano.sparse.matrix(format, name, dtype=dtype) params = product(*([['float32', 'float64', 'int16', 'complex64']] * 4 + [['dense', 'csc', 'csr']] * 2)) # All test are too slow, so we randomly take 100 of them. # The buildbot change the seed, so we will finish by running them all. # As of this writing they where all passing. #params = self.rng.permutation(list(params))[:500] for dtype1, dtype2, dtype3, dtype4, format1, format2 in params: if format1 == 'dense' and format2 == 'dense': # Usmm won't be used! continue x = mat(format1, 'x', dtype1) y = mat(format2, 'y', dtype2) a = theano.tensor.scalar('a', dtype=dtype3) z = theano.shared(numpy.asarray(self.z, dtype=dtype4).copy()) f_b = lambda z, a, x, y: z - a * (x * y) x_data = numpy.asarray(self.x, dtype=dtype1) if format1 != 'dense': x_data = as_sparse_format(x_data, format1) y_data = numpy.asarray(self.y, dtype=dtype2) if format2 != 'dense': y_data = as_sparse_format(y_data, format2) a_data = numpy.asarray(1.5, dtype=dtype3) z_data = numpy.asarray(self.z, dtype=dtype4) f_b_out = f_b(z_data, a_data, x_data, y_data) # Can it work inplace? inplace = dtype4 == theano.scalar.upcast(dtype1, dtype2, dtype3) # To make it easier to check the toposort mode = theano.compile.mode.get_default_mode().excluding('fusion') if inplace: updates = [(z, z - a * theano.sparse.dot(x, y))] f_a = theano.function([a, x, y], [], updates=updates, mode=mode) f_a(a_data, x_data, y_data) f_a_out = z.get_value(borrow=True) else: f_a = theano.function([a, x, y], z - a * theano.sparse.dot(x, y), mode=mode) # In DebugMode there is a strange difference with complex # So we raise a little the threshold a little. 
try: orig_atol = theano.tensor.basic.float64_atol orig_rtol = theano.tensor.basic.float64_rtol theano.tensor.basic.float64_atol = 1e-7 theano.tensor.basic.float64_rtol = 1e-6 f_a_out = f_a(a_data, x_data, y_data) finally: theano.tensor.basic.float64_atol = orig_atol theano.tensor.basic.float64_rtol = orig_rtol # As we do a dot product of 2 vector of 100 element, # This mean we can have 2*100*eps abs error. if f_a_out.dtype in ['float64', 'complex128']: atol = 3e-8 rtol = 1e-5 else: atol = None rtol = None utt.assert_allclose(f_a_out, f_b_out, rtol=rtol, atol=atol) topo = f_a.maker.fgraph.toposort() up = theano.scalar.upcast(dtype1, dtype2, dtype3, dtype4) fast_compile = theano.config.mode == "FAST_COMPILE" if not theano.config.blas.ldflags: # Usmm should not be inserted, because it relies on BLAS assert len(topo) == 4, topo assert isinstance(topo[0].op, theano.sparse.Dot) assert isinstance(topo[1].op, theano.tensor.DimShuffle) assert (isinstance(topo[2].op, theano.tensor.Elemwise) and isinstance(topo[2].op.scalar_op, theano.scalar.Mul)) assert (isinstance(topo[3].op, theano.tensor.Elemwise) and isinstance(topo[3].op.scalar_op, theano.scalar.Sub)) elif (y.type.dtype == up and format1 == 'csc' and format2 == 'dense' and not fast_compile and theano.config.cxx and up in ('float32', 'float64')): # The op UsmmCscDense should be inserted assert (sum([isinstance(node.op, tensor.Elemwise) and isinstance(node.op.scalar_op, theano.scalar.basic.Cast) for node in topo]) == len(topo) - 5) new_topo = [] for node in topo: if not (isinstance(node.op, tensor.Elemwise) and isinstance(node.op.scalar_op, theano.scalar.basic.Cast)): new_topo.append(node) topo = new_topo assert len(topo) == 5, topo # Usmm is tested at the same time in debugmode # Check if the optimization local_usmm and local_usmm_csx is # applied assert isinstance(topo[0].op, theano.sparse.basic.CSMProperties) assert isinstance(topo[1].op, theano.tensor.DimShuffle) assert isinstance(topo[2].op, theano.tensor.Subtensor) 
assert topo[3].op == theano.tensor.neg assert isinstance(topo[4].op, UsmmCscDense) if inplace: assert topo[4].op.inplace elif not fast_compile: # The op Usmm should be inserted assert len(topo) == 3, topo assert isinstance(topo[0].op, theano.tensor.DimShuffle) assert topo[1].op == theano.tensor.neg assert isinstance(topo[2].op, theano.sparse.Usmm) def test_infer_shape(self): def mat(format, name, dtype): if format == 'dense': return theano.tensor.matrix(name, dtype=dtype) else: return theano.sparse.matrix(format, name, dtype=dtype) params = [('float32', 'float64', 'int16', 'complex64', 'csc', 'dense'), ('float32', 'float64', 'int16', 'complex64', 'csr', 'dense')] for dtype1, dtype2, dtype3, dtype4, format1, format2 in params: if format1 == 'dense' and format2 == 'dense': # Usmm won't be used! continue x = mat(format1, 'x', dtype1) y = mat(format2, 'y', dtype2) a = theano.tensor.scalar('a', dtype=dtype3) z = theano.shared(numpy.asarray(self.z, dtype=dtype4).copy()) f_b = lambda z, a, x, y: z - a * (x * y) x_data = numpy.asarray(self.x, dtype=dtype1) if format1 != 'dense': x_data = as_sparse_format(x_data, format1) y_data = numpy.asarray(self.y, dtype=dtype2) if format2 != 'dense': y_data = as_sparse_format(y_data, format2) a_data = numpy.asarray(1.5, dtype=dtype3) z_data = numpy.asarray(self.z, dtype=dtype4) f_b_out = f_b(z_data, a_data, x_data, y_data) # Can it work inplace? 
inplace = dtype4 == theano.scalar.upcast(dtype1, dtype2, dtype3) # To make it easier to check the toposort mode = theano.compile.mode.get_default_mode().excluding('fusion') # test infer_shape of Dot got applied f_shape = theano.function([a, x, y], (z - a * theano.sparse.dot(x, y)).shape, mode=mode) assert all(f_shape(a_data, x_data, y_data) == f_b_out.shape) topo = f_shape.maker.fgraph.toposort() if theano.config.mode != 'FAST_COMPILE': nb = 0 else: nb = 1 assert sum([isinstance(node.op, (Dot, Usmm, UsmmCscDense)) for node in topo]) == nb class test_zeros_like(unittest.TestCase): def test(self): x = theano.sparse.csr_matrix() f = theano.function([x], theano.sparse.sp_zeros_like(x)) vx = scipy.sparse.csr_matrix(numpy.asarray( numpy.random.binomial(1, 0.5, (100, 100)), dtype=theano.config.floatX)) fx = f(vx) assert fx.nnz == 0 assert fx.shape == vx.shape def test_shape_i(): sparse_dtype = 'float32' a = SparseType('csr', dtype=sparse_dtype)() f = theano.function([a], a.shape[1]) assert f(sp.csr_matrix(random_lil((100, 10), sparse_dtype, 3))) == 10 def test_shape(): # Test that getting the shape of a sparse variable # does not actually create a dense tensor in the process. 
    sparse_dtype = 'float32'

    a = SparseType('csr', dtype=sparse_dtype)()
    f = theano.function([a], a.shape)
    assert numpy.all(f(sp.csr_matrix(random_lil((100, 10), sparse_dtype, 3)))
                     == (100, 10))
    if theano.config.mode != 'FAST_COMPILE':
        # The optimized graph must read the two dimensions directly (two
        # Shape_i ops assembled by a MakeVector) rather than densifying
        # the sparse input first.
        topo = f.maker.fgraph.toposort()
        assert len(topo) == 3
        assert isinstance(topo[0].op, tensor.opt.Shape_i)
        assert isinstance(topo[1].op, tensor.opt.Shape_i)
        assert isinstance(topo[2].op, tensor.opt.MakeVector)


def test_may_share_memory():
    # Check SparseType.may_share_memory over pairs built from two csc
    # matrices: the matrices themselves, their data/indptr/indices buffers,
    # their shapes converted to int32 arrays (which never share memory with
    # the matrix), and their transposes.  `rep` is the expected answer.
    a = scipy.sparse.csc_matrix(scipy.sparse.eye(5, 3))
    b = scipy.sparse.csc_matrix(scipy.sparse.eye(4, 3))
    as_ar = lambda a: theano._asarray(a, dtype='int32')
    for a_, b_, rep in [(a, a, True), (b, b, True), (a, b, False),
                        (a, a.data, True), (a, a.indptr, True),
                        (a, a.indices, True), (a, as_ar(a.shape), False),
                        (a.data, a, True), (a.indptr, a, True),
                        (a.indices, a, True), (as_ar(a.shape), a, False),
                        (b, b.data, True), (b, b.indptr, True),
                        (b, b.indices, True), (b, as_ar(b.shape), False),
                        (b.data, b, True), (b.indptr, b, True),
                        (b.indices, b, True), (as_ar(b.shape), b, False),
                        (b.data, a, False), (b.indptr, a, False),
                        (b.indices, a, False), (as_ar(b.shape), a, False),
                        (a.transpose(), a, True), (b.transpose(), b, True),
                        (a.transpose(), b, False),
                        (b.transpose(), a, False),
                        ]:

        assert SparseType.may_share_memory(a_, b_) == rep


def test_sparse_shared_memory():
    # Note : There are no inplace ops on sparse matrix yet. If one is
    # someday implemented, we could test it here.
    # Run the same structured-dot computation through a theano function
    # whose inputs are declared mutable, and through plain scipy operations,
    # then check that both give the same dense result.
    a = random_lil((3, 4), 'float32', 3).tocsr()
    m1 = random_lil((4, 4), 'float32', 3).tocsr()
    m2 = random_lil((4, 4), 'float32', 3).tocsr()
    x = SparseType('csr', dtype='float32')()
    y = SparseType('csr', dtype='float32')()

    sdot = theano.sparse.structured_dot
    z = sdot(x * 3, m1) + sdot(y * 2, m2)

    # mutable=True tells theano it may reuse the inputs' storage.
    f = theano.function([theano.In(x, mutable=True),
                         theano.In(y, mutable=True)], z, mode='FAST_RUN')

    # Reference implementation using plain scipy arithmetic.
    def f_(x, y, m1=m1, m2=m2):
        return ((x * 3) * m1) + ((y * 2) * m2)

    assert SparseType.may_share_memory(a, a)  # This is trivial

    # Both calls receive the SAME matrix for x and y.
    result = f(a, a)
    result_ = f_(a, a)

    assert (result_.todense() == result.todense()).all()


def test_size():
    """
    Ensure the `size` attribute of sparse matrices behaves as in numpy.
    """
    for sparse_type in ('csc_matrix', 'csr_matrix'):
        x = getattr(theano.sparse, sparse_type)()
        y = getattr(scipy.sparse, sparse_type)((5, 7)).astype(config.floatX)
        get_size = theano.function([x], x.size)

        # Compare theano's x.size against scipy's own size attribute.
        def check():
            assert y.size == get_size(y)

        # We verify that the size is correctly updated as we store more data
        # into the sparse matrix (including zeros).
        # Store new elements (an explicit nonzero, then an explicit zero)
        # and re-check after each store.
        check()
        y[0, 0] = 1
        check()
        y[0, 1] = 0
        check()


class ColScaleCSCTester(utt.InferShapeTester):
    # Tests for sparse.col_scale: multiply each column of a sparse matrix
    # by the corresponding element of a dense vector.
    def setUp(self):
        super(ColScaleCSCTester, self).setUp()
        self.op = sparse.col_scale

    def test_op(self):
        for format in sparse.sparse_formats:
            variable, data = sparse_random_inputs(format, shape=(8, 10))
            variable.append(tensor.vector())
            data.append(numpy.random.random(10).astype(config.floatX))

            f = theano.function(variable, self.op(*variable))

            tested = f(*data)
            # Dense reference: broadcast the scale vector across rows so
            # each column j is multiplied by s[j].
            x, s = data[0].toarray(), data[1][numpy.newaxis, :]
            expected = x * s

            assert tested.format == format
            utt.assert_allclose(expected, tested.toarray())

    def test_infer_shape(self):
        # The expected op class depends on the input format, per the
        # (format, cls) pairs below.
        for format, cls in [('csc', sparse.ColScaleCSC),
                            ('csr', sparse.RowScaleCSC)]:
            variable, data = sparse_random_inputs(format, shape=(8, 10))
            variable.append(tensor.vector())
            data.append(numpy.random.random(10).astype(config.floatX))

            self._compile_and_check(variable,
                                    [self.op(*variable)],
                                    data,
                                    cls)

    def test_grad(self):
        for format in sparse.sparse_formats:
            variable, data = sparse_random_inputs(format, shape=(8, 10))
            variable.append(tensor.vector())
            data.append(numpy.random.random(10).astype(config.floatX))

            verify_grad_sparse(self.op, data, structured=True)


class RowScaleCSCTester(utt.InferShapeTester):
    # Tests for sparse.row_scale: multiply each row of a sparse matrix by
    # the corresponding element of a dense vector.
    def setUp(self):
        super(RowScaleCSCTester, self).setUp()
        self.op = sparse.row_scale

    def test_op(self):
        for format in sparse.sparse_formats:
            variable, data = sparse_random_inputs(format, shape=(8, 10))
            variable.append(tensor.vector())
            data.append(numpy.random.random(8).astype(config.floatX))

            f = theano.function(variable, self.op(*variable))

            tested = f(*data)
            # Dense reference: broadcast the scale vector across columns so
            # each row i is multiplied by s[i].
            x, s = data[0].toarray(), data[1][:, numpy.newaxis]
            expected = x * s

            assert tested.format == format
            utt.assert_allclose(expected, tested.toarray())

    def test_infer_shape(self):
        # Mirror image of ColScaleCSCTester.test_infer_shape: the expected
        # op class again depends on the input format.
        for format, cls in [('csc', sparse.RowScaleCSC),
                            ('csr', sparse.ColScaleCSC)]:
            variable, data = sparse_random_inputs(format, shape=(8, 10))
            variable.append(tensor.vector())
data.append(numpy.random.random(8).astype(config.floatX)) self._compile_and_check(variable, [self.op(*variable)], data, cls) def test_grad(self): for format in sparse.sparse_formats: variable, data = sparse_random_inputs(format, shape=(8, 10)) variable.append(tensor.vector()) data.append(numpy.random.random(8).astype(config.floatX)) verify_grad_sparse(self.op, data, structured=True) class SpSumTester(utt.InferShapeTester): possible_axis = [None, 0, 1] def setUp(self): super(SpSumTester, self).setUp() self.op_class = sparse.SpSum self.op = sparse.sp_sum def test_op(self): for format in sparse.sparse_formats: for axis in self.possible_axis: variable, data = sparse_random_inputs(format, shape=(10, 10)) z = theano.sparse.sp_sum(variable[0], axis=axis) if axis is None: assert z.type.broadcastable == () else: assert z.type.broadcastable == (False, ) f = theano.function(variable, self.op(variable[0], axis=axis)) tested = f(*data) expected = data[0].todense().sum(axis).ravel() utt.assert_allclose(expected, tested) def test_infer_shape(self): for format in sparse.sparse_formats: for axis in self.possible_axis: variable, data = sparse_random_inputs(format, shape=(9, 10)) self._compile_and_check(variable, [self.op(variable[0], axis=axis)], data, self.op_class) def test_grad(self): for format in sparse.sparse_formats: for axis in self.possible_axis: for struct in [True, False]: variable, data = sparse_random_inputs(format, shape=(9, 10)) verify_grad_sparse( self.op_class(axis=axis, sparse_grad=struct), data, structured=struct) class DiagTester(utt.InferShapeTester): def setUp(self): super(DiagTester, self).setUp() self.op_class = Diag self.op = diag def test_op(self): for format in sparse.sparse_formats: variable, data = sparse_random_inputs(format, shape=(10, 10)) z = self.op(*variable) assert z.type.broadcastable == (False, ) f = theano.function(variable, z) tested = f(*data) expected = data[0].toarray().diagonal() utt.assert_allclose(expected, tested) def 
test_infer_shape(self): for format in sparse.sparse_formats: variable, data = sparse_random_inputs(format, shape=(10, 10)) self._compile_and_check(variable, [self.op(*variable)], data, self.op_class, warn=False) def test_grad(self): for format in sparse.sparse_formats: variable, data = sparse_random_inputs(format, shape=(10, 10)) verify_grad_sparse( self.op, data, structured=False) class SquareDiagonalTester(utt.InferShapeTester): def setUp(self): super(SquareDiagonalTester, self).setUp() self.op_class = SquareDiagonal self.op = square_diagonal def test_op(self): for format in sparse.sparse_formats: for size in range(5, 9): variable = [tensor.vector()] data = [numpy.random.random(size).astype(config.floatX)] f = theano.function(variable, self.op(*variable)) tested = f(*data).toarray() expected = numpy.diag(*data) utt.assert_allclose(expected, tested) assert tested.dtype == expected.dtype assert tested.shape == expected.shape def test_infer_shape(self): for format in sparse.sparse_formats: for size in range(5, 9): variable = [tensor.vector()] data = [numpy.random.random(size).astype(config.floatX)] self._compile_and_check(variable, [self.op(*variable)], data, self.op_class) def test_grad(self): for format in sparse.sparse_formats: for size in range(5, 9): variable = [tensor.vector()] data = [numpy.random.random(size).astype(config.floatX)] verify_grad_sparse( self.op, data, structured=False) class EnsureSortedIndicesTester(utt.InferShapeTester): def setUp(self): super(EnsureSortedIndicesTester, self).setUp() self.op_class = EnsureSortedIndices self.op = ensure_sorted_indices def test_op(self): for format in sparse.sparse_formats: for shape in zip(range(5, 9), range(3, 7)[::-1]): variable, data = sparse_random_inputs(format, shape=shape) f = theano.function(variable, self.op(*variable)) tested = f(*data).toarray() expected = data[0].sorted_indices().toarray() utt.assert_allclose(expected, tested) def test_infer_shape(self): for format in sparse.sparse_formats: for 
shape in zip(range(5, 9), range(3, 7)[::-1]): variable, data = sparse_random_inputs(format, shape=shape) self._compile_and_check(variable, [self.op(*variable)], data, self.op_class) def test_grad(self): for format in sparse.sparse_formats: for shape in zip(range(5, 9), range(3, 7)[::-1]): variable, data = sparse_random_inputs(format, shape=shape) verify_grad_sparse( self.op, data, structured=False) class CleanTester(utt.InferShapeTester): def setUp(self): super(CleanTester, self).setUp() self.op = clean def test_op(self): for format in sparse.sparse_formats: for shape in zip(range(5, 9), range(3, 7)[::-1]): variable, data = sparse_random_inputs(format, shape=shape) data[0][0, 0] = data[0][1, 1] = 0 f = theano.function(variable, self.op(*variable)) tested = f(*data) expected = data[0] expected.eliminate_zeros() assert all(tested.data == expected.data) assert not all(tested.data == 0) tested = tested.toarray() expected = expected.toarray() utt.assert_allclose(expected, tested) def test_grad(self): for format in sparse.sparse_formats: for shape in zip(range(5, 9), range(3, 7)[::-1]): variable, data = sparse_random_inputs(format, shape=shape) verify_grad_sparse( self.op, data, structured=False) class Remove0Tester(utt.InferShapeTester): def setUp(self): super(Remove0Tester, self).setUp() self.op_class = Remove0 def test_remove0(self): configs = [ # structure type, numpy matching class ('csc', scipy.sparse.csc_matrix), ('csr', scipy.sparse.csr_matrix), ] for format, matrix_class in configs: for zero, unsor in [(True, True), (True, False), (False, True), (False, False)]: (x,), (mat,) = sparse_random_inputs(format, (6, 8), out_dtype=config.floatX, explicit_zero=zero, unsorted_indices=unsor) assert 0 in mat.data or not zero assert not mat.has_sorted_indices or not unsor # the In thingy has to be there because theano has as rule not # to optimize inputs f = theano.function([theano.In(x, borrow=True, mutable=True)], Remove0()(x)) # assert optimization local_inplace_remove0 
is applied in # modes with optimization if theano.config.mode not in ['FAST_COMPILE']: # list of apply nodes in the optimized graph. nodes = f.maker.fgraph.toposort() # Check there isn't any Remove0 instance not inplace. assert not any([isinstance(node.op, Remove0) and not node.op.inplace for node in nodes]), ( 'Inplace optimization should have been applied') # Check there is at least one Remove0 inplace. assert any([isinstance(node.op, Remove0) and node.op.inplace for node in nodes]) # checking # makes sense to change its name target = mat result = f(mat) mat.eliminate_zeros() msg = 'Matrices sizes differ. Have zeros been removed ?' assert result.size == target.size, msg if unsor: assert not result.has_sorted_indices assert not target.has_sorted_indices else: assert result.has_sorted_indices assert target.has_sorted_indices def test_infer_shape(self): mat = (numpy.arange(12) + 1).reshape((4, 3)) mat[0, 1] = mat[1, 0] = mat[2, 2] = 0 x_csc = theano.sparse.csc_matrix(dtype=theano.config.floatX) mat_csc = sp.csc_matrix(mat, dtype=theano.config.floatX) self._compile_and_check([x_csc], [Remove0()(x_csc)], [mat_csc], self.op_class) x_csr = theano.sparse.csr_matrix(dtype=theano.config.floatX) mat_csr = sp.csr_matrix(mat, dtype=theano.config.floatX) self._compile_and_check([x_csr], [Remove0()(x_csr)], [mat_csr], self.op_class) def test_grad(self): mat = (numpy.arange(9) + 1).reshape((3, 3)) mat[0, 1] = mat[1, 0] = mat[2, 2] = 0 mat_csc = sp.csc_matrix(mat, dtype=theano.config.floatX) verify_grad_sparse(Remove0(), [mat_csc]) mat_csr = sp.csr_matrix(mat, dtype=theano.config.floatX) verify_grad_sparse(Remove0(), [mat_csr]) class Test_getitem(unittest.TestCase): def setUp(self): self.rng = numpy.random.RandomState(utt.fetch_seed()) def test_GetItemList(self): a, A = sparse_random_inputs('csr', (4, 5)) b, B = sparse_random_inputs('csc', (4, 5)) y = a[0][[0, 1, 2, 3, 1]] z = b[0][[0, 1, 2, 3, 1]] fa = theano.function([a[0]], y) fb = theano.function([b[0]], z) t_geta = 
fa(A[0]).todense() t_getb = fb(B[0]).todense() s_geta = scipy.sparse.csr_matrix(A[0])[[0, 1, 2, 3, 1]].todense() s_getb = scipy.sparse.csc_matrix(B[0])[[0, 1, 2, 3, 1]].todense() utt.assert_allclose(t_geta, s_geta) utt.assert_allclose(t_getb, s_getb) def test_GetItemList_wrong_index(self): a, A = sparse_random_inputs('csr', (4, 5)) y = a[0][[0, 4]] f = theano.function([a[0]], y) self.assertRaises(IndexError, f, A[0]) def test_get_item_list_grad(self): op = theano.sparse.basic.GetItemList() def op_with_fixed_index(x): return op(x, index=numpy.asarray([0, 1])) x, x_val = sparse_random_inputs("csr", (4, 5)) try: verify_grad_sparse(op_with_fixed_index, x_val) except NotImplementedError as e: assert "Scipy version is to old" in str(e) def test_GetItem2Lists(self): a, A = sparse_random_inputs('csr', (4, 5)) b, B = sparse_random_inputs('csc', (4, 5)) y = a[0][[0, 0, 1, 3], [0, 1, 2, 4]] z = b[0][[0, 0, 1, 3], [0, 1, 2, 4]] fa = theano.function([a[0]], y) fb = theano.function([b[0]], z) t_geta = fa(A[0]) t_getb = fb(B[0]) s_geta = numpy.asarray(scipy.sparse.csr_matrix(A[0])[[0, 0, 1, 3], [0, 1, 2, 4]]) s_getb = numpy.asarray(scipy.sparse.csc_matrix(B[0])[[0, 0, 1, 3], [0, 1, 2, 4]]) utt.assert_allclose(t_geta, s_geta) utt.assert_allclose(t_getb, s_getb) def test_GetItem2Lists_wrong_index(self): a, A = sparse_random_inputs('csr', (4, 5)) y1 = a[0][[0, 5], [0, 3]] y2 = a[0][[0, 3], [0, 5]] f1 = theano.function([a[0]], y1) f2 = theano.function([a[0]], y2) self.assertRaises(IndexError, f1, A[0]) self.assertRaises(IndexError, f2, A[0]) def test_get_item_2lists_grad(self): op = theano.sparse.basic.GetItem2Lists() def op_with_fixed_index(x): return op(x, ind1=numpy.asarray([0, 1]), ind2=numpy.asarray([2, 3])) x, x_val = sparse_random_inputs("csr", (4, 5)) verify_grad_sparse(op_with_fixed_index, x_val) def test_GetItem2D(self): scipy_ver = [int(n) for n in scipy.__version__.split('.')[:2]] is_supported_version = bool(scipy_ver >= [0, 14]) sparse_formats = ('csc', 'csr') for format 
in sparse_formats: x = theano.sparse.matrix(format, name='x') a = theano.tensor.iscalar('a') b = theano.tensor.iscalar('b') c = theano.tensor.iscalar('c') d = theano.tensor.iscalar('d') e = theano.tensor.iscalar('e') f = theano.tensor.iscalar('f') # index m = 1 n = 5 p = 10 q = 15 if is_supported_version: j = 2 k = 3 else: j = None k = None vx = as_sparse_format(self.rng.binomial(1, 0.5, (100, 97)), format).astype(theano.config.floatX) #mode_no_debug = theano.compile.mode.get_default_mode() # if isinstance(mode_no_debug, theano.compile.DebugMode): # mode_no_debug = 'FAST_RUN' if is_supported_version: f1 = theano.function([x, a, b, c, d, e, f], x[a:b:e, c:d:f]) r1 = f1(vx, m, n, p, q, j, k) t1 = vx[m:n:j, p:q:k] else: f1 = theano.function([x, a, b, c, d], x[a:b, c:d]) r1 = f1(vx, m, n, p, q) t1 = vx[m:n, p:q] assert r1.shape == t1.shape assert numpy.all(t1.toarray() == r1.toarray()) """ Important: based on a discussion with both Fred and James The following indexing methods is not supported because the rval would be a sparse matrix rather than a sparse vector, which is a deviation from numpy indexing rule. This decision is made largely for keeping the consistency between numpy and theano. 
f2 = theano.function([x, a, b, c], x[a:b, c]) r2 = f2(vx, m, n, p) t2 = vx[m:n, p] assert r2.shape == t2.shape assert numpy.all(t2.toarray() == r2.toarray()) f3 = theano.function([x, a, b, c], x[a, b:c]) r3 = f3(vx, m, n, p) t3 = vx[m, n:p] assert r3.shape == t3.shape assert numpy.all(t3.toarray() == r3.toarray()) f5 = theano.function([x], x[1:2,3]) r5 = f5(vx) t5 = vx[1:2, 3] assert r5.shape == t5.shape assert numpy.all(r5.toarray() == t5.toarray()) f7 = theano.function([x], x[50]) r7 = f7(vx) t7 = vx[50] assert r7.shape == t7.shape assert numpy.all(r7.toarray() == t7.toarray()) """ if is_supported_version: f4 = theano.function([x, a, b, e], x[a:b:e]) r4 = f4(vx, m, n, j) t4 = vx[m:n:j] else: f4 = theano.function([x, a, b], x[a:b]) r4 = f4(vx, m, n) t4 = vx[m:n] assert r4.shape == t4.shape assert numpy.all(t4.toarray() == r4.toarray()) #----------------------------------------------------------- # test cases using int indexing instead of theano variable f6 = theano.function([x], x[1:10:j, 10:20:k]) r6 = f6(vx) t6 = vx[1:10:j, 10:20:k] assert r6.shape == t6.shape assert numpy.all(r6.toarray() == t6.toarray()) #---------------------------------------------------------- # test cases with indexing both with theano variable and int if is_supported_version: f8 = theano.function([x, a, b, e], x[a:b:e, 10:20:1]) r8 = f8(vx, m, n, j) t8 = vx[m:n:j, 10:20:1] else: f8 = theano.function([x, a, b], x[a:b, 10:20]) r8 = f8(vx, m, n) t8 = vx[m:n, 10:20] assert r8.shape == t8.shape assert numpy.all(r8.toarray() == t8.toarray()) f9 = theano.function([x, a, b], x[1:a:j, 1:b:k]) r9 = f9(vx, p, q) t9 = vx[1:p:j, 1:q:k] assert r9.shape == t9.shape assert numpy.all(r9.toarray() == t9.toarray()) #----------------------------------------------------------- # Test mixing None and variables f10 = theano.function([x, a, b], x[:a, :b]) r10 = f10(vx, p, q) t10 = vx[:p, :q] assert r10.shape == t10.shape assert numpy.all(r10.toarray() == t10.toarray()) f11 = theano.function([x, a], x[:, a:]) r11 
= f11(vx, p) t11 = vx[:, p:] assert r11.shape == t11.shape assert numpy.all(r11.toarray() == t11.toarray()) # Test that is work with shared variable sx = theano.shared(vx) f12 = theano.function([a], sx[:, a:]) r12 = f12(p) t12 = vx[:, p:] assert r12.shape == t12.shape assert numpy.all(r12.toarray() == t12.toarray()) #------------------------------------------------------------ # Invalid things # The syntax is a bit awkward because assertRaises forbids # the [] shortcut for getitem. # x[a:b] is not accepted because we don't have sparse vectors self.assertRaises(NotImplementedError, x.__getitem__, (slice(a, b), c)) # x[a:b:step, c:d] is not accepted because scipy silently drops # the step (!) if not is_supported_version: self.assertRaises(ValueError, x.__getitem__, (slice(a, b, -1), slice(c, d))) self.assertRaises(ValueError, x.__getitem__, (slice(a, b), slice(c, d, 2))) else: raise SkipTest("Slicing with step is supported.") # Advanced indexing is not supported self.assertRaises(ValueError, x.__getitem__, (tensor.ivector('l'), slice(a, b))) # Indexing with random things is not supported either self.assertRaises(ValueError, x.__getitem__, slice(tensor.fscalar('f'), None)) self.assertRaises(ValueError, x.__getitem__, (slice(None), slice([1, 3, 4], None))) def test_GetItemScalar(self): sparse_formats = ('csc', 'csr') for format in sparse_formats: x = theano.sparse.csc_matrix('x') a = theano.tensor.iscalar() b = theano.tensor.iscalar() m = 50 n = 42 vx = as_sparse_format(self.rng.binomial(1, 0.5, (97, 100)), format).astype(theano.config.floatX) f1 = theano.function([x, a, b], x[a, b]) r1 = f1(vx, 10, 10) t1 = vx[10, 10] assert r1.shape == t1.shape assert numpy.all(t1 == r1) f2 = theano.function([x, a], x[50, a]) r2 = f2(vx, m) t2 = vx[50, m] assert r2.shape == t2.shape assert numpy.all(t2 == r2) f3 = theano.function([x, a], x[a, 50]) r3 = f3(vx, m) t3 = vx[m, 50] assert r3.shape == t3.shape assert numpy.all(t3 == r3) f4 = theano.function([x], x[50, 42]) r4 = f4(vx) t4 = 
vx[m, n] assert r3.shape == t3.shape assert numpy.all(t4 == r4) # Test that is work with shared variable sx = theano.shared(vx) f1 = theano.function([a, b], sx[a, b]) r1 = f1(10, 10) t1 = vx[10, 10] assert r1.shape == t1.shape assert numpy.all(t1 == r1) class CastTester(utt.InferShapeTester): def setUp(self): super(CastTester, self).setUp() # slow but only test def test_cast(self): for format in sparse.sparse_formats: for i_dtype in sparse.all_dtypes: for o_dtype in sparse.all_dtypes: (variable, ), (data, ) = sparse_random_inputs( format, shape=(4, 7), out_dtype=i_dtype) func = theano.function([variable], cast(variable, o_dtype)) cls = theano.function([variable], Cast(o_dtype)(variable)) prop = theano.function([variable], variable.astype(o_dtype)) t_func, t_cls, t_prop = func(data), cls(data), prop(data) expected = data.toarray().astype(o_dtype) assert t_func.format == format assert t_cls.format == format assert t_prop.format == format t_func = t_func.toarray() t_cls = t_cls.toarray() t_prop = t_prop.toarray() utt.assert_allclose(expected, t_func) utt.assert_allclose(expected, t_cls) utt.assert_allclose(expected, t_prop) @attr('slow') def test_infer_shape(self): for format in sparse.sparse_formats: for i_dtype in sparse.all_dtypes: for o_dtype in sparse.all_dtypes: variable, data = sparse_random_inputs( format, shape=(4, 7), out_dtype=i_dtype) self._compile_and_check(variable, [Cast(o_dtype)(*variable)], data, Cast) def test_grad(self): for format in sparse.sparse_formats: for i_dtype in sparse.float_dtypes: for o_dtype in tensor.float_dtypes: if o_dtype == 'float16': # Don't test float16 output. 
continue _, data = sparse_random_inputs( format, shape=(4, 7), out_dtype=i_dtype) eps = None if o_dtype == 'float32': eps = 1e-2 verify_grad_sparse(Cast(o_dtype), data, eps=eps) def _format_info(nb): x = {} mat = {} for format in sparse.sparse_formats: variable = getattr(theano.sparse, format + '_matrix') spa = getattr(sp, format + '_matrix') x[format] = [variable() for t in range(nb)] mat[format] = [spa(random_lil((3, 4), theano.config.floatX, 8)) for t in range(nb)] return x, mat class _HVStackTester(utt.InferShapeTester): """Test for both HStack and VStack. """ nb = 3 # Number of sparse matrix to stack x, mat = _format_info(nb) def test_op(self): for format in sparse.sparse_formats: for out_f in sparse.sparse_formats: for dtype in sparse.all_dtypes: blocks = self.mat[format] f = theano.function( self.x[format], self.op_class( format=out_f, dtype=dtype)(*self.x[format]), allow_input_downcast=True) tested = f(*blocks) expected = self.expected_f(blocks, format=out_f, dtype=dtype) utt.assert_allclose(expected.toarray(), tested.toarray()) assert tested.format == expected.format assert tested.dtype == expected.dtype def test_infer_shape(self): for format in sparse.sparse_formats: self._compile_and_check(self.x[format], [self.op_class(dtype='float64') (*self.x[format])], self.mat[format], self.op_class) def test_grad(self): for format in sparse.sparse_formats: for out_f in sparse.sparse_formats: for dtype in sparse.float_dtypes: verify_grad_sparse( self.op_class(format=out_f, dtype=dtype), self.mat[format], structured=False, eps=1e-2, ) def _hv_switch(op, expected_function): """Return the right test class for HStack or VStack. :Parameters: - `op`: HStack or VStack class. - `expected_function`: function from scipy for comparaison. 
""" class XStackTester(_HVStackTester): op_class = op def expected_f(self, a, format=None, dtype=None): return expected_function(a, format, dtype) XStackTester.__name__ = op.__name__ + "Tester" return XStackTester HStackTester = _hv_switch(HStack, sp.hstack) VStackTester = _hv_switch(VStack, sp.vstack) class AddSSDataTester(utt.InferShapeTester): x = {} a = {} def setUp(self): super(AddSSDataTester, self).setUp() self.op_class = AddSSData for format in sparse.sparse_formats: variable = getattr(theano.sparse, format + '_matrix') rand = numpy.array( numpy.random.random_integers(3, size=(3, 4)) - 1, dtype=theano.config.floatX) constant = as_sparse_format(rand, format) self.x[format] = [variable() for t in range(2)] self.a[format] = [constant for t in range(2)] def test_op(self): for format in sparse.sparse_formats: f = theano.function( self.x[format], add_s_s_data(*self.x[format])) tested = f(*self.a[format]) expected = 2 * self.a[format][0] utt.assert_allclose(expected.toarray(), tested.toarray()) assert tested.format == expected.format assert tested.dtype == expected.dtype def test_infer_shape(self): for format in sparse.sparse_formats: self._compile_and_check(self.x[format], [add_s_s_data(*self.x[format])], self.a[format], self.op_class) def test_grad(self): for format in sparse.sparse_formats: verify_grad_sparse(self.op_class(), self.a[format], structured=True) def elemwise_checker(op, expected_f, gap=None, test_dtypes=None, grad_test=True, name=None, gap_grad=None): """Return the appropriate test class for the elemwise on sparse. :param op: Op to test. :expected_f: Function use to compare. This function must act on dense matrix. If the op is structured see the `structure_function` decorator to make this function structured. :param gap: Tuple for the range of the random sample. When length is 1, it is assumed to be the exclusive max, when `gap` = (`a`, `b`) it provide a sample from [a, b[. 
If `None` is used, it provide [0, 1] for float dtypes and [0, 50[ for integer dtypes. :param test_dtypes: Particular dtypes for testing the op. If `None`, this is set to the most common dtypes. :param grad_test: True for testing the grad. False will skip this test. :param gap_grad: If None, we reuse gap. Otherwise it is the same as gap but for testing the gradiant of the op. :return: The class that perform the tests, not an instance of the class. """ if test_dtypes is None: test_dtypes = sparse.all_dtypes class Tester(unittest.TestCase): def setUp(self): super(Tester, self).setUp() self.op = op self.expected_f = expected_f self.gap = gap if gap_grad is not None: self.gap_grad = gap_grad else: self.gap_grad = gap # Ensure the test's name is correct. utt.seed_rng() assert eval(self.__class__.__name__) is self.__class__ def test_op(self): for format in sparse.sparse_formats: for dtype in test_dtypes: if dtype == 'int8' or dtype == 'uint8': continue # When testing with unsigned integers, # we must check if the gap contains # negative numbers. if dtype.startswith('uint'): if self.gap and len(self.gap) == 2 and self.gap[0] < 0: if self.gap[1] >= 1: self.gap = (0, self.gap[1]) else: raise TypeError('Gap not suitable for', dtype, self.__name__) variable, data = sparse_random_inputs( format, shape=(4, 7), out_dtype=dtype, gap=self.gap) f = theano.function(variable, self.op(*variable)) tested = f(*data) data = [m.toarray() for m in data] expected = self.expected_f(*data) assert tested.format == format tested = tested.toarray() try: utt.assert_allclose(expected, tested) except AssertionError: raise AssertionError(self.__name__) # Test with int8 as dtype # These tests are not in the loop for two reasons. # First, in recent version of numpy, when a numpy # function have int8 as input dtype, it returns a # float16 as output dtype. Since this does not provide # enough precision, we upcast the data before we apply the # function. 
# Second, the tolerance for the checkup in DebugMode # is too high. for dtype in ['int8', 'uint8']: if dtype in test_dtypes: if self.gap: domain = self.gap # When testing with unsigned integers, # we must check if the gap contains # negative numbers. if dtype == 'uint8': if len(domain) == 2 and domain[0] < 0: if domain[1] >= 1: domain = (0, domain[1]) else: raise TypeError('Gap not suitable for', dtype, self.__name__) else: domain = (0, 5) variable, data = sparse_random_inputs( format, shape=(4, 7), out_dtype=dtype, gap=domain) f = theano.function(variable, self.op(*variable)) old_value = (tensor.basic.float32_atol, tensor.basic.float32_rtol, tensor.basic.float64_atol, tensor.basic.float64_rtol) tensor.basic.float32_atol = 1e-4 tensor.basic.float32_rtol = 1e-3 tensor.basic.float64_atol = 1e-3 tensor.basic.float64_rtol = 1e-4 try: tested = f(*data) finally: (tensor.basic.float32_atol, tensor.basic.float32_rtol, tensor.basic.float64_atol, tensor.basic.float64_rtol) = old_value data = [m.toarray().astype('float32') for m in data] expected = self.expected_f(*data) assert tested.format == format tested = tested.toarray() try: utt.assert_allclose(tested, expected, rtol=1e-2) except AssertionError: raise AssertionError(self.__name__) if grad_test: def test_grad(self): for format in sparse.sparse_formats: for dtype in sparse.float_dtypes: variable, data = sparse_random_inputs( format, shape=(4, 7), out_dtype=dtype, gap=self.gap_grad) verify_grad_sparse(self.op, data, structured=True) # Set proper class name to uniquely identify tests. # Note that it is important to run this code *outside* of the `Tester` # class itself, otherwise it will not work properly for some reason. if name is None: name = op.__name__.capitalize() + 'Tester' Tester.__name__ = name assert 'Roundhalftoeven' not in Tester.__name__ return Tester def test_hstack_vstack(): """ Tests sparse.hstack and sparse.vstack (as opposed to the HStack and VStack classes that they wrap). 
""" def make_block(dtype): return theano.sparse.csr_matrix(name="%s block" % dtype, dtype=dtype) def get_expected_dtype(blocks, to_dtype): if to_dtype is None: block_dtypes = tuple(b.dtype for b in blocks) return theano.scalar.upcast(*block_dtypes) else: return to_dtype # a deliberately weird mix of dtypes to stack dtypes = ('complex128', theano.config.floatX) blocks = [make_block(dtype) for dtype in dtypes] for stack_dimension, stack_function in enumerate((theano.sparse.vstack, theano.sparse.hstack)): for to_dtype in (None, ) + dtypes: stacked_blocks = stack_function(blocks, dtype=to_dtype) expected_dtype = get_expected_dtype(blocks, to_dtype) assert stacked_blocks.dtype == expected_dtype def structure_function(f, index=0): """Decorator to structure a function wich apply on dense matrix. Here, the inputs of the function must be dense matrix. The sparse pattern is determined by finding the zeros. :param index: The index of the parameter from wich the function must be structured. :return: The structured function for its `index` parameter. 
""" def structured_function(*args): pattern = args[index] evaluated = f(*args) evaluated[pattern == 0] = 0 return evaluated return structured_function StructuredSigmoidTester = elemwise_checker( sparse.structured_sigmoid, structure_function(lambda x: 1.0 / (1.0 + numpy.exp(-x))), test_dtypes=[m for m in sparse.all_dtypes if (not m in sparse.complex_dtypes and not m.startswith('uint'))], gap=(-5, 5), name='StructuredSigmoidTester') StructuredExpTester = elemwise_checker( sparse.structured_exp, structure_function(numpy.exp), name='StructuredExpTester') StructuredLogTester = elemwise_checker( sparse.structured_log, structure_function(numpy.log), gap=(0.5, 10), name='StructuredLogTester') StructuredPowTester = elemwise_checker( lambda x: sparse.structured_pow(x, 2), structure_function(lambda x: numpy.power(x, 2)), name='StructuredPowTester') StructuredMinimumTester = elemwise_checker( lambda x: structured_minimum(x, 2), structure_function(lambda x: numpy.minimum(x, 2)), name='StructuredMinimumTester') StructuredMaximumTester = elemwise_checker( lambda x: structured_maximum(x, 2), structure_function(lambda x: numpy.maximum(x, 2)), name='StructuredMaximumTester') StructuredAddTester = elemwise_checker( lambda x: structured_add(x, 2), structure_function(lambda x: numpy.add(x, 2)), name='StructuredAddTester') SinTester = elemwise_checker( sparse.sin, numpy.sin) TanTester = elemwise_checker( sparse.tan, numpy.tan, gap=(-1, 1)) ArcsinTester = elemwise_checker( sparse.arcsin, numpy.arcsin, gap=(-1, 1), gap_grad=(-0.99, 0.99)) ArctanTester = elemwise_checker( sparse.arctan, numpy.arctan) SinhTester = elemwise_checker( sparse.sinh, numpy.sinh) ArcsinhTester = elemwise_checker( sparse.arcsinh, numpy.arcsinh, gap=(-1, 1)) TanhTester = elemwise_checker( sparse.tanh, numpy.tanh, gap=(-1, 1)) ArctanhTester = elemwise_checker( sparse.arctanh, numpy.arctanh, gap=(-0.9, 1), gap_grad=(-0.9, 0.95)) RintTester = elemwise_checker( sparse.rint, numpy.rint, grad_test=False, 
test_dtypes=sparse.float_dtypes) SgnTester = elemwise_checker( sparse.sgn, numpy.sign, grad_test=False, test_dtypes=[m for m in sparse.all_dtypes if (not m in sparse.complex_dtypes and not m.startswith('uint'))]) CeilTester = elemwise_checker( sparse.ceil, numpy.ceil, grad_test=False, test_dtypes=[m for m in sparse.all_dtypes if not m in sparse.complex_dtypes]) FloorTester = elemwise_checker( sparse.floor, numpy.floor, grad_test=False, test_dtypes=[m for m in sparse.all_dtypes if not m in sparse.complex_dtypes]) Log1pTester = elemwise_checker( sparse.log1p, numpy.log1p, gap=(0.5, 10)) Expm1Tester = elemwise_checker( sparse.expm1, numpy.expm1) Deg2radTester = elemwise_checker( sparse.deg2rad, numpy.deg2rad, test_dtypes=[m for m in sparse.all_dtypes if not m in sparse.complex_dtypes]) Rad2degTester = elemwise_checker( sparse.rad2deg, numpy.rad2deg, test_dtypes=[m for m in sparse.all_dtypes if not m in sparse.complex_dtypes]) TruncTester = elemwise_checker( sparse.trunc, numpy.trunc, test_dtypes=[m for m in sparse.all_dtypes if not m in sparse.complex_dtypes]) SqrTester = elemwise_checker( sparse.sqr, lambda x: x * x) SqrtTester = elemwise_checker( sparse.sqrt, numpy.sqrt, gap=(0, 10)) ConjTester = elemwise_checker( sparse.conj, numpy.conj, grad_test=False) class MulSVTester(unittest.TestCase): def setUp(self): utt.seed_rng() def test_mul_s_v_grad(self): sp_types = {'csc': sp.csc_matrix, 'csr': sp.csr_matrix} for format in ['csr', 'csc']: for dtype in ['float32', 'float64']: spmat = sp_types[format](random_lil((4, 3), dtype, 3)) mat = numpy.asarray(numpy.random.rand(3), dtype=dtype) theano.sparse.verify_grad_sparse(mul_s_v, [spmat, mat], structured=True) def test_mul_s_v(self): sp_types = {'csc': sp.csc_matrix, 'csr': sp.csr_matrix} for format in ['csr', 'csc']: for dtype in ['float32', 'float64']: x = theano.sparse.SparseType(format, dtype=dtype)() y = tensor.vector(dtype=dtype) f = theano.function([x, y], mul_s_v(x, y)) spmat = sp_types[format](random_lil((4, 3), 
dtype, 3)) mat = numpy.asarray(numpy.random.rand(3), dtype=dtype) out = f(spmat, mat) utt.assert_allclose(spmat.toarray() * mat, out.toarray()) class StructuredAddSVTester(unittest.TestCase): def setUp(self): utt.seed_rng() def test_structured_add_s_v_grad(self): sp_types = {'csc': sp.csc_matrix, 'csr': sp.csr_matrix} for format in ['csr', 'csc']: for dtype in ['float32', 'float64']: spmat = sp_types[format](random_lil((4, 3), dtype, 3)) mat = numpy.asarray(numpy.random.rand(3), dtype=dtype) theano.sparse.verify_grad_sparse(structured_add_s_v, [spmat, mat], structured=True) def test_structured_add_s_v(self): sp_types = {'csc': sp.csc_matrix, 'csr': sp.csr_matrix} for format in ['csr', 'csc']: for dtype in ['float32', 'float64']: x = theano.sparse.SparseType(format, dtype=dtype)() y = tensor.vector(dtype=dtype) f = theano.function([x, y], structured_add_s_v(x, y)) spmat = sp_types[format](random_lil((4, 3), dtype, 3)) spones = spmat.copy() spones.data = numpy.ones_like(spones.data) mat = numpy.asarray(numpy.random.rand(3), dtype=dtype) out = f(spmat, mat) utt.assert_allclose(as_ndarray(spones.multiply(spmat + mat)), out.toarray()) class TrueDotTester(utt.InferShapeTester): def setUp(self): super(TrueDotTester, self).setUp() self.op = true_dot self.op_class = TrueDot def test_op_ss(self): for format in sparse.sparse_formats: for dtype in sparse.all_dtypes: variable, data = sparse_random_inputs(format, shape=(10, 10), out_dtype=dtype, n=2, p=0.1) f = theano.function(variable, self.op(*variable)) tested = f(*data) x, y = [m.toarray() for m in data] expected = numpy.dot(x, y) assert tested.format == format assert tested.dtype == expected.dtype tested = tested.toarray() utt.assert_allclose(tested, expected) def test_op_sd(self): for format in sparse.sparse_formats: for dtype in sparse.all_dtypes: variable, data = sparse_random_inputs(format, shape=(10, 10), out_dtype=dtype, n=2, p=0.1) variable[1] = tensor.TensorType(dtype=dtype, broadcastable=(False, False))() data[1] = 
data[1].toarray() f = theano.function(variable, self.op(*variable)) tested = f(*data) expected = numpy.dot(data[0].toarray(), data[1]) assert tested.format == format assert tested.dtype == expected.dtype tested = tested.toarray() utt.assert_allclose(tested, expected) def test_infer_shape(self): for format in sparse.sparse_formats: for dtype in sparse.all_dtypes: (x, ), (x_value, ) = sparse_random_inputs(format, shape=(9, 10), out_dtype=dtype, p=0.1) (y, ), (y_value, ) = sparse_random_inputs(format, shape=(10, 24), out_dtype=dtype, p=0.1) variable = [x, y] data = [x_value, y_value] self._compile_and_check(variable, [self.op(*variable)], data, self.op_class) def test_grad(self): for format in sparse.sparse_formats: for dtype in sparse.float_dtypes: (x, ), (x_value, ) = sparse_random_inputs(format, shape=(9, 10), out_dtype=dtype, p=0.1) (y, ), (y_value, ) = sparse_random_inputs(format, shape=(10, 24), out_dtype=dtype, p=0.1) variable = [x, y] data = [x_value, y_value] verify_grad_sparse( self.op, data, structured=False) class SamplingDotTester(utt.InferShapeTester): x = [tensor.matrix() for t in range(2)] x.append(sparse.csr_matrix()) # unsquare shape a = [numpy.array(numpy.random.random_integers(5, size=(4, 3)) - 1, dtype=theano.config.floatX), numpy.array(numpy.random.random_integers(5, size=(5, 3)) - 1, dtype=theano.config.floatX), numpy.array(numpy.random.random_integers(2, size=(4, 5)) - 1, dtype=theano.config.floatX) ] a[2] = sp.csr_matrix(a[2]) def setUp(self): super(SamplingDotTester, self).setUp() self.op_class = SamplingDot def test_op(self): f = theano.function( self.x, sampling_dot(*self.x)) tested = f(*self.a) x, y, p = self.a expected = p.multiply(numpy.dot(x, y.T)) utt.assert_allclose(as_ndarray(expected), tested.toarray()) assert tested.format == 'csr' assert tested.dtype == expected.dtype def test_infer_shape(self): self._compile_and_check(self.x, [sampling_dot(*self.x)], self.a, self.op_class, excluding=['local_sampling_dot_csr']) def 
test_grad(self): def _helper(x, y): return sampling_dot(x, y, self.a[2]) verify_grad_sparse(_helper, self.a[:2]) import theano.tensor.tests.test_sharedvar test_shared_options = theano.tensor.tests.test_sharedvar.makeSharedTester( shared_constructor_=theano.sparse.shared, dtype_='float64', get_value_borrow_true_alias_=True, shared_borrow_true_alias_=True, set_value_borrow_true_alias_=True, set_value_inplace_=False, set_cast_value_inplace_=False, shared_constructor_accept_ndarray_=False, internal_type_=scipy.sparse.csc_matrix, test_internal_type_=scipy.sparse.issparse, theano_fct_=lambda a: dense_from_sparse(a * 2.), ref_fct_=lambda a: numpy.asarray((a * 2).todense()), cast_value_=scipy.sparse.csr_matrix, name='test_shared_options', ) if __name__ == '__main__': unittest.main()
mit
adedayo/intellij-community
python/lib/Lib/site-packages/django/forms/widgets.py
73
32961
""" HTML Widget classes """ import datetime from itertools import chain import time from urlparse import urljoin from util import flatatt import django.utils.copycompat as copy from django.conf import settings from django.utils.datastructures import MultiValueDict, MergeDict from django.utils.html import escape, conditional_escape from django.utils.translation import ugettext, ugettext_lazy from django.utils.encoding import StrAndUnicode, force_unicode from django.utils.safestring import mark_safe from django.utils import datetime_safe, formats __all__ = ( 'Media', 'MediaDefiningClass', 'Widget', 'TextInput', 'PasswordInput', 'HiddenInput', 'MultipleHiddenInput', 'ClearableFileInput', 'FileInput', 'DateInput', 'DateTimeInput', 'TimeInput', 'Textarea', 'CheckboxInput', 'Select', 'NullBooleanSelect', 'SelectMultiple', 'RadioSelect', 'CheckboxSelectMultiple', 'MultiWidget', 'SplitDateTimeWidget', ) MEDIA_TYPES = ('css','js') class Media(StrAndUnicode): def __init__(self, media=None, **kwargs): if media: media_attrs = media.__dict__ else: media_attrs = kwargs self._css = {} self._js = [] for name in MEDIA_TYPES: getattr(self, 'add_' + name)(media_attrs.get(name, None)) # Any leftover attributes must be invalid. # if media_attrs != {}: # raise TypeError("'class Media' has invalid attribute(s): %s" % ','.join(media_attrs.keys())) def __unicode__(self): return self.render() def render(self): return mark_safe(u'\n'.join(chain(*[getattr(self, 'render_' + name)() for name in MEDIA_TYPES]))) def render_js(self): return [u'<script type="text/javascript" src="%s"></script>' % self.absolute_path(path) for path in self._js] def render_css(self): # To keep rendering order consistent, we can't just iterate over items(). # We need to sort the keys, and iterate over the sorted list. 
media = self._css.keys() media.sort() return chain(*[ [u'<link href="%s" type="text/css" media="%s" rel="stylesheet" />' % (self.absolute_path(path), medium) for path in self._css[medium]] for medium in media]) def absolute_path(self, path, prefix=None): if path.startswith(u'http://') or path.startswith(u'https://') or path.startswith(u'/'): return path if prefix is None: if settings.STATIC_URL is None: # backwards compatibility prefix = settings.MEDIA_URL else: prefix = settings.STATIC_URL return urljoin(prefix, path) def __getitem__(self, name): "Returns a Media object that only contains media of the given type" if name in MEDIA_TYPES: return Media(**{str(name): getattr(self, '_' + name)}) raise KeyError('Unknown media type "%s"' % name) def add_js(self, data): if data: for path in data: if path not in self._js: self._js.append(path) def add_css(self, data): if data: for medium, paths in data.items(): for path in paths: if not self._css.get(medium) or path not in self._css[medium]: self._css.setdefault(medium, []).append(path) def __add__(self, other): combined = Media() for name in MEDIA_TYPES: getattr(combined, 'add_' + name)(getattr(self, '_' + name, None)) getattr(combined, 'add_' + name)(getattr(other, '_' + name, None)) return combined def media_property(cls): def _media(self): # Get the media property of the superclass, if it exists if hasattr(super(cls, self), 'media'): base = super(cls, self).media else: base = Media() # Get the media definition for this class definition = getattr(cls, 'Media', None) if definition: extend = getattr(definition, 'extend', True) if extend: if extend == True: m = base else: m = Media() for medium in extend: m = m + base[medium] return m + Media(definition) else: return Media(definition) else: return base return property(_media) class MediaDefiningClass(type): "Metaclass for classes that can have media definitions" def __new__(cls, name, bases, attrs): new_class = super(MediaDefiningClass, cls).__new__(cls, name, bases, 
attrs) if 'media' not in attrs: new_class.media = media_property(new_class) return new_class class Widget(object): __metaclass__ = MediaDefiningClass is_hidden = False # Determines whether this corresponds to an <input type="hidden">. needs_multipart_form = False # Determines does this widget need multipart-encrypted form is_localized = False is_required = False def __init__(self, attrs=None): if attrs is not None: self.attrs = attrs.copy() else: self.attrs = {} def __deepcopy__(self, memo): obj = copy.copy(self) obj.attrs = self.attrs.copy() memo[id(self)] = obj return obj def render(self, name, value, attrs=None): """ Returns this Widget rendered as HTML, as a Unicode string. The 'value' given is not guaranteed to be valid input, so subclass implementations should program defensively. """ raise NotImplementedError def build_attrs(self, extra_attrs=None, **kwargs): "Helper function for building an attribute dictionary." attrs = dict(self.attrs, **kwargs) if extra_attrs: attrs.update(extra_attrs) return attrs def value_from_datadict(self, data, files, name): """ Given a dictionary of data and this widget's name, returns the value of this widget. Returns None if it's not provided. """ return data.get(name, None) def _has_changed(self, initial, data): """ Return True if data differs from initial. """ # For purposes of seeing whether something has changed, None is # the same as an empty string, if the data or inital value we get # is None, replace it w/ u''. if data is None: data_value = u'' else: data_value = data if initial is None: initial_value = u'' else: initial_value = initial if force_unicode(initial_value) != force_unicode(data_value): return True return False def id_for_label(self, id_): """ Returns the HTML ID attribute of this Widget for use by a <label>, given the ID of the field. Returns None if no ID is available. This hook is necessary because some widgets have multiple HTML elements and, thus, multiple IDs. 
In that case, this method should return an ID value that corresponds to the first ID in the widget's tags. """ return id_ id_for_label = classmethod(id_for_label) class Input(Widget): """ Base class for all <input> widgets (except type='checkbox' and type='radio', which are special). """ input_type = None # Subclasses must define this. def _format_value(self, value): if self.is_localized: return formats.localize_input(value) return value def render(self, name, value, attrs=None): if value is None: value = '' final_attrs = self.build_attrs(attrs, type=self.input_type, name=name) if value != '': # Only add the 'value' attribute if a value is non-empty. final_attrs['value'] = force_unicode(self._format_value(value)) return mark_safe(u'<input%s />' % flatatt(final_attrs)) class TextInput(Input): input_type = 'text' class PasswordInput(Input): input_type = 'password' def __init__(self, attrs=None, render_value=False): super(PasswordInput, self).__init__(attrs) self.render_value = render_value def render(self, name, value, attrs=None): if not self.render_value: value=None return super(PasswordInput, self).render(name, value, attrs) class HiddenInput(Input): input_type = 'hidden' is_hidden = True class MultipleHiddenInput(HiddenInput): """ A widget that handles <input type="hidden"> for fields that have a list of values. """ def __init__(self, attrs=None, choices=()): super(MultipleHiddenInput, self).__init__(attrs) # choices can be any iterable self.choices = choices def render(self, name, value, attrs=None, choices=()): if value is None: value = [] final_attrs = self.build_attrs(attrs, type=self.input_type, name=name) id_ = final_attrs.get('id', None) inputs = [] for i, v in enumerate(value): input_attrs = dict(value=force_unicode(v), **final_attrs) if id_: # An ID attribute was given. Add a numeric index as a suffix # so that the inputs don't all have the same ID attribute. 
input_attrs['id'] = '%s_%s' % (id_, i) inputs.append(u'<input%s />' % flatatt(input_attrs)) return mark_safe(u'\n'.join(inputs)) def value_from_datadict(self, data, files, name): if isinstance(data, (MultiValueDict, MergeDict)): return data.getlist(name) return data.get(name, None) class FileInput(Input): input_type = 'file' needs_multipart_form = True def render(self, name, value, attrs=None): return super(FileInput, self).render(name, None, attrs=attrs) def value_from_datadict(self, data, files, name): "File widgets take data from FILES, not POST" return files.get(name, None) def _has_changed(self, initial, data): if data is None: return False return True FILE_INPUT_CONTRADICTION = object() class ClearableFileInput(FileInput): initial_text = ugettext_lazy('Currently') input_text = ugettext_lazy('Change') clear_checkbox_label = ugettext_lazy('Clear') template_with_initial = u'%(initial_text)s: %(initial)s %(clear_template)s<br />%(input_text)s: %(input)s' template_with_clear = u'%(clear)s <label for="%(clear_checkbox_id)s">%(clear_checkbox_label)s</label>' def clear_checkbox_name(self, name): """ Given the name of the file input, return the name of the clear checkbox input. """ return name + '-clear' def clear_checkbox_id(self, name): """ Given the name of the clear checkbox input, return the HTML id for it. 
""" return name + '_id' def render(self, name, value, attrs=None): substitutions = { 'initial_text': self.initial_text, 'input_text': self.input_text, 'clear_template': '', 'clear_checkbox_label': self.clear_checkbox_label, } template = u'%(input)s' substitutions['input'] = super(ClearableFileInput, self).render(name, value, attrs) if value and hasattr(value, "url"): template = self.template_with_initial substitutions['initial'] = (u'<a target="_blank" href="%s">%s</a>' % (value.url, value)) if not self.is_required: checkbox_name = self.clear_checkbox_name(name) checkbox_id = self.clear_checkbox_id(checkbox_name) substitutions['clear_checkbox_name'] = checkbox_name substitutions['clear_checkbox_id'] = checkbox_id substitutions['clear'] = CheckboxInput().render(checkbox_name, False, attrs={'id': checkbox_id}) substitutions['clear_template'] = self.template_with_clear % substitutions return mark_safe(template % substitutions) def value_from_datadict(self, data, files, name): upload = super(ClearableFileInput, self).value_from_datadict(data, files, name) if not self.is_required and CheckboxInput().value_from_datadict( data, files, self.clear_checkbox_name(name)): if upload: # If the user contradicts themselves (uploads a new file AND # checks the "clear" checkbox), we return a unique marker # object that FileField will turn into a ValidationError. return FILE_INPUT_CONTRADICTION # False signals to clear any existing value, as opposed to just None return False return upload class Textarea(Widget): def __init__(self, attrs=None): # The 'rows' and 'cols' attributes are required for HTML correctness. 
default_attrs = {'cols': '40', 'rows': '10'} if attrs: default_attrs.update(attrs) super(Textarea, self).__init__(default_attrs) def render(self, name, value, attrs=None): if value is None: value = '' final_attrs = self.build_attrs(attrs, name=name) return mark_safe(u'<textarea%s>%s</textarea>' % (flatatt(final_attrs), conditional_escape(force_unicode(value)))) class DateInput(Input): input_type = 'text' format = '%Y-%m-%d' # '2006-10-25' def __init__(self, attrs=None, format=None): super(DateInput, self).__init__(attrs) if format: self.format = format self.manual_format = True else: self.format = formats.get_format('DATE_INPUT_FORMATS')[0] self.manual_format = False def _format_value(self, value): if self.is_localized and not self.manual_format: return formats.localize_input(value) elif hasattr(value, 'strftime'): value = datetime_safe.new_date(value) return value.strftime(self.format) return value def _has_changed(self, initial, data): # If our field has show_hidden_initial=True, initial will be a string # formatted by HiddenInput using formats.localize_input, which is not # necessarily the format used for this widget. Attempt to convert it. 
try: input_format = formats.get_format('DATE_INPUT_FORMATS')[0] initial = datetime.date(*time.strptime(initial, input_format)[:3]) except (TypeError, ValueError): pass return super(DateInput, self)._has_changed(self._format_value(initial), data) class DateTimeInput(Input): input_type = 'text' format = '%Y-%m-%d %H:%M:%S' # '2006-10-25 14:30:59' def __init__(self, attrs=None, format=None): super(DateTimeInput, self).__init__(attrs) if format: self.format = format self.manual_format = True else: self.format = formats.get_format('DATETIME_INPUT_FORMATS')[0] self.manual_format = False def _format_value(self, value): if self.is_localized and not self.manual_format: return formats.localize_input(value) elif hasattr(value, 'strftime'): value = datetime_safe.new_datetime(value) return value.strftime(self.format) return value def _has_changed(self, initial, data): # If our field has show_hidden_initial=True, initial will be a string # formatted by HiddenInput using formats.localize_input, which is not # necessarily the format used for this widget. Attempt to convert it. 
try: input_format = formats.get_format('DATETIME_INPUT_FORMATS')[0] initial = datetime.datetime(*time.strptime(initial, input_format)[:6]) except (TypeError, ValueError): pass return super(DateTimeInput, self)._has_changed(self._format_value(initial), data) class TimeInput(Input): input_type = 'text' format = '%H:%M:%S' # '14:30:59' def __init__(self, attrs=None, format=None): super(TimeInput, self).__init__(attrs) if format: self.format = format self.manual_format = True else: self.format = formats.get_format('TIME_INPUT_FORMATS')[0] self.manual_format = False def _format_value(self, value): if self.is_localized and not self.manual_format: return formats.localize_input(value) elif hasattr(value, 'strftime'): return value.strftime(self.format) return value def _has_changed(self, initial, data): # If our field has show_hidden_initial=True, initial will be a string # formatted by HiddenInput using formats.localize_input, which is not # necessarily the format used for this widget. Attempt to convert it. try: input_format = formats.get_format('TIME_INPUT_FORMATS')[0] initial = datetime.time(*time.strptime(initial, input_format)[3:6]) except (TypeError, ValueError): pass return super(TimeInput, self)._has_changed(self._format_value(initial), data) class CheckboxInput(Widget): def __init__(self, attrs=None, check_test=bool): super(CheckboxInput, self).__init__(attrs) # check_test is a callable that takes a value and returns True # if the checkbox should be checked for that value. self.check_test = check_test def render(self, name, value, attrs=None): final_attrs = self.build_attrs(attrs, type='checkbox', name=name) try: result = self.check_test(value) except: # Silently catch exceptions result = False if result: final_attrs['checked'] = 'checked' if value not in ('', True, False, None): # Only add the 'value' attribute if a value is non-empty. 
final_attrs['value'] = force_unicode(value) return mark_safe(u'<input%s />' % flatatt(final_attrs)) def value_from_datadict(self, data, files, name): if name not in data: # A missing value means False because HTML form submission does not # send results for unselected checkboxes. return False value = data.get(name) # Translate true and false strings to boolean values. values = {'true': True, 'false': False} if isinstance(value, basestring): value = values.get(value.lower(), value) return value def _has_changed(self, initial, data): # Sometimes data or initial could be None or u'' which should be the # same thing as False. return bool(initial) != bool(data) class Select(Widget): def __init__(self, attrs=None, choices=()): super(Select, self).__init__(attrs) # choices can be any iterable, but we may need to render this widget # multiple times. Thus, collapse it into a list so it can be consumed # more than once. self.choices = list(choices) def render(self, name, value, attrs=None, choices=()): if value is None: value = '' final_attrs = self.build_attrs(attrs, name=name) output = [u'<select%s>' % flatatt(final_attrs)] options = self.render_options(choices, [value]) if options: output.append(options) output.append(u'</select>') return mark_safe(u'\n'.join(output)) def render_option(self, selected_choices, option_value, option_label): option_value = force_unicode(option_value) selected_html = (option_value in selected_choices) and u' selected="selected"' or '' return u'<option value="%s"%s>%s</option>' % ( escape(option_value), selected_html, conditional_escape(force_unicode(option_label))) def render_options(self, choices, selected_choices): # Normalize to strings. 
selected_choices = set([force_unicode(v) for v in selected_choices]) output = [] for option_value, option_label in chain(self.choices, choices): if isinstance(option_label, (list, tuple)): output.append(u'<optgroup label="%s">' % escape(force_unicode(option_value))) for option in option_label: output.append(self.render_option(selected_choices, *option)) output.append(u'</optgroup>') else: output.append(self.render_option(selected_choices, option_value, option_label)) return u'\n'.join(output) class NullBooleanSelect(Select): """ A Select Widget intended to be used with NullBooleanField. """ def __init__(self, attrs=None): choices = ((u'1', ugettext('Unknown')), (u'2', ugettext('Yes')), (u'3', ugettext('No'))) super(NullBooleanSelect, self).__init__(attrs, choices) def render(self, name, value, attrs=None, choices=()): try: value = {True: u'2', False: u'3', u'2': u'2', u'3': u'3'}[value] except KeyError: value = u'1' return super(NullBooleanSelect, self).render(name, value, attrs, choices) def value_from_datadict(self, data, files, name): value = data.get(name, None) return {u'2': True, True: True, 'True': True, u'3': False, 'False': False, False: False}.get(value, None) def _has_changed(self, initial, data): # For a NullBooleanSelect, None (unknown) and False (No) # are not the same if initial is not None: initial = bool(initial) if data is not None: data = bool(data) return initial != data class SelectMultiple(Select): def render(self, name, value, attrs=None, choices=()): if value is None: value = [] final_attrs = self.build_attrs(attrs, name=name) output = [u'<select multiple="multiple"%s>' % flatatt(final_attrs)] options = self.render_options(choices, value) if options: output.append(options) output.append('</select>') return mark_safe(u'\n'.join(output)) def value_from_datadict(self, data, files, name): if isinstance(data, (MultiValueDict, MergeDict)): return data.getlist(name) return data.get(name, None) def _has_changed(self, initial, data): if initial is 
None: initial = [] if data is None: data = [] if len(initial) != len(data): return True initial_set = set([force_unicode(value) for value in initial]) data_set = set([force_unicode(value) for value in data]) return data_set != initial_set class RadioInput(StrAndUnicode): """ An object used by RadioFieldRenderer that represents a single <input type='radio'>. """ def __init__(self, name, value, attrs, choice, index): self.name, self.value = name, value self.attrs = attrs self.choice_value = force_unicode(choice[0]) self.choice_label = force_unicode(choice[1]) self.index = index def __unicode__(self): if 'id' in self.attrs: label_for = ' for="%s_%s"' % (self.attrs['id'], self.index) else: label_for = '' choice_label = conditional_escape(force_unicode(self.choice_label)) return mark_safe(u'<label%s>%s %s</label>' % (label_for, self.tag(), choice_label)) def is_checked(self): return self.value == self.choice_value def tag(self): if 'id' in self.attrs: self.attrs['id'] = '%s_%s' % (self.attrs['id'], self.index) final_attrs = dict(self.attrs, type='radio', name=self.name, value=self.choice_value) if self.is_checked(): final_attrs['checked'] = 'checked' return mark_safe(u'<input%s />' % flatatt(final_attrs)) class RadioFieldRenderer(StrAndUnicode): """ An object used by RadioSelect to enable customization of radio widgets. 
""" def __init__(self, name, value, attrs, choices): self.name, self.value, self.attrs = name, value, attrs self.choices = choices def __iter__(self): for i, choice in enumerate(self.choices): yield RadioInput(self.name, self.value, self.attrs.copy(), choice, i) def __getitem__(self, idx): choice = self.choices[idx] # Let the IndexError propogate return RadioInput(self.name, self.value, self.attrs.copy(), choice, idx) def __unicode__(self): return self.render() def render(self): """Outputs a <ul> for this set of radio fields.""" return mark_safe(u'<ul>\n%s\n</ul>' % u'\n'.join([u'<li>%s</li>' % force_unicode(w) for w in self])) class RadioSelect(Select): renderer = RadioFieldRenderer def __init__(self, *args, **kwargs): # Override the default renderer if we were passed one. renderer = kwargs.pop('renderer', None) if renderer: self.renderer = renderer super(RadioSelect, self).__init__(*args, **kwargs) def get_renderer(self, name, value, attrs=None, choices=()): """Returns an instance of the renderer.""" if value is None: value = '' str_value = force_unicode(value) # Normalize to string. final_attrs = self.build_attrs(attrs) choices = list(chain(self.choices, choices)) return self.renderer(name, str_value, final_attrs, choices) def render(self, name, value, attrs=None, choices=()): return self.get_renderer(name, value, attrs, choices).render() def id_for_label(self, id_): # RadioSelect is represented by multiple <input type="radio"> fields, # each of which has a distinct ID. The IDs are made distinct by a "_X" # suffix, where X is the zero-based index of the radio field. Thus, # the label for a RadioSelect should reference the first one ('_0'). 
if id_: id_ += '_0' return id_ id_for_label = classmethod(id_for_label) class CheckboxSelectMultiple(SelectMultiple): def render(self, name, value, attrs=None, choices=()): if value is None: value = [] has_id = attrs and 'id' in attrs final_attrs = self.build_attrs(attrs, name=name) output = [u'<ul>'] # Normalize to strings str_values = set([force_unicode(v) for v in value]) for i, (option_value, option_label) in enumerate(chain(self.choices, choices)): # If an ID attribute was given, add a numeric index as a suffix, # so that the checkboxes don't all have the same ID attribute. if has_id: final_attrs = dict(final_attrs, id='%s_%s' % (attrs['id'], i)) label_for = u' for="%s"' % final_attrs['id'] else: label_for = '' cb = CheckboxInput(final_attrs, check_test=lambda value: value in str_values) option_value = force_unicode(option_value) rendered_cb = cb.render(name, option_value) option_label = conditional_escape(force_unicode(option_label)) output.append(u'<li><label%s>%s %s</label></li>' % (label_for, rendered_cb, option_label)) output.append(u'</ul>') return mark_safe(u'\n'.join(output)) def id_for_label(self, id_): # See the comment for RadioSelect.id_for_label() if id_: id_ += '_0' return id_ id_for_label = classmethod(id_for_label) class MultiWidget(Widget): """ A widget that is composed of multiple widgets. Its render() method is different than other widgets', because it has to figure out how to split a single value for display in multiple widgets. The ``value`` argument can be one of two things: * A list. * A normal value (e.g., a string) that has been "compressed" from a list of values. In the second case -- i.e., if the value is NOT a list -- render() will first "decompress" the value into a list before rendering it. It does so by calling the decompress() method, which MultiWidget subclasses must implement. This method takes a single "compressed" value and returns a list. 
When render() does its HTML rendering, each value in the list is rendered with the corresponding widget -- the first value is rendered in the first widget, the second value is rendered in the second widget, etc. Subclasses may implement format_output(), which takes the list of rendered widgets and returns a string of HTML that formats them any way you'd like. You'll probably want to use this class with MultiValueField. """ def __init__(self, widgets, attrs=None): self.widgets = [isinstance(w, type) and w() or w for w in widgets] super(MultiWidget, self).__init__(attrs) def render(self, name, value, attrs=None): if self.is_localized: for widget in self.widgets: widget.is_localized = self.is_localized # value is a list of values, each corresponding to a widget # in self.widgets. if not isinstance(value, list): value = self.decompress(value) output = [] final_attrs = self.build_attrs(attrs) id_ = final_attrs.get('id', None) for i, widget in enumerate(self.widgets): try: widget_value = value[i] except IndexError: widget_value = None if id_: final_attrs = dict(final_attrs, id='%s_%s' % (id_, i)) output.append(widget.render(name + '_%s' % i, widget_value, final_attrs)) return mark_safe(self.format_output(output)) def id_for_label(self, id_): # See the comment for RadioSelect.id_for_label() if id_: id_ += '_0' return id_ id_for_label = classmethod(id_for_label) def value_from_datadict(self, data, files, name): return [widget.value_from_datadict(data, files, name + '_%s' % i) for i, widget in enumerate(self.widgets)] def _has_changed(self, initial, data): if initial is None: initial = [u'' for x in range(0, len(data))] else: if not isinstance(initial, list): initial = self.decompress(initial) for widget, initial, data in zip(self.widgets, initial, data): if widget._has_changed(initial, data): return True return False def format_output(self, rendered_widgets): """ Given a list of rendered widgets (as strings), returns a Unicode string representing the HTML for the whole 
lot. This hook allows you to format the HTML design of the widgets, if needed. """ return u''.join(rendered_widgets) def decompress(self, value): """ Returns a list of decompressed values for the given compressed value. The given value can be assumed to be valid, but not necessarily non-empty. """ raise NotImplementedError('Subclasses must implement this method.') def _get_media(self): "Media for a multiwidget is the combination of all media of the subwidgets" media = Media() for w in self.widgets: media = media + w.media return media media = property(_get_media) def __deepcopy__(self, memo): obj = super(MultiWidget, self).__deepcopy__(memo) obj.widgets = copy.deepcopy(self.widgets) return obj class SplitDateTimeWidget(MultiWidget): """ A Widget that splits datetime input into two <input type="text"> boxes. """ date_format = DateInput.format time_format = TimeInput.format def __init__(self, attrs=None, date_format=None, time_format=None): widgets = (DateInput(attrs=attrs, format=date_format), TimeInput(attrs=attrs, format=time_format)) super(SplitDateTimeWidget, self).__init__(widgets, attrs) def decompress(self, value): if value: return [value.date(), value.time().replace(microsecond=0)] return [None, None] class SplitHiddenDateTimeWidget(SplitDateTimeWidget): """ A Widget that splits datetime input into two <input type="hidden"> inputs. """ is_hidden = True def __init__(self, attrs=None, date_format=None, time_format=None): super(SplitHiddenDateTimeWidget, self).__init__(attrs, date_format, time_format) for widget in self.widgets: widget.input_type = 'hidden' widget.is_hidden = True
apache-2.0
twinshadow/ScintillaGL
scintilla/HFacer.py
22
2312
#!/usr/bin/env python # HFacer.py - regenerate the Scintilla.h and SciLexer.h files from the Scintilla.iface interface # definition file. # The header files are copied to a temporary file apart from the section between a /* ++Autogenerated*/ # comment and a /* --Autogenerated*/ comment which is generated by the printHFile and printLexHFile # functions. After the temporary file is created, it is copied back to the original file name. import sys import os import Face def Contains(s,sub): return s.find(sub) != -1 def printLexHFile(f,out): for name in f.order: v = f.features[name] if v["FeatureType"] in ["val"]: if Contains(name, "SCE_") or Contains(name, "SCLEX_"): out.write("#define " + name + " " + v["Value"] + "\n") def printHFile(f,out): for name in f.order: v = f.features[name] if v["Category"] != "Deprecated": if v["FeatureType"] in ["fun", "get", "set"]: featureDefineName = "SCI_" + name.upper() out.write("#define " + featureDefineName + " " + v["Value"] + "\n") elif v["FeatureType"] in ["evt"]: featureDefineName = "SCN_" + name.upper() out.write("#define " + featureDefineName + " " + v["Value"] + "\n") elif v["FeatureType"] in ["val"]: if not (Contains(name, "SCE_") or Contains(name, "SCLEX_")): out.write("#define " + name + " " + v["Value"] + "\n") def CopyWithInsertion(input, output, genfn, definition): copying = 1 for line in input.readlines(): if copying: output.write(line) if Contains(line, "/* ++Autogenerated"): copying = 0 genfn(definition, output) if Contains(line, "/* --Autogenerated"): copying = 1 output.write(line) def contents(filename): f = open(filename) t = f.read() f.close() return t def Regenerate(filename, genfn, definition): inText = contents(filename) tempname = "HFacer.tmp" out = open(tempname,"w") hfile = open(filename) CopyWithInsertion(hfile, out, genfn, definition) out.close() hfile.close() outText = contents(tempname) if inText == outText: os.unlink(tempname) else: os.unlink(filename) os.rename(tempname, filename) f = Face.Face() try: 
f.ReadFromFile("Scintilla.iface") Regenerate("Scintilla.h", printHFile, f) Regenerate("SciLexer.h", printLexHFile, f) print("Maximum ID is %s" % max([x for x in f.values if int(x) < 3000])) except: raise
isc
awm/dmr5200
dmr5200.py
1
3719
# -*- coding: utf-8 -*- import io import time import serial import select class Dmr5200(object): """ Representation of a connection to a DMR-5200 digital multimeter. """ def __init__(self, port, timeout=2): """ Opens the serial connection to the meter. port - The platform dependent serial port string timeout - The timeout (in seconds) to use for serial read/write operations """ self.ser = serial.Serial(port, baudrate=1200, bytesize=serial.SEVENBITS, parity=serial.PARITY_NONE, stopbits=serial.STOPBITS_TWO, timeout=timeout) self.sio = io.TextIOWrapper(io.BufferedRWPair(self.ser, self.ser), newline='\r') def request(self): """ Request one reading from the meter. None will be returned if any error occured when processing the returned data, otherwise a dictionary with the following fields will be returned: { 'function': <meter function string>, 'value': <reading value>, 'units': <measurement units string>, 'timestamp': <timestamp of reading reception>, 'raw': <raw serial message string> } 'function' may be one of "DC", "AC", "RES", "FR", "CAP", "IND", "TEMP", "LOG", "BUZ", or "DIO" 'value' may be numeric, True/False/None for logic levels, True/False for continuity, or one of "OPEN"/"SHORT"/"GOOD" for the diode setting, or None if it should be numeric but the meter registered an overload condition 'units' is a string describing the measurement units, or None if not applicable 'timestamp' is an arbitary floating point time value in seconds which can be used to determine the actual interval between completed readings 'raw' is the actual string read from the serial port, including the trailing carriage return character """ try: self.ser.write('\r') line = self.sio.readline() except select.error: return None if len(line) < 6: return None parts = line.split() result = { 'function': parts[0], 'value': None, 'units': None, 'timestamp': time.time(), 'raw': line } if parts[0] in ["DC", "AC", "RES", "FR", "CAP", "IND", "TEMP"]: try: result['value'] = float(parts[1]) 
result['units'] = parts[2] if parts[0] == "TEMP": result['units'] = u"°C" elif parts[0] == "RES": if parts[2] == "MOHM": result['units'] = u"MΩ" elif parts[2] == "OHM": result['units'] = u"Ω" except ValueError: result['value'] = None except IndexError: return None elif parts[0] == "LOG": try: result['value'] = {'LOW': False, 'HIGH': True, 'UNDET': None}[parts[1]] except IndexError: return None elif parts[0] == "BUZ": try: result['value'] = {'OPEN': False, 'SHORT': True}[parts[1]] except IndexError: return None elif parts[0] == "DIO": try: if parts[1] in ["OPEN", "SHORT", "GOOD"]: result['value'] = parts[1] else: return None except IndexError: return None return result
bsd-3-clause
repotvsupertuga/tvsupertuga.repository
script.module.submundo.addon/pkcs7.py
111
1943
import binascii import StringIO class PKCS7Encoder(object): ''' RFC 2315: PKCS#7 page 21 Some content-encryption algorithms assume the input length is a multiple of k octets, where k > 1, and let the application define a method for handling inputs whose lengths are not a multiple of k octets. For such algorithms, the method shall be to pad the input at the trailing end with k - (l mod k) octets all having value k - (l mod k), where l is the length of the input. In other words, the input is padded at the trailing end with one of the following strings: 01 -- if l mod k = k-1 02 02 -- if l mod k = k-2 . . . k k ... k k -- if l mod k = 0 The padding can be removed unambiguously since all input is padded and no padding string is a suffix of another. This padding method is well-defined if and only if k < 256; methods for larger k are an open issue for further study. ''' def __init__(self, k=16): self.k = k ## @param text The padded text for which the padding is to be removed. # @exception ValueError Raised when the input padding is missing or corrupt. def decode(self, text): ''' Remove the PKCS#7 padding from a text string ''' nl = len(text) val = int(binascii.hexlify(text[-1]), 16) if val > self.k: raise ValueError('Input is not padded or padding is corrupt') l = nl - val return text[:l] ## @param text The text to encode. def encode(self, text): ''' Pad an input string according to PKCS#7 ''' l = len(text) output = StringIO.StringIO() val = self.k - (l % self.k) for _ in xrange(val): output.write('%02x' % val) return text + binascii.unhexlify(output.getvalue())
gpl-2.0
hfp/tensorflow-xsmm
tensorflow/contrib/text/python/ops/skip_gram_ops.py
76
21608
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Skip-gram sampling ops from https://arxiv.org/abs/1301.3781.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import csv from tensorflow.contrib import lookup from tensorflow.contrib.text.python.ops import gen_skip_gram_ops from tensorflow.contrib.util import loader from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import random_seed from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import random_ops from tensorflow.python.platform import gfile from tensorflow.python.platform import resource_loader from tensorflow.python.training import input as input_ops _checkpoint_ops_so = loader.load_op_library( resource_loader.get_path_to_datafile("_skip_gram_ops.so")) ops.NotDifferentiable("SkipGramGenerateCandidates") def skip_gram_sample(input_tensor, min_skips=1, max_skips=5, start=0, limit=-1, emit_self_as_target=False, vocab_freq_table=None, vocab_min_count=None, vocab_subsampling=None, corpus_size=None, batch_size=None, batch_capacity=None, seed=None, name=None): """Generates skip-gram token and label paired Tensors from the input tensor. 
Generates skip-gram `("token", "label")` pairs using each element in the rank-1 `input_tensor` as a token. The window size used for each token will be randomly selected from the range specified by `[min_skips, max_skips]`, inclusive. See https://arxiv.org/abs/1301.3781 for more details about skip-gram. For example, given `input_tensor = ["the", "quick", "brown", "fox", "jumps"]`, `min_skips = 1`, `max_skips = 2`, `emit_self_as_target = False`, the output `(tokens, labels)` pairs for the token "quick" will be randomly selected from either `(tokens=["quick", "quick"], labels=["the", "brown"])` for 1 skip, or `(tokens=["quick", "quick", "quick"], labels=["the", "brown", "fox"])` for 2 skips. If `emit_self_as_target = True`, each token will also be emitted as a label for itself. From the previous example, the output will be either `(tokens=["quick", "quick", "quick"], labels=["the", "quick", "brown"])` for 1 skip, or `(tokens=["quick", "quick", "quick", "quick"], labels=["the", "quick", "brown", "fox"])` for 2 skips. The same process is repeated for each element of `input_tensor` and concatenated together into the two output rank-1 `Tensors` (one for all the tokens, another for all the labels). If `vocab_freq_table` is specified, tokens in `input_tensor` that are not present in the vocabulary are discarded. Tokens whose frequency counts are below `vocab_min_count` are also discarded. Tokens whose frequency proportions in the corpus exceed `vocab_subsampling` may be randomly down-sampled. See Eq. 5 in http://arxiv.org/abs/1310.4546 for more details about subsampling. Due to the random window sizes used for each token, the lengths of the outputs are non-deterministic, unless `batch_size` is specified to batch the outputs to always return `Tensors` of length `batch_size`. Args: input_tensor: A rank-1 `Tensor` from which to generate skip-gram candidates. min_skips: `int` or scalar `Tensor` specifying the minimum window size to randomly use for each token. 
Must be >= 0 and <= `max_skips`. If `min_skips` and `max_skips` are both 0, the only label outputted will be the token itself when `emit_self_as_target = True` - or no output otherwise. max_skips: `int` or scalar `Tensor` specifying the maximum window size to randomly use for each token. Must be >= 0. start: `int` or scalar `Tensor` specifying the position in `input_tensor` from which to start generating skip-gram candidates. limit: `int` or scalar `Tensor` specifying the maximum number of elements in `input_tensor` to use in generating skip-gram candidates. -1 means to use the rest of the `Tensor` after `start`. emit_self_as_target: `bool` or scalar `Tensor` specifying whether to emit each token as a label for itself. vocab_freq_table: (Optional) A lookup table (subclass of `lookup.InitializableLookupTableBase`) that maps tokens to their raw frequency counts. If specified, any token in `input_tensor` that is not found in `vocab_freq_table` will be filtered out before generating skip-gram candidates. While this will typically map to integer raw frequency counts, it could also map to float frequency proportions. `vocab_min_count` and `corpus_size` should be in the same units as this. vocab_min_count: (Optional) `int`, `float`, or scalar `Tensor` specifying minimum frequency threshold (from `vocab_freq_table`) for a token to be kept in `input_tensor`. If this is specified, `vocab_freq_table` must also be specified - and they should both be in the same units. vocab_subsampling: (Optional) `float` specifying frequency proportion threshold for tokens from `input_tensor`. Tokens that occur more frequently (based on the ratio of the token's `vocab_freq_table` value to the `corpus_size`) will be randomly down-sampled. Reasonable starting values may be around 1e-3 or 1e-5. If this is specified, both `vocab_freq_table` and `corpus_size` must also be specified. See Eq. 5 in http://arxiv.org/abs/1310.4546 for more details. 
corpus_size: (Optional) `int`, `float`, or scalar `Tensor` specifying the total number of tokens in the corpus (e.g., sum of all the frequency counts of `vocab_freq_table`). Used with `vocab_subsampling` for down-sampling frequently occurring tokens. If this is specified, `vocab_freq_table` and `vocab_subsampling` must also be specified. batch_size: (Optional) `int` specifying batch size of returned `Tensors`. batch_capacity: (Optional) `int` specifying batch capacity for the queue used for batching returned `Tensors`. Only has an effect if `batch_size` > 0. Defaults to 100 * `batch_size` if not specified. seed: (Optional) `int` used to create a random seed for window size and subsampling. See `set_random_seed` docs for behavior. name: (Optional) A `string` name or a name scope for the operations. Returns: A `tuple` containing (token, label) `Tensors`. Each output `Tensor` is of rank-1 and has the same type as `input_tensor`. The `Tensors` will be of length `batch_size`; if `batch_size` is not specified, they will be of random length, though they will be in sync with each other as long as they are evaluated together. Raises: ValueError: If `vocab_freq_table` is not provided, but `vocab_min_count`, `vocab_subsampling`, or `corpus_size` is specified. If `vocab_subsampling` and `corpus_size` are not both present or both absent. """ if vocab_freq_table is None and (vocab_min_count is not None or vocab_subsampling is not None or corpus_size is not None): raise ValueError( "vocab_freq_table is not provided, but vocab_min_count={}, " "vocab_subsampling={}, or corpus_size={} is not None. 
These settings " "are useless without a vocab_freq_table.".format( vocab_min_count, vocab_subsampling, corpus_size)) if (vocab_subsampling is None) != (corpus_size is None): raise ValueError( "vocab_subsampling is {} while corpus_size is {} - both must be " "provided in order for subsampling to work.".format( vocab_subsampling, corpus_size)) with ops.name_scope( name, "skip_gram_sample", values=[input_tensor, min_skips, max_skips, start, limit]): input_tensor = _filter_input( input_tensor=input_tensor, vocab_freq_table=vocab_freq_table, vocab_min_count=vocab_min_count, vocab_subsampling=vocab_subsampling, corpus_size=corpus_size, seed=seed) seed1, seed2 = random_seed.get_seed(seed) tokens, labels = gen_skip_gram_ops.skip_gram_generate_candidates( input_tensor=input_tensor, min_skips=min_skips, max_skips=max_skips, start=start, limit=limit, emit_self_as_target=emit_self_as_target, # Note that seed here should be seed1! This is due to # GuardedPhiloxRandom's hard-coded attributes of "seed" and "seed2". seed=seed1, seed2=seed2) # TODO(weiho): If the need arises, add support for sparse input_tensor that # figures out sentence boundaries, then calls # skip_gram_generate_candidates() on each sentence. # Batches the (tokens, labels) outputs so that they will be of deterministic # batch_size, to facilitate feeding them into the rest of the network. 
if batch_size is not None and batch_size > 0: batch_capacity = (batch_capacity if (batch_capacity is not None and batch_capacity > 0) else 100 * batch_size) return input_ops.batch( [tokens, labels], batch_size, capacity=batch_capacity, enqueue_many=True) return tokens, labels def skip_gram_sample_with_text_vocab(input_tensor, vocab_freq_file, vocab_token_index=0, vocab_token_dtype=dtypes.string, vocab_freq_index=1, vocab_freq_dtype=dtypes.float64, vocab_delimiter=",", vocab_min_count=0, vocab_subsampling=None, corpus_size=None, min_skips=1, max_skips=5, start=0, limit=-1, emit_self_as_target=False, batch_size=None, batch_capacity=None, seed=None, name=None): """Skip-gram sampling with a text vocabulary file. Wrapper around `skip_gram_sample()` for use with a text vocabulary file. The vocabulary file is expected to be a plain-text file, with lines of `vocab_delimiter`-separated columns. The `vocab_token_index` column should contain the vocabulary term, while the `vocab_freq_index` column should contain the number of times that term occurs in the corpus. For example, with a text vocabulary file of: ``` bonjour,fr,42 hello,en,777 hola,es,99 ``` You should set `vocab_delimiter=","`, `vocab_token_index=0`, and `vocab_freq_index=2`. See `skip_gram_sample()` documentation for more details about the skip-gram sampling process. Args: input_tensor: A rank-1 `Tensor` from which to generate skip-gram candidates. vocab_freq_file: `string` specifying full file path to the text vocab file. vocab_token_index: `int` specifying which column in the text vocab file contains the tokens. vocab_token_dtype: `DType` specifying the format of the tokens in the text vocab file. vocab_freq_index: `int` specifying which column in the text vocab file contains the frequency counts of the tokens. vocab_freq_dtype: `DType` specifying the format of the frequency counts in the text vocab file. vocab_delimiter: `string` specifying the delimiter used in the text vocab file. 
vocab_min_count: `int`, `float`, or scalar `Tensor` specifying minimum frequency threshold (from `vocab_freq_file`) for a token to be kept in `input_tensor`. This should correspond with `vocab_freq_dtype`. vocab_subsampling: (Optional) `float` specifying frequency proportion threshold for tokens from `input_tensor`. Tokens that occur more frequently will be randomly down-sampled. Reasonable starting values may be around 1e-3 or 1e-5. See Eq. 5 in http://arxiv.org/abs/1310.4546 for more details. corpus_size: (Optional) `int`, `float`, or scalar `Tensor` specifying the total number of tokens in the corpus (e.g., sum of all the frequency counts of `vocab_freq_file`). Used with `vocab_subsampling` for down-sampling frequently occurring tokens. If this is specified, `vocab_freq_file` and `vocab_subsampling` must also be specified. If `corpus_size` is needed but not supplied, then it will be calculated from `vocab_freq_file`. You might want to supply your own value if you have already eliminated infrequent tokens from your vocabulary files (where frequency < vocab_min_count) to save memory in the internal token lookup table. Otherwise, the unused tokens' variables will waste memory. The user-supplied `corpus_size` value must be greater than or equal to the sum of all the frequency counts of `vocab_freq_file`. min_skips: `int` or scalar `Tensor` specifying the minimum window size to randomly use for each token. Must be >= 0 and <= `max_skips`. If `min_skips` and `max_skips` are both 0, the only label outputted will be the token itself. max_skips: `int` or scalar `Tensor` specifying the maximum window size to randomly use for each token. Must be >= 0. start: `int` or scalar `Tensor` specifying the position in `input_tensor` from which to start generating skip-gram candidates. limit: `int` or scalar `Tensor` specifying the maximum number of elements in `input_tensor` to use in generating skip-gram candidates. -1 means to use the rest of the `Tensor` after `start`. 
emit_self_as_target: `bool` or scalar `Tensor` specifying whether to emit each token as a label for itself. batch_size: (Optional) `int` specifying batch size of returned `Tensors`. batch_capacity: (Optional) `int` specifying batch capacity for the queue used for batching returned `Tensors`. Only has an effect if `batch_size` > 0. Defaults to 100 * `batch_size` if not specified. seed: (Optional) `int` used to create a random seed for window size and subsampling. See [`set_random_seed`](../../g3doc/python/constant_op.md#set_random_seed) for behavior. name: (Optional) A `string` name or a name scope for the operations. Returns: A `tuple` containing (token, label) `Tensors`. Each output `Tensor` is of rank-1 and has the same type as `input_tensor`. The `Tensors` will be of length `batch_size`; if `batch_size` is not specified, they will be of random length, though they will be in sync with each other as long as they are evaluated together. Raises: ValueError: If `vocab_token_index` or `vocab_freq_index` is less than 0 or exceeds the number of columns in `vocab_freq_file`. If `vocab_token_index` and `vocab_freq_index` are both set to the same column. If any token in `vocab_freq_file` has a negative frequency. """ if vocab_token_index < 0 or vocab_freq_index < 0: raise ValueError( "vocab_token_index={} and vocab_freq_index={} must both be >= 0.". format(vocab_token_index, vocab_freq_index)) if vocab_token_index == vocab_freq_index: raise ValueError( "vocab_token_index and vocab_freq_index should be different, but are " "both {}.".format(vocab_token_index)) # Iterates through the vocab file and calculates the number of vocab terms as # well as the total corpus size (by summing the frequency counts of all the # vocab terms). 
calculated_corpus_size = 0.0 vocab_size = 0 with gfile.GFile(vocab_freq_file, mode="r") as f: reader = csv.reader(f, delimiter=vocab_delimiter) for row in reader: if vocab_token_index >= len(row) or vocab_freq_index >= len(row): raise ValueError( "Row in vocab file only has {} columns, so vocab_token_index={} or " "vocab_freq_index={} is out of bounds. Row content: {}".format( len(row), vocab_token_index, vocab_freq_index, row)) vocab_size += 1 freq = vocab_freq_dtype.as_numpy_dtype(row[vocab_freq_index]) if freq < 0: raise ValueError( "Row in vocab file has negative frequency of {}. Row content: {}". format(freq, row)) # Note: tokens whose frequencies are below vocab_min_count will still # contribute to the total corpus size used for vocab subsampling. calculated_corpus_size += freq if not corpus_size: corpus_size = calculated_corpus_size elif calculated_corpus_size - corpus_size > 1e-6: raise ValueError( "`corpus_size`={} must be greater than or equal to the sum of all the " "frequency counts ({}) of `vocab_freq_file` ({}).".format( corpus_size, calculated_corpus_size, vocab_freq_file)) vocab_freq_table = lookup.HashTable( lookup.TextFileInitializer( filename=vocab_freq_file, key_dtype=vocab_token_dtype, key_index=vocab_token_index, value_dtype=vocab_freq_dtype, value_index=vocab_freq_index, vocab_size=vocab_size, delimiter=vocab_delimiter), # For vocab terms not in vocab file, use a default value of -1. default_value=-1) return skip_gram_sample( input_tensor, min_skips=min_skips, max_skips=max_skips, start=start, limit=limit, emit_self_as_target=emit_self_as_target, vocab_freq_table=vocab_freq_table, vocab_min_count=vocab_min_count, vocab_subsampling=vocab_subsampling, # corpus_size is not used unless vocab_subsampling is specified. 
corpus_size=None if vocab_subsampling is None else corpus_size, batch_size=batch_size, batch_capacity=batch_capacity, seed=seed, name=name) def _filter_input(input_tensor, vocab_freq_table, vocab_min_count, vocab_subsampling, corpus_size, seed): """Filters input tensor based on vocab freq, threshold, and subsampling.""" if vocab_freq_table is None: return input_tensor if not isinstance(vocab_freq_table, lookup.InitializableLookupTableBase): raise ValueError( "vocab_freq_table must be a subclass of " "InitializableLookupTableBase (such as HashTable) instead of type " "{}.".format(type(vocab_freq_table))) with ops.name_scope( "filter_vocab", values=[vocab_freq_table, input_tensor, vocab_min_count]): freq = vocab_freq_table.lookup(input_tensor) # Filters out elements in input_tensor that are not found in # vocab_freq_table (table returns a default value of -1 specified above when # an element is not found). mask = math_ops.not_equal(freq, vocab_freq_table.default_value) # Filters out elements whose vocab frequencies are less than the threshold. if vocab_min_count is not None: cast_threshold = math_ops.cast(vocab_min_count, freq.dtype) mask = math_ops.logical_and(mask, math_ops.greater_equal(freq, cast_threshold)) input_tensor = array_ops.boolean_mask(input_tensor, mask) freq = array_ops.boolean_mask(freq, mask) if not vocab_subsampling: return input_tensor if vocab_subsampling < 0 or vocab_subsampling > 1: raise ValueError( "Invalid vocab_subsampling={} - it should be within range [0, 1].". format(vocab_subsampling)) # Subsamples the input tokens based on vocabulary frequency and # vocab_subsampling threshold (ie randomly discard commonly appearing # tokens). 
with ops.name_scope( "subsample_vocab", values=[input_tensor, freq, vocab_subsampling]): corpus_size = math_ops.cast(corpus_size, dtypes.float64) freq = math_ops.cast(freq, dtypes.float64) vocab_subsampling = math_ops.cast(vocab_subsampling, dtypes.float64) # From tensorflow_models/tutorials/embedding/word2vec_kernels.cc, which is # suppose to correlate with Eq. 5 in http://arxiv.org/abs/1310.4546. keep_prob = ((math_ops.sqrt(freq / (vocab_subsampling * corpus_size)) + 1.0) * (vocab_subsampling * corpus_size / freq)) random_prob = random_ops.random_uniform( array_ops.shape(freq), minval=0, maxval=1, dtype=dtypes.float64, seed=seed) mask = math_ops.less_equal(random_prob, keep_prob) return array_ops.boolean_mask(input_tensor, mask)
apache-2.0
Vauxoo/account-payment
res_currency_print_on_check/__init__.py
1
1030
# -*- coding: utf-8 -*- ############################################################################## # # Odoo, Open Source Management Solution # Copyright (C) 2010 - 2014 Savoir-faire Linux # (<http://www.savoirfairelinux.com>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from . import models
agpl-3.0
JensTimmerman/radical.pilot
examples/tutorial/coupled_tasks.py
1
8504
#!/usr/bin/env python __copyright__ = "Copyright 2013-2014, http://radical.rutgers.edu" __license__ = "MIT" import sys import radical.pilot as rp """ DESCRIPTION: Tutorial 3: Coupled Tasks For every task A1 and B1 a C1 is started. """ # READ: The RADICAL-Pilot documentation: # http://radicalpilot.readthedocs.org/en/latest # # Try running this example with RADICAL_PILOT_VERBOSE=debug set if # you want to see what happens behind the scences! #------------------------------------------------------------------------------ # def pilot_state_cb (pilot, state): if not pilot: return print "[Callback]: ComputePilot '%s' state: %s." % (pilot.uid, state) if state == rp.FAILED: sys.exit (1) #------------------------------------------------------------------------------ # def unit_state_cb (unit, state): if not unit: return global CNT print "[Callback]: unit %s on %s: %s." % (unit.uid, unit.pilot_id, state) if state == rp.FAILED: print "stderr: %s" % unit.stderr sys.exit(2) #------------------------------------------------------------------------------ # if __name__ == "__main__": # we can optionally pass session name to RP if len(sys.argv) > 1: session_name = sys.argv[1] else: session_name = None # Create a new session. No need to try/except this: if session creation # fails, there is not much we can do anyways... session = rp.Session(name=session_name) print "session id: %s" % session.uid # all other pilot code is now tried/excepted. If an exception is caught, we # can rely on the session object to exist and be valid, and we can thus tear # the whole RP stack down via a 'session.close()' call in the 'finally' # clause... try: # ----- CHANGE THIS -- CHANGE THIS -- CHANGE THIS -- CHANGE THIS ------ # # Change the user name below if you are using a remote resource # and your username on that resource is different from the username # on your local machine. 
# c = rp.Context('userpass') #c.user_id = "tutorial_X" #c.user_pass = "PutYourPasswordHere" session.add_context(c) # Add a Pilot Manager. Pilot managers manage one or more ComputePilots. print "Initializing Pilot Manager ..." pmgr = rp.PilotManager(session=session) # Register our callback with the PilotManager. This callback will get # called every time any of the pilots managed by the PilotManager # change their state. pmgr.register_callback(pilot_state_cb) # ----- CHANGE THIS -- CHANGE THIS -- CHANGE THIS -- CHANGE THIS ------ # # If you want to run this example on your local machine, you don't have # to change anything here. # # Change the resource below if you want to run on a remote resource. # You also might have to set the 'project' to your allocation ID if # your remote resource does compute time accounting. # # A list of preconfigured resources can be found at: # http://radicalpilot.readthedocs.org/en/latest/machconf.html#preconfigured-resources # pdesc = rp.ComputePilotDescription () pdesc.resource = "local.localhost" # NOTE: This is a "label", not a hostname pdesc.runtime = 10 # minutes pdesc.cores = 1 pdesc.cleanup = True # submit the pilot. print "Submitting Compute Pilot to Pilot Manager ..." pilot = pmgr.submit_pilots(pdesc) # Combine the ComputePilot, the ComputeUnits and a scheduler via # a UnitManager object. print "Initializing Unit Manager ..." umgr = rp.UnitManager (session=session, scheduler=rp.SCHED_DIRECT_SUBMISSION) # Register our callback with the UnitManager. This callback will get # called every time any of the units managed by the UnitManager # change their state. umgr.register_callback(unit_state_cb) # Add the created ComputePilot to the UnitManager. print "Registering Compute Pilot with Unit Manager ..." 
umgr.add_pilots(pilot) NUMBER_JOBS = 2 # the total number of CUs to chain # submit A cus to pilot job cudesc_list_A = [] for idx in range(NUMBER_JOBS): # -------- BEGIN USER DEFINED CU 1 DESCRIPTION --------- # cudesc = rp.ComputeUnitDescription() cudesc.environment = {"CU_LIST": "A", "CU_NO": "%02d" % idx} cudesc.executable = "/bin/echo" cudesc.arguments = ['"$CU_LIST CU with id $CU_NO"'] cudesc.cores = 1 # -------- END USER DEFINED CU 1 DESCRIPTION --------- # cudesc_list_A.append(cudesc) # Submit the previously created ComputeUnit descriptions to the # PilotManager. This will trigger the selected scheduler to start # assigning ComputeUnits to the ComputePilots. print "Submit Compute Units 'A' to Unit Manager ..." cu_set_A = umgr.submit_units(cudesc_list_A) # submit B cus to pilot job cudesc_list_B = [] for idx in range(NUMBER_JOBS): # -------- BEGIN USER DEFINED CU 2 DESCRIPTION --------- # cudesc = rp.ComputeUnitDescription() cudesc.environment = {"CU_LIST": "B", "CU_NO": "%02d" % idx} cudesc.executable = "/bin/echo" cudesc.arguments = ['"$CU_LIST CU with id $CU_NO"'] cudesc.cores = 1 # -------- END USER DEFINED CU 2 DESCRIPTION --------- # cudesc_list_B.append(cudesc) # Submit the previously created ComputeUnit descriptions to the # PilotManager. This will trigger the selected scheduler to start # assigning ComputeUnits to the ComputePilots. print "Submit Compute Units 'B' to Unit Manager ..." cu_set_B = umgr.submit_units(cudesc_list_B) # --------------------------------------------------------------------- print "Waiting for 'A' and 'B' CUs to complete..." umgr.wait_units() print "Executing 'C' tasks now..." # --------------------------------------------------------------------- # submit 'C' tasks to pilot job. each 'C' task takes the output of # an 'A' and a 'B' task and puts them together. 
cudesc_list_C = [] for idx in range(NUMBER_JOBS): # -------- BEGIN USER DEFINED CU 3 DESCRIPTION --------- # cudesc = rp.ComputeUnitDescription() cudesc.environment = {"CU_SET": "C", "CU_NO": "%02d" % idx} cudesc.executable = "/bin/echo" cudesc.arguments = ['"$CU_SET CU with id $CU_NO"'] cudesc.cores = 1 # -------- END USER DEFINED CU 3 DESCRIPTION --------- # cudesc_list_C.append(cudesc) # Submit the previously created ComputeUnit descriptions to the # PilotManager. This will trigger the selected scheduler to start # assigning ComputeUnits to the ComputePilots. print "Submit Compute Units 'C' to Unit Manager ..." cu_set_C = umgr.submit_units(cudesc_list_C) # --------------------------------------------------------------------- print "Waiting for 'C' CUs to complete..." umgr.wait_units() print "All CUs completed successfully!" except Exception as e: # Something unexpected happened in the pilot code above print "caught Exception: %s" % e raise except (KeyboardInterrupt, SystemExit) as e: # the callback called sys.exit(), and we can here catch the # corresponding KeyboardInterrupt exception for shutdown. We also catch # SystemExit (which gets raised if the main threads exits for some other # reason). print "need to exit now: %s" % e finally: # always clean up the session, no matter if we caught an exception or # not. print "closing session" session.close () # the above is equivalent to # # session.close (cleanup=True, terminate=True) # # it will thus both clean out the session's database record, and kill # all remaining pilots (none in our example). #-------------------------------------------------------------------------------
mit
googleapis/python-managed-identities
google/cloud/managedidentities/__init__.py
1
2912
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from google.cloud.managedidentities_v1.services.managed_identities_service.client import ( ManagedIdentitiesServiceClient, ) from google.cloud.managedidentities_v1.services.managed_identities_service.async_client import ( ManagedIdentitiesServiceAsyncClient, ) from google.cloud.managedidentities_v1.types.managed_identities_service import ( AttachTrustRequest, ) from google.cloud.managedidentities_v1.types.managed_identities_service import ( CreateMicrosoftAdDomainRequest, ) from google.cloud.managedidentities_v1.types.managed_identities_service import ( DeleteDomainRequest, ) from google.cloud.managedidentities_v1.types.managed_identities_service import ( DetachTrustRequest, ) from google.cloud.managedidentities_v1.types.managed_identities_service import ( GetDomainRequest, ) from google.cloud.managedidentities_v1.types.managed_identities_service import ( ListDomainsRequest, ) from google.cloud.managedidentities_v1.types.managed_identities_service import ( ListDomainsResponse, ) from google.cloud.managedidentities_v1.types.managed_identities_service import ( OpMetadata, ) from google.cloud.managedidentities_v1.types.managed_identities_service import ( ReconfigureTrustRequest, ) from google.cloud.managedidentities_v1.types.managed_identities_service import ( ResetAdminPasswordRequest, ) from google.cloud.managedidentities_v1.types.managed_identities_service import ( 
ResetAdminPasswordResponse, ) from google.cloud.managedidentities_v1.types.managed_identities_service import ( UpdateDomainRequest, ) from google.cloud.managedidentities_v1.types.managed_identities_service import ( ValidateTrustRequest, ) from google.cloud.managedidentities_v1.types.resource import Domain from google.cloud.managedidentities_v1.types.resource import Trust __all__ = ( "ManagedIdentitiesServiceClient", "ManagedIdentitiesServiceAsyncClient", "AttachTrustRequest", "CreateMicrosoftAdDomainRequest", "DeleteDomainRequest", "DetachTrustRequest", "GetDomainRequest", "ListDomainsRequest", "ListDomainsResponse", "OpMetadata", "ReconfigureTrustRequest", "ResetAdminPasswordRequest", "ResetAdminPasswordResponse", "UpdateDomainRequest", "ValidateTrustRequest", "Domain", "Trust", )
apache-2.0
gxyang/hstore
third_party/cpp/protobuf/python/setup.py
40
4737
#! /usr/bin/python # # See README for usage instructions. # We must use setuptools, not distutils, because we need to use the # namespace_packages option for the "google" package. from ez_setup import use_setuptools use_setuptools() from setuptools import setup from distutils.spawn import find_executable import sys import os import subprocess maintainer_email = "protobuf@googlegroups.com" # Find the Protocol Compiler. if os.path.exists("../src/protoc"): protoc = "../src/protoc" elif os.path.exists("../src/protoc.exe"): protoc = "../src/protoc.exe" else: protoc = find_executable("protoc") def generate_proto(source): """Invokes the Protocol Compiler to generate a _pb2.py from the given .proto file. Does nothing if the output already exists and is newer than the input.""" output = source.replace(".proto", "_pb2.py").replace("../src/", "") if not os.path.exists(source): print "Can't find required file: " + source sys.exit(-1) if (not os.path.exists(output) or (os.path.exists(source) and os.path.getmtime(source) > os.path.getmtime(output))): print "Generating %s..." % output if protoc == None: sys.stderr.write( "protoc is not installed nor found in ../src. Please compile it " "or install the binary package.\n") sys.exit(-1) protoc_command = [ protoc, "-I../src", "-I.", "--python_out=.", source ] if subprocess.call(protoc_command) != 0: sys.exit(-1) def MakeTestSuite(): # This is apparently needed on some systems to make sure that the tests # work even if a previous version is already installed. 
if 'google' in sys.modules: del sys.modules['google'] generate_proto("../src/google/protobuf/unittest.proto") generate_proto("../src/google/protobuf/unittest_import.proto") generate_proto("../src/google/protobuf/unittest_mset.proto") generate_proto("../src/google/protobuf/unittest_no_generic_services.proto") generate_proto("google/protobuf/internal/more_extensions.proto") generate_proto("google/protobuf/internal/more_messages.proto") import unittest import google.protobuf.internal.generator_test as generator_test import google.protobuf.internal.descriptor_test as descriptor_test import google.protobuf.internal.reflection_test as reflection_test import google.protobuf.internal.service_reflection_test \ as service_reflection_test import google.protobuf.internal.text_format_test as text_format_test import google.protobuf.internal.wire_format_test as wire_format_test loader = unittest.defaultTestLoader suite = unittest.TestSuite() for test in [ generator_test, descriptor_test, reflection_test, service_reflection_test, text_format_test, wire_format_test ]: suite.addTest(loader.loadTestsFromModule(test)) return suite if __name__ == '__main__': # TODO(kenton): Integrate this into setuptools somehow? if len(sys.argv) >= 2 and sys.argv[1] == "clean": # Delete generated _pb2.py files and .pyc files in the code tree. for (dirpath, dirnames, filenames) in os.walk("."): for filename in filenames: filepath = os.path.join(dirpath, filename) if filepath.endswith("_pb2.py") or filepath.endswith(".pyc"): os.remove(filepath) else: # Generate necessary .proto file if it doesn't exist. # TODO(kenton): Maybe we should hook this into a distutils command? generate_proto("../src/google/protobuf/descriptor.proto") setup(name = 'protobuf', version = '2.3.0', packages = [ 'google' ], namespace_packages = [ 'google' ], test_suite = 'setup.MakeTestSuite', # Must list modules explicitly so that we don't install tests. 
py_modules = [ 'google.protobuf.internal.containers', 'google.protobuf.internal.decoder', 'google.protobuf.internal.encoder', 'google.protobuf.internal.message_listener', 'google.protobuf.internal.type_checkers', 'google.protobuf.internal.wire_format', 'google.protobuf.descriptor', 'google.protobuf.descriptor_pb2', 'google.protobuf.message', 'google.protobuf.reflection', 'google.protobuf.service', 'google.protobuf.service_reflection', 'google.protobuf.text_format' ], url = 'http://code.google.com/p/protobuf/', maintainer = maintainer_email, maintainer_email = 'protobuf@googlegroups.com', license = 'New BSD License', description = 'Protocol Buffers', long_description = "Protocol Buffers are Google's data interchange format.", )
gpl-3.0
wimac/home
Dropbox/skel/bin/sick-beard/cherrypy/_cpthreadinglocal.py
36
6855
# This is a backport of Python-2.4's threading.local() implementation """Thread-local objects (Note that this module provides a Python version of thread threading.local class. Depending on the version of Python you're using, there may be a faster one available. You should always import the local class from threading.) Thread-local objects support the management of thread-local data. If you have data that you want to be local to a thread, simply create a thread-local object and use its attributes: >>> mydata = local() >>> mydata.number = 42 >>> mydata.number 42 You can also access the local-object's dictionary: >>> mydata.__dict__ {'number': 42} >>> mydata.__dict__.setdefault('widgets', []) [] >>> mydata.widgets [] What's important about thread-local objects is that their data are local to a thread. If we access the data in a different thread: >>> log = [] >>> def f(): ... items = mydata.__dict__.items() ... items.sort() ... log.append(items) ... mydata.number = 11 ... log.append(mydata.number) >>> import threading >>> thread = threading.Thread(target=f) >>> thread.start() >>> thread.join() >>> log [[], 11] we get different data. Furthermore, changes made in the other thread don't affect data seen in this thread: >>> mydata.number 42 Of course, values you get from a local object, including a __dict__ attribute, are for whatever thread was current at the time the attribute was read. For that reason, you generally don't want to save these values across threads, as they apply only to the thread they came from. You can create custom local objects by subclassing the local class: >>> class MyLocal(local): ... number = 2 ... initialized = False ... def __init__(self, **kw): ... if self.initialized: ... raise SystemError('__init__ called too many times') ... self.initialized = True ... self.__dict__.update(kw) ... def squared(self): ... return self.number ** 2 This can be useful to support default values, methods and initialization. 
Note that if you define an __init__ method, it will be called each time the local object is used in a separate thread. This is necessary to initialize each thread's dictionary. Now if we create a local object: >>> mydata = MyLocal(color='red') Now we have a default number: >>> mydata.number 2 an initial color: >>> mydata.color 'red' >>> del mydata.color And a method that operates on the data: >>> mydata.squared() 4 As before, we can access the data in a separate thread: >>> log = [] >>> thread = threading.Thread(target=f) >>> thread.start() >>> thread.join() >>> log [[('color', 'red'), ('initialized', True)], 11] without affecting this thread's data: >>> mydata.number 2 >>> mydata.color Traceback (most recent call last): ... AttributeError: 'MyLocal' object has no attribute 'color' Note that subclasses can define slots, but they are not thread local. They are shared across threads: >>> class MyLocal(local): ... __slots__ = 'number' >>> mydata = MyLocal() >>> mydata.number = 42 >>> mydata.color = 'red' So, the separate thread: >>> thread = threading.Thread(target=f) >>> thread.start() >>> thread.join() affects what we see: >>> mydata.number 11 >>> del mydata """ # Threading import is at end class _localbase(object): __slots__ = '_local__key', '_local__args', '_local__lock' def __new__(cls, *args, **kw): self = object.__new__(cls) key = 'thread.local.' + str(id(self)) object.__setattr__(self, '_local__key', key) object.__setattr__(self, '_local__args', (args, kw)) object.__setattr__(self, '_local__lock', RLock()) if args or kw and (cls.__init__ is object.__init__): raise TypeError("Initialization arguments are not supported") # We need to create the thread dict in anticipation of # __init__ being called, to make sure we don't call it # again ourselves. 
dict = object.__getattribute__(self, '__dict__') currentThread().__dict__[key] = dict return self def _patch(self): key = object.__getattribute__(self, '_local__key') d = currentThread().__dict__.get(key) if d is None: d = {} currentThread().__dict__[key] = d object.__setattr__(self, '__dict__', d) # we have a new instance dict, so call out __init__ if we have # one cls = type(self) if cls.__init__ is not object.__init__: args, kw = object.__getattribute__(self, '_local__args') cls.__init__(self, *args, **kw) else: object.__setattr__(self, '__dict__', d) class local(_localbase): def __getattribute__(self, name): lock = object.__getattribute__(self, '_local__lock') lock.acquire() try: _patch(self) return object.__getattribute__(self, name) finally: lock.release() def __setattr__(self, name, value): lock = object.__getattribute__(self, '_local__lock') lock.acquire() try: _patch(self) return object.__setattr__(self, name, value) finally: lock.release() def __delattr__(self, name): lock = object.__getattribute__(self, '_local__lock') lock.acquire() try: _patch(self) return object.__delattr__(self, name) finally: lock.release() def __del__(): threading_enumerate = enumerate __getattribute__ = object.__getattribute__ def __del__(self): key = __getattribute__(self, '_local__key') try: threads = list(threading_enumerate()) except: # if enumerate fails, as it seems to do during # shutdown, we'll skip cleanup under the assumption # that there is nothing to clean up return for thread in threads: try: __dict__ = thread.__dict__ except AttributeError: # Thread is dying, rest in peace continue if key in __dict__: try: del __dict__[key] except KeyError: pass # didn't have anything in this thread return __del__ __del__ = __del__() from threading import currentThread, enumerate, RLock
gpl-2.0
trolldbois/python-haystack
test/haystack/test_argparse_utils.py
1
1657
#!/usr/bin/env python # -*- coding: utf-8 -*- import logging import unittest import sys import argparse from haystack import argparse_utils class Test(unittest.TestCase): def test_readable(self): """test the readable helper.""" invalid = '/345678ui0d9t921giv9' self.assertRaises(argparse.ArgumentTypeError, argparse_utils.readable, invalid) valid = sys.modules[__name__].__file__ self.assertEqual(argparse_utils.readable(valid), valid) return def test_writeable(self): """test the writeable helper.""" invalid = '/345678ui0d9t921giv9/qwf89/2/4r/ef/23/23g/' self.assertRaises(argparse.ArgumentTypeError, argparse_utils.writeable, invalid) valid = sys.modules[__name__].__file__ self.assertEqual(argparse_utils.writeable(valid), valid) return def test_int16(self): """test the int16 helper.""" invalid = '/345678ui0d9t921giv9' self.assertRaises(argparse.ArgumentTypeError, argparse_utils.int16, invalid) invalid = sys.modules[__name__].__file__ self.assertRaises(argparse.ArgumentTypeError, argparse_utils.int16, invalid) valid = '0x01293' self.assertEqual(argparse_utils.int16(valid), 0x01293) return if __name__ == '__main__': logging.basicConfig(level=logging.WARNING) #logging.basicConfig(stream=sys.stdout, level=logging.DEBUG) # logging.getLogger('basicmodel').setLevel(level=logging.DEBUG) # logging.getLogger('model').setLevel(level=logging.DEBUG) # logging.getLogger('memory_mapping').setLevel(level=logging.INFO) unittest.main(verbosity=2)
gpl-3.0
mzdaniel/oh-mainline
vendor/packages/Django/tests/modeltests/model_inheritance/models.py
118
3888
""" XX. Model inheritance Model inheritance exists in two varieties: - abstract base classes which are a way of specifying common information inherited by the subclasses. They don't exist as a separate model. - non-abstract base classes (the default), which are models in their own right with their own database tables and everything. Their subclasses have references back to them, created automatically. Both styles are demonstrated here. """ from django.db import models # # Abstract base classes # class CommonInfo(models.Model): name = models.CharField(max_length=50) age = models.PositiveIntegerField() class Meta: abstract = True ordering = ['name'] def __unicode__(self): return u'%s %s' % (self.__class__.__name__, self.name) class Worker(CommonInfo): job = models.CharField(max_length=50) class Student(CommonInfo): school_class = models.CharField(max_length=10) class Meta: pass class StudentWorker(Student, Worker): pass # # Abstract base classes with related models # class Post(models.Model): title = models.CharField(max_length=50) class Attachment(models.Model): post = models.ForeignKey(Post, related_name='attached_%(class)s_set') content = models.TextField() class Meta: abstract = True def __unicode__(self): return self.content class Comment(Attachment): is_spam = models.BooleanField() class Link(Attachment): url = models.URLField() # # Multi-table inheritance # class Chef(models.Model): name = models.CharField(max_length=50) def __unicode__(self): return u"%s the chef" % self.name class Place(models.Model): name = models.CharField(max_length=50) address = models.CharField(max_length=80) def __unicode__(self): return u"%s the place" % self.name class Rating(models.Model): rating = models.IntegerField(null=True, blank=True) class Meta: abstract = True ordering = ['-rating'] class Restaurant(Place, Rating): serves_hot_dogs = models.BooleanField() serves_pizza = models.BooleanField() chef = models.ForeignKey(Chef, null=True, blank=True) class Meta(Rating.Meta): 
db_table = 'my_restaurant' def __unicode__(self): return u"%s the restaurant" % self.name class ItalianRestaurant(Restaurant): serves_gnocchi = models.BooleanField() def __unicode__(self): return u"%s the italian restaurant" % self.name class Supplier(Place): customers = models.ManyToManyField(Restaurant, related_name='provider') def __unicode__(self): return u"%s the supplier" % self.name class ParkingLot(Place): # An explicit link to the parent (we can control the attribute name). parent = models.OneToOneField(Place, primary_key=True, parent_link=True) main_site = models.ForeignKey(Place, related_name='lot') def __unicode__(self): return u"%s the parking lot" % self.name # # Abstract base classes with related models where the sub-class has the # same name in a different app and inherits from the same abstract base # class. # NOTE: The actual API tests for the following classes are in # model_inheritance_same_model_name/models.py - They are defined # here in order to have the name conflict between apps # class Title(models.Model): title = models.CharField(max_length=50) class NamedURL(models.Model): title = models.ForeignKey(Title, related_name='attached_%(app_label)s_%(class)s_set') url = models.URLField() class Meta: abstract = True class Copy(NamedURL): content = models.TextField() def __unicode__(self): return self.content class Mixin(object): def __init__(self): self.other_attr = 1 super(Mixin, self).__init__() class MixinModel(models.Model, Mixin): pass
agpl-3.0
Entropy512/libsigrokdecode
decoders/z80/__init__.py
6
1318
## ## This file is part of the libsigrokdecode project. ## ## Copyright (C) 2014 Daniel Elstner <daniel.kitta@gmail.com> ## ## This program is free software; you can redistribute it and/or modify ## it under the terms of the GNU General Public License as published by ## the Free Software Foundation; either version 3 of the License, or ## (at your option) any later version. ## ## This program is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ## GNU General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with this program; if not, see <http://www.gnu.org/licenses/>. ## ''' The Zilog Z80 is an 8-bit microprocessor compatible with the Intel 8080. In addition to the 8-bit data bus, this decoder requires the input signals /M1 (machine cycle), /RD (read) and /WR (write) to do its work. An explicit clock signal is not required. However, the Z80 CPU clock may be used as sampling clock, if applicable. Notes on the Z80 opcode format and descriptions of both documented and "undocumented" opcodes are available here: Details: http://www.z80.info/decoding.htm http://clrhome.org/table/ ''' from .pd import Decoder
gpl-3.0
eengl/pytdlpack
test/test_create_new_grid.py
1
2265
#!/usr/bin/env python3 # ---------------------------------------------------------------------------------------- # Import Modules # ---------------------------------------------------------------------------------------- import numpy as np import setuptools import sys platform = setuptools.distutils.util.get_platform() build_path = './build/lib.'+platform+'-'+str(sys.version_info.major)+'.'+str(sys.version_info.minor) sys.path.insert(0,build_path) import pytdlpack # ---------------------------------------------------------------------------------------- # Create some data # ---------------------------------------------------------------------------------------- nx = 2345 ny = 1597 date = 2019052900 id = [4210008,10,24,0] grid_data = np.random.rand(nx,ny)*75.0 grid_data.fill(np.nan) # ---------------------------------------------------------------------------------------- # Grid Specs: CONUS Lambert-Conformal 2.5km 2345x1597 # ---------------------------------------------------------------------------------------- griddef = pytdlpack.create_grid_definition(proj=3,nx=nx,ny=ny,latll=19.2290, lonll=233.7234,orientlon=265.,stdlat=25.,meshlength=2.539703) # ---------------------------------------------------------------------------------------- # Create TDLPACK data record and pack # ---------------------------------------------------------------------------------------- rec = pytdlpack.TdlpackRecord(date=date,id=id,lead=24,plain="GFS WIND SPEED", data=grid_data,missing_value=9999.0,grid=griddef) rec.pack(dec_scale=3) # ---------------------------------------------------------------------------------------- # Open new sequential file and write the records # ---------------------------------------------------------------------------------------- f = pytdlpack.open('new_grid.sq',mode='w',format='sequential') f.write(rec) f.close() # ---------------------------------------------------------------------------------------- # Open new random-access file and write the records 
# ---------------------------------------------------------------------------------------- fra = pytdlpack.open('new_grid.ra',mode='w',format='random-access',ra_template='large') fra.write(rec) fra.close()
gpl-3.0
Jumpscale/jumpscale_portal8
apps/gridportal/base/Grid/.macros/page/adminjumpscripts/3_adminjumpscripts.py
1
1480
def main(j, args, params, tags, tasklet):
    """Render the admin jumpscripts overview table for the grid portal.

    Reads the jumpscript metadata that the watchdog publishes in redis
    (hash "<secret>:admin:jscripts") and renders it as a searchable,
    sortable DataTables grid on the page supplied in ``args.page``.
    """

    def _formatdata(jumpscripts):
        # Build DataTables row data: a detail link on the name, then the
        # plain metadata fields.
        aaData = list()
        for name, jumpscript in jumpscripts.items():
            itemdata = ['<a href=adminjumpscript?name=%s>%s</a>' % (name, name)]
            for field in ['organization', 'version', 'descr']:
                itemdata.append(str(jumpscript[field]))
            aaData.append(itemdata)
        return aaData

    cl = j.clients.redis.getGeventRedisClient("localhost", 7770)

    # Bail out with a page message when no watchdog secret is configured.
    # BUG FIX: the second test used config.exists(...) == "" which compares a
    # boolean to a string and could never be true, so an *empty* secret
    # slipped through; inspect the actual value with config.get(...).
    if not j.application.config.exists("grid.watchdog.secret") or j.application.config.get("grid.watchdog.secret") == "":
        page = args.page
        page.addMessage('* no grid configured for watchdog: hrd:grid.watchdog.secret')
        params.result = page
        return params

    key = "%s:admin:jscripts" % j.application.config.get("grid.watchdog.secret")
    scripts = cl.hgetall(key)
    # This redis client returns a flat [key, value, key, value, ...] reply;
    # pair it up and decode each JSON-serialized jumpscript description.
    jumpscripts = dict((scripts[i], j.data.serializer.json.loads(scripts[i + 1]))
                       for i in range(0, len(scripts), 2))
    jscripts = _formatdata(jumpscripts)
    page = args.page
    modifier = j.portal.tools.html.getPageModifierGridDataTables(page)
    fieldnames = ('Name', 'Organization', 'Version', 'Description')
    tableid = modifier.addTableFromData(jscripts, fieldnames)
    modifier.addSearchOptions('#%s' % tableid)
    modifier.addSorting('#%s' % tableid, 0, 'desc')
    params.result = page
    return params


def match(j, args, params, tags, tasklet):
    """Tasklet match hook: this page macro always applies."""
    return True
apache-2.0
ingokegel/intellij-community
python/helpers/pycharm/buildout_engulfer.py
85
1225
# Expects two env variables:
#  PYCHARM_ENGULF_SCRIPT = which script should be engulfed.
#  PYCHARM_PREPEND_SYSPATH = which entries should be added to the beginning of sys.path;
#    items must be separated by path separator. May be unset.
#
# Given script is loaded and compiled, then sys.path is prepended as requested.
# On win32, getpass is changed to insecure but working version.
# Then the compiled script evaluated, as if it were run by python interpreter itself.
# Works OK with debugger.

import os
import sys

target = os.getenv("PYCHARM_ENGULF_SCRIPT")
# BUG FIX: validate before using the value -- the old code concatenated the
# (possibly None) value into the print message first, which raised a
# TypeError instead of the intended assertion message when the var was unset.
assert target, "PYCHARM_ENGULF_SCRIPT must be set"
print("Running script through buildout: " + target)

filepath = os.path.abspath(target)
# Read the script, stripping trailing whitespace from every line; the with
# statement replaces the manual try/finally close dance.
with open(filepath, "r") as f:
    source = "\n".join(s.rstrip() for s in f.readlines())

from fix_getpass import fixGetpass
fixGetpass()

# include engulfed's path, everyone expects this
our_path = os.path.dirname(filepath)
if our_path not in sys.path:
    sys.path.append(our_path)

# BUG FIX: execute the compiled code object (not the raw source) so that
# tracebacks point at the engulfed script's filename; the old code compiled
# the source and then discarded the result before exec-ing the plain string.
exec(compile(source, target, "exec"))
# here we come
apache-2.0
CTFd/CTFd
tests/api/v1/users/test_scoring.py
4
1454
#!/usr/bin/env python # -*- coding: utf-8 -*- from CTFd.models import Users from CTFd.utils import set_config from tests.helpers import ( create_ctfd, destroy_ctfd, login_as_user, register_user, simulate_user_activity, ) def test_api_user_place_hidden_if_scores_hidden(): """/api/v1/users/me should not reveal user place if scores aren't visible""" app = create_ctfd() with app.app_context(): register_user(app) user = Users.query.filter_by(id=2).first() simulate_user_activity(app.db, user=user) with login_as_user(app, name="user") as client: r = client.get("/api/v1/users/me", json="") resp = r.get_json() assert resp["data"]["place"] == "1st" set_config("score_visibility", "hidden") with login_as_user(app, name="user") as client: r = client.get("/api/v1/users/me", json="") resp = r.get_json() assert resp["data"]["place"] is None set_config("score_visibility", "admins") with login_as_user(app, name="user") as client: r = client.get("/api/v1/users/me", json="") resp = r.get_json() assert resp["data"]["place"] is None with login_as_user(app, name="admin") as client: r = client.get("/api/v1/users/2", json="") resp = r.get_json() assert resp["data"]["place"] == "1st" destroy_ctfd(app)
apache-2.0
Sweetgrassbuffalo/ReactionSweeGrass-v2
.meteor/local/dev_bundle/python/Lib/colorsys.py
1066
3691
"""Conversion functions between RGB and other color systems. This modules provides two functions for each color system ABC: rgb_to_abc(r, g, b) --> a, b, c abc_to_rgb(a, b, c) --> r, g, b All inputs and outputs are triples of floats in the range [0.0...1.0] (with the exception of I and Q, which covers a slightly larger range). Inputs outside the valid range may cause exceptions or invalid outputs. Supported color systems: RGB: Red, Green, Blue components YIQ: Luminance, Chrominance (used by composite video signals) HLS: Hue, Luminance, Saturation HSV: Hue, Saturation, Value """ # References: # http://en.wikipedia.org/wiki/YIQ # http://en.wikipedia.org/wiki/HLS_color_space # http://en.wikipedia.org/wiki/HSV_color_space __all__ = ["rgb_to_yiq","yiq_to_rgb","rgb_to_hls","hls_to_rgb", "rgb_to_hsv","hsv_to_rgb"] # Some floating point constants ONE_THIRD = 1.0/3.0 ONE_SIXTH = 1.0/6.0 TWO_THIRD = 2.0/3.0 # YIQ: used by composite video signals (linear combinations of RGB) # Y: perceived grey level (0.0 == black, 1.0 == white) # I, Q: color components def rgb_to_yiq(r, g, b): y = 0.30*r + 0.59*g + 0.11*b i = 0.60*r - 0.28*g - 0.32*b q = 0.21*r - 0.52*g + 0.31*b return (y, i, q) def yiq_to_rgb(y, i, q): r = y + 0.948262*i + 0.624013*q g = y - 0.276066*i - 0.639810*q b = y - 1.105450*i + 1.729860*q if r < 0.0: r = 0.0 if g < 0.0: g = 0.0 if b < 0.0: b = 0.0 if r > 1.0: r = 1.0 if g > 1.0: g = 1.0 if b > 1.0: b = 1.0 return (r, g, b) # HLS: Hue, Luminance, Saturation # H: position in the spectrum # L: color lightness # S: color saturation def rgb_to_hls(r, g, b): maxc = max(r, g, b) minc = min(r, g, b) # XXX Can optimize (maxc+minc) and (maxc-minc) l = (minc+maxc)/2.0 if minc == maxc: return 0.0, l, 0.0 if l <= 0.5: s = (maxc-minc) / (maxc+minc) else: s = (maxc-minc) / (2.0-maxc-minc) rc = (maxc-r) / (maxc-minc) gc = (maxc-g) / (maxc-minc) bc = (maxc-b) / (maxc-minc) if r == maxc: h = bc-gc elif g == maxc: h = 2.0+rc-bc else: h = 4.0+gc-rc h = (h/6.0) % 1.0 return h, l, s def 
hls_to_rgb(h, l, s): if s == 0.0: return l, l, l if l <= 0.5: m2 = l * (1.0+s) else: m2 = l+s-(l*s) m1 = 2.0*l - m2 return (_v(m1, m2, h+ONE_THIRD), _v(m1, m2, h), _v(m1, m2, h-ONE_THIRD)) def _v(m1, m2, hue): hue = hue % 1.0 if hue < ONE_SIXTH: return m1 + (m2-m1)*hue*6.0 if hue < 0.5: return m2 if hue < TWO_THIRD: return m1 + (m2-m1)*(TWO_THIRD-hue)*6.0 return m1 # HSV: Hue, Saturation, Value # H: position in the spectrum # S: color saturation ("purity") # V: color brightness def rgb_to_hsv(r, g, b): maxc = max(r, g, b) minc = min(r, g, b) v = maxc if minc == maxc: return 0.0, 0.0, v s = (maxc-minc) / maxc rc = (maxc-r) / (maxc-minc) gc = (maxc-g) / (maxc-minc) bc = (maxc-b) / (maxc-minc) if r == maxc: h = bc-gc elif g == maxc: h = 2.0+rc-bc else: h = 4.0+gc-rc h = (h/6.0) % 1.0 return h, s, v def hsv_to_rgb(h, s, v): if s == 0.0: return v, v, v i = int(h*6.0) # XXX assume int() truncates! f = (h*6.0) - i p = v*(1.0 - s) q = v*(1.0 - s*f) t = v*(1.0 - s*(1.0-f)) i = i%6 if i == 0: return v, t, p if i == 1: return q, v, p if i == 2: return p, v, t if i == 3: return p, q, v if i == 4: return t, p, v if i == 5: return v, p, q # Cannot get here
gpl-3.0
gauravbose/digital-menu
django/templatetags/static.py
250
4055
from django import template
from django.utils.encoding import iri_to_uri
from django.utils.six.moves.urllib.parse import urljoin

register = template.Library()


class PrefixNode(template.Node):
    # Template node that renders (or stores into a context variable) a URL
    # prefix setting such as STATIC_URL or MEDIA_URL.

    def __repr__(self):
        return "<PrefixNode for %r>" % self.name

    def __init__(self, varname=None, name=None):
        if name is None:
            raise template.TemplateSyntaxError(
                "Prefix nodes must be given a name to return.")
        self.varname = varname
        self.name = name

    @classmethod
    def handle_token(cls, parser, token, name):
        """
        Class method to parse prefix node and return a Node.
        """
        # token.split_contents() isn't useful here because tags using this method don't accept variable as arguments
        tokens = token.contents.split()
        if len(tokens) > 1 and tokens[1] != 'as':
            raise template.TemplateSyntaxError(
                "First argument in '%s' must be 'as'" % tokens[0])
        if len(tokens) > 1:
            # "{% tag as varname %}" form: stash the prefix under varname.
            varname = tokens[2]
        else:
            varname = None
        return cls(varname, name)

    @classmethod
    def handle_simple(cls, name):
        # Resolve the named setting lazily so importing this module does not
        # require Django settings to be configured.
        try:
            from django.conf import settings
        except ImportError:
            prefix = ''
        else:
            prefix = iri_to_uri(getattr(settings, name, ''))
        return prefix

    def render(self, context):
        prefix = self.handle_simple(self.name)
        if self.varname is None:
            return prefix
        # "as varname" form renders nothing; the value lands in the context.
        context[self.varname] = prefix
        return ''


@register.tag
def get_static_prefix(parser, token):
    """
    Populates a template variable with the static prefix,
    ``settings.STATIC_URL``.

    Usage::

        {% get_static_prefix [as varname] %}

    Examples::

        {% get_static_prefix %}
        {% get_static_prefix as static_prefix %}

    """
    return PrefixNode.handle_token(parser, token, "STATIC_URL")


@register.tag
def get_media_prefix(parser, token):
    """
    Populates a template variable with the media prefix,
    ``settings.MEDIA_URL``.

    Usage::

        {% get_media_prefix [as varname] %}

    Examples::

        {% get_media_prefix %}
        {% get_media_prefix as media_prefix %}

    """
    return PrefixNode.handle_token(parser, token, "MEDIA_URL")


class StaticNode(template.Node):
    # Template node for {% static %}: joins a (possibly variable) path with
    # the STATIC_URL prefix.

    def __init__(self, varname=None, path=None):
        if path is None:
            raise template.TemplateSyntaxError(
                "Static template nodes must be given a path to return.")
        self.path = path
        self.varname = varname

    def url(self, context):
        # self.path is a FilterExpression; resolve it against the context.
        path = self.path.resolve(context)
        return self.handle_simple(path)

    def render(self, context):
        url = self.url(context)
        if self.varname is None:
            return url
        context[self.varname] = url
        return ''

    @classmethod
    def handle_simple(cls, path):
        return urljoin(PrefixNode.handle_simple("STATIC_URL"), path)

    @classmethod
    def handle_token(cls, parser, token):
        """
        Class method to parse prefix node and return a Node.
        """
        bits = token.split_contents()

        if len(bits) < 2:
            raise template.TemplateSyntaxError(
                "'%s' takes at least one argument (path to file)" % bits[0])

        path = parser.compile_filter(bits[1])

        # NOTE(review): bits[3] assumes the exact 4-token form
        # "{% static path as var %}"; bits[-1] would be more robust --
        # confirm against Django upstream before changing.
        if len(bits) >= 2 and bits[-2] == 'as':
            varname = bits[3]
        else:
            varname = None

        return cls(varname, path)


@register.tag('static')
def do_static(parser, token):
    """
    Joins the given path with the STATIC_URL setting.

    Usage::

        {% static path [as varname] %}

    Examples::

        {% static "myapp/css/base.css" %}
        {% static variable_with_path %}
        {% static "myapp/css/base.css" as admin_base_css %}
        {% static variable_with_path as varname %}

    """
    return StaticNode.handle_token(parser, token)


def static(path):
    # Python-level convenience helper mirroring the {% static %} tag.
    return StaticNode.handle_simple(path)
bsd-3-clause
kvar/ansible
lib/ansible/plugins/httpapi/fortios.py
11
5606
# This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. # Modules you write using this snippet, which is embedded dynamically by Ansible # still belong to the author of the module, and may assign their own license # to the complete work. # # (c) 2019 Fortinet, Inc # All rights reserved. # # Redistribution and use in source and binary forms, with or without modification, # are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE # USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

DOCUMENTATION = """
---
author:
    - Miguel Angel Munoz (@magonzalez)
httpapi : fortios
short_description: HttpApi Plugin for Fortinet FortiOS Appliance or VM
description:
  - This HttpApi plugin provides methods to connect to Fortinet FortiOS Appliance or VM via REST API
version_added: "2.9"
"""

from ansible.plugins.httpapi import HttpApiBase
from ansible.module_utils.basic import to_text
from ansible.module_utils.six.moves import urllib
import json
# NOTE(review): json appears unused in this module -- confirm before removing.
import re


class HttpApi(HttpApiBase):
    # Ansible HttpApi connection plugin for FortiOS REST API sessions.

    def __init__(self, connection):
        super(HttpApi, self).__init__(connection)

        # CSRF token captured from login response cookies; echoed back as
        # the "x-csrftoken" header on every subsequent request.
        self._ccsrftoken = ''

    def set_become(self, become_context):
        """
        Elevation is not required on Fortinet devices - Skipped
        :param become_context: Unused input.
        :return: None
        """
        return None

    def login(self, username, password):
        """Call a defined login endpoint to receive an authentication token."""

        # Credentials are URL-quoted and sent as a form body to /logincheck.
        data = "username=" + urllib.parse.quote(username) + "&secretkey=" + urllib.parse.quote(password) + "&ajax=1"
        dummy, result_data = self.send_request(url='/logincheck', data=data, method='POST')
        # First character of the response body signals success ('1') or not.
        if result_data[0] != '1':
            raise Exception('Wrong credentials. Please check')

    def logout(self):
        """ Call to implement session logout."""
        self.send_request(url='/logout', method="POST")

    def update_auth(self, response, response_text):
        """
        Get cookies and obtain value for csrftoken that will be used on next requests
        :param response: Response given by the server.
        :param response_text Unused_input.
        :return: Dictionary containing headers
        """

        headers = {}

        # Normalize the raw (name, value) header pairs across urllib
        # implementations: some expose a private _headers list, others only
        # mapping-style access.
        resp_raw_headers = []
        if hasattr(response.headers, '_headers'):
            resp_raw_headers = response.headers._headers
        else:
            resp_raw_headers = [(attr, response.headers[attr]) for attr in response.headers]

        for attr, val in resp_raw_headers:
            if attr.lower() == 'set-cookie' and 'APSCOOKIE_' in val:
                headers['Cookie'] = val

                # XXX: In urllib2 all the 'set-cookie' headers are coalesced into one
                x_ccsrftoken_position = val.find('ccsrftoken=')
                if x_ccsrftoken_position != -1:
                    # Token value is wrapped in double quotes inside the cookie.
                    token_string = val[x_ccsrftoken_position + len('ccsrftoken='):].split('\"')[1]
                    self._ccsrftoken = token_string
            elif attr.lower() == 'set-cookie' and 'ccsrftoken=' in val:
                csrftoken_search = re.search('\"(.*)\"', val)
                if csrftoken_search:
                    self._ccsrftoken = csrftoken_search.group(1)

        headers['x-csrftoken'] = self._ccsrftoken

        return headers

    def handle_httperror(self, exc):
        """
        Not required on Fortinet devices - Skipped
        :param exc: Unused input.
        :return: exc
        """
        return exc

    def send_request(self, **message_kwargs):
        """
        Responsible for actual sending of data to the connection httpapi base plugin.
        :param message_kwargs: A formatted dictionary containing request info: url, data, method

        :return: Status code and response data.
        """
        url = message_kwargs.get('url', '/')
        data = message_kwargs.get('data', '')
        method = message_kwargs.get('method', 'GET')

        try:
            response, response_data = self.connection.send(url, data, method=method)
            # Older transports expose the status on response.headers instead
            # of directly on the response object.
            response_status = None
            if hasattr(response, 'status'):
                response_status = response.status
            else:
                response_status = response.headers.status

            return response_status, to_text(response_data.getvalue())
        except Exception as err:
            raise Exception(err)
gpl-3.0
epam/JDI
Python/Test/jdi_uitest_web_examples/main/site/epam/epam_site.py
3
1872
from JDI.core.interfaces.check_page_types import CheckPageTypes
from JDI.web.selenium.elements.api_interact.find_element_by import By
from JDI.web.selenium.elements.common.button import Button
from JDI.web.selenium.elements.complex.menu import Menu
from JDI.web.selenium.elements.composite.web_site import WebSite
from Test.jdi_uitest_web_examples.main.enums.enums import HeaderMenu, HeaderSolutionsMenu
from Test.jdi_uitest_web_examples.main.site.epam.pages import HomePage, CareerPage, ProductDevelopmentPage, \
    JobListingPage, JobDescriptionPage


class EpamSite(WebSite):
    """Page-object composite describing the epam.com site for the JDI UI tests."""

    home_page = HomePage(url="/", title="EPAM | Software Product Development Services")
    career_page = CareerPage(url="/careers", title="Careers")
    product_development_page = ProductDevelopmentPage(url="/solutions/core-engineering/product-development")
    # The listing page is matched with CONTAINS checks because the live URL
    # carries a long, variable query string.
    job_listing_page = JobListingPage(
        url="/careers/job-listings?sort=best_match&query=Engineer&department=Software+Test+Engineering&city=St-Petersburg&country=Russia",
        url_template="/careers/job-listings", title="Job Listings",
        url_check_type=CheckPageTypes.CONTAINS, title_check_type=CheckPageTypes.CONTAINS)
    # Disabled page/element definitions kept as reference for a future port.
    job_description_page = None
    #JobDescriptionPage(
    #    url="/careers/job-listings/job.11584#apply", urlTemplate=".*/careers/job-listings/job\\.\\d*#apply",
    #    urlCheckType=CheckPageTypes.MATCH)

    header_menu = Menu(by_all_options_names_locator=By.css(".tile-menu>li>a"), parametrized_class=HeaderMenu)
    multiple_header_menu = Menu(by_menu_levels_locators=[By.css(".tile-menu>li>a"),
                                                         By.xpath("//*[@class='tile-menu']//*[@href='/solutions']//..//li")])
    line_menu = None  #JDIElements(By.css(".tile-menu>li>a"), Button)
    header_solutions_menu = None  #Menu(By.css(".tile-menu .submenu a"), HeaderSolutionsMenu)
gpl-3.0
RyanDJLee/pyta
nodes/AsyncFunctionDef.py
1
1078
""" AsyncFunctionDef astroid node Subclass of FunctionDef astroid node. An async def function definition and used for async astroid nodes like AsyncFor and AsyncWith. Attributes: - name (str) - The function's name. - args (Arguments) - An arguments node. See Arguments.py for more details. - doc (str) - The docstring of the function. - body (List[Node]) - The list of nodes inside the function. - decorators (Decorator) - The decorator to be applied on this function. - returns (None) - The return annotation. Example: - name -> animal - args -> arguments(args=[], vararg=None, kwonlyargs= [arg(arg='arg')],kw_defaults=[], kwarg=None, defaults=[]) - doc -> "This is function animal." - body -> [Assign(dog, "an animal")] - decorators -> @wrapper - returns -> return dog """ @wrapper async def animal(arg): """ This is function animal. """ dog = "an animal" return dog
gpl-3.0
vishnugonela/devstack
tools/outfilter.py
51
2650
#!/usr/bin/env python
#
# Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# This is an output filter to filter and timestamp the logs from Grenade and
# DevStack. Largely our awk filters got beyond the complexity level which were
# sustainable, so this provides us much more control in a single place.
#
# The overhead of running python should be less than execing `date` a million
# times during a run.

import argparse
import datetime
import re
import sys

# Raw strings: '\+' and '\d' are regex escapes, not string escapes (the
# unprefixed originals are deprecated/invalid escape sequences on Python 3).
IGNORE_LINES = re.compile(r'(set \+o|xtrace)')
# Timestamp prefix like "2014-01-01 12:00:00.123 |"; the dot before the
# milliseconds is escaped so it only matches a literal '.'.
HAS_DATE = re.compile(r'^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d{3} \|')


def get_options():
    """Parse command line options: -o/--outfile and -v/--verbose."""
    parser = argparse.ArgumentParser(
        description='Filter output by DevStack and friends')
    parser.add_argument('-o', '--outfile',
                        help='Output file for content',
                        default=None)
    parser.add_argument('-v', '--verbose', action='store_true',
                        default=False)
    return parser.parse_args()


def skip_line(line):
    """Should we skip this line."""
    return IGNORE_LINES.search(line) is not None


def main():
    """Read stdin, drop noise lines, timestamp the rest, tee to outfile."""
    opts = get_options()
    outfile = None
    if opts.outfile:
        # BUG FIX: buffering=0 is invalid for text-mode files on Python 3
        # (raises ValueError); use line buffering.  The explicit flush()
        # calls below preserve the original write-through behaviour.
        outfile = open(opts.outfile, 'a', 1)

    # Otherwise fileinput reprocess args as files
    sys.argv = []
    while True:
        line = sys.stdin.readline()
        if not line:
            return 0
        # put skip lines here
        if skip_line(line):
            continue
        # This prevents us from nesting date lines, because
        # we'd like to pull this in directly in Grenade and not double
        # up on DevStack lines
        if HAS_DATE.search(line) is None:
            now = datetime.datetime.utcnow()
            line = ("%s | %s" % (
                now.strftime("%Y-%m-%d %H:%M:%S.%f")[:-3],
                line))
        if opts.verbose:
            sys.stdout.write(line)
            sys.stdout.flush()
        if outfile:
            outfile.write(line)
            outfile.flush()


if __name__ == '__main__':
    try:
        sys.exit(main())
    except KeyboardInterrupt:
        sys.exit(1)
apache-2.0
yd0str/infernal-twin
build/pip/pip/_vendor/distlib/resources.py
224
10615
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
from __future__ import unicode_literals

import bisect
import io
import logging
import os
import pkgutil
import shutil
# NOTE(review): shutil appears unused in this module -- confirm before removing.
import sys
import types
import zipimport

from . import DistlibException
from .util import cached_property, get_cache_base, path_to_cache_dir, Cache

logger = logging.getLogger(__name__)


# Module-level ResourceCache singleton, lazily created by Resource.file_path.
cache = None    # created when needed


class ResourceCache(Cache):
    # File-system cache used to materialize resources that do not live on
    # the file system directly (e.g. resources inside a zip archive).

    def __init__(self, base=None):
        if base is None:
            # Use native string to avoid issues on 2.x: see Python #20140.
            base = os.path.join(get_cache_base(), str('resource-cache'))
        super(ResourceCache, self).__init__(base)

    def is_stale(self, resource, path):
        """
        Is the cache stale for the given resource?

        :param resource: The :class:`Resource` being cached.
        :param path: The path of the resource in the cache.
        :return: True if the cache is stale.
        """
        # Cache invalidation is a hard problem :-)
        return True

    def get(self, resource):
        """
        Get a resource into the cache,

        :param resource: A :class:`Resource` instance.
        :return: The pathname of the resource in the cache.
        """
        prefix, path = resource.finder.get_cache_info(resource)
        if prefix is None:
            # Resource is already a real file on disk; serve it directly.
            result = path
        else:
            result = os.path.join(self.base, self.prefix_to_dir(prefix), path)
            dirname = os.path.dirname(result)
            if not os.path.isdir(dirname):
                os.makedirs(dirname)
            if not os.path.exists(result):
                stale = True
            else:
                stale = self.is_stale(resource, path)
            if stale:
                # write the bytes of the resource to the cache location
                with open(result, 'wb') as f:
                    f.write(resource.bytes)
        return result


class ResourceBase(object):
    # Common base: every resource knows its finder and its name within the
    # package.
    def __init__(self, finder, name):
        self.finder = finder
        self.name = name


class Resource(ResourceBase):
    """
    A class representing an in-package resource, such as a data file. This is
    not normally instantiated by user code, but rather by a
    :class:`ResourceFinder` which manages the resource.
    """
    is_container = False        # Backwards compatibility

    def as_stream(self):
        """
        Get the resource as a stream.

        This is not a property to make it obvious that it returns a new stream
        each time.
        """
        return self.finder.get_stream(self)

    @cached_property
    def file_path(self):
        # Lazily create the module-level cache and materialize this resource
        # on the file system.
        global cache
        if cache is None:
            cache = ResourceCache()
        return cache.get(self)

    @cached_property
    def bytes(self):
        return self.finder.get_bytes(self)

    @cached_property
    def size(self):
        return self.finder.get_size(self)


class ResourceContainer(ResourceBase):
    # A directory-like resource whose children are exposed via `resources`.
    is_container = True     # Backwards compatibility

    @cached_property
    def resources(self):
        return self.finder.get_resources(self)


class ResourceFinder(object):
    """
    Resource finder for file system resources.
    """

    if sys.platform.startswith('java'):
        skipped_extensions = ('.pyc', '.pyo', '.class')
    else:
        skipped_extensions = ('.pyc', '.pyo')

    def __init__(self, module):
        self.module = module
        self.loader = getattr(module, '__loader__', None)
        self.base = os.path.dirname(getattr(module, '__file__', ''))

    def _adjust_path(self, path):
        return os.path.realpath(path)

    def _make_path(self, resource_name):
        # Issue #50: need to preserve type of path on Python 2.x
        # like os.path._get_sep
        if isinstance(resource_name, bytes):    # should only happen on 2.x
            sep = b'/'
        else:
            sep = '/'
        parts = resource_name.split(sep)
        parts.insert(0, self.base)
        result = os.path.join(*parts)
        return self._adjust_path(result)

    def _find(self, path):
        return os.path.exists(path)

    def get_cache_info(self, resource):
        # (None, path) tells ResourceCache.get the resource is already a
        # real file and needs no copying.
        return None, resource.path

    def find(self, resource_name):
        # Return a Resource/ResourceContainer for the name, or None if it
        # does not exist under this finder's base.
        path = self._make_path(resource_name)
        if not self._find(path):
            result = None
        else:
            if self._is_directory(path):
                result = ResourceContainer(self, resource_name)
            else:
                result = Resource(self, resource_name)
            result.path = path
        return result

    def get_stream(self, resource):
        return open(resource.path, 'rb')

    def get_bytes(self, resource):
        with open(resource.path, 'rb') as f:
            return f.read()

    def get_size(self, resource):
        return os.path.getsize(resource.path)

    def get_resources(self, resource):
        # Names of the container's children, minus compiled-code artifacts.
        def allowed(f):
            return (f != '__pycache__' and not
                    f.endswith(self.skipped_extensions))
        return set([f for f in os.listdir(resource.path) if allowed(f)])

    def is_container(self, resource):
        return self._is_directory(resource.path)

    _is_directory = staticmethod(os.path.isdir)

    def iterator(self, resource_name):
        # Breadth-first traversal yielding every resource (containers
        # included) under resource_name.
        resource = self.find(resource_name)
        if resource is not None:
            todo = [resource]
            while todo:
                resource = todo.pop(0)
                yield resource
                if resource.is_container:
                    rname = resource.name
                    for name in resource.resources:
                        if not rname:
                            new_name = name
                        else:
                            new_name = '/'.join([rname, name])
                        child = self.find(new_name)
                        if child.is_container:
                            todo.append(child)
                        else:
                            yield child


class ZipResourceFinder(ResourceFinder):
    """
    Resource finder for resources in .zip files.
    """
    def __init__(self, module):
        super(ZipResourceFinder, self).__init__(module)
        archive = self.loader.archive
        # Length of "<archive path><sep>"; used to strip the archive prefix
        # from absolute resource paths before TOC lookups.
        self.prefix_len = 1 + len(archive)
        # PyPy doesn't have a _files attr on zipimporter, and you can't set one
        if hasattr(self.loader, '_files'):
            self._files = self.loader._files
        else:
            self._files = zipimport._zip_directory_cache[archive]
        # Sorted TOC names enable bisect-based prefix searches below.
        self.index = sorted(self._files)

    def _adjust_path(self, path):
        return path

    def _find(self, path):
        path = path[self.prefix_len:]
        if path in self._files:
            result = True
        else:
            # Not a file entry; treat as a directory and probe for any TOC
            # entry under "<path><sep>".
            if path and path[-1] != os.sep:
                path = path + os.sep
            i = bisect.bisect(self.index, path)
            try:
                result = self.index[i].startswith(path)
            except IndexError:
                result = False
        if not result:
            logger.debug('_find failed: %r %r', path, self.loader.prefix)
        else:
            logger.debug('_find worked: %r %r', path, self.loader.prefix)
        return result

    def get_cache_info(self, resource):
        prefix = self.loader.archive
        path = resource.path[1 + len(prefix):]
        return prefix, path

    def get_bytes(self, resource):
        return self.loader.get_data(resource.path)

    def get_stream(self, resource):
        return io.BytesIO(self.get_bytes(resource))

    def get_size(self, resource):
        path = resource.path[self.prefix_len:]
        # Index 3 of a zipimport TOC entry is presumably the uncompressed
        # file size -- verify against CPython's zipimport internals.
        return self._files[path][3]

    def get_resources(self, resource):
        path = resource.path[self.prefix_len:]
        if path and path[-1] != os.sep:
            path += os.sep
        plen = len(path)
        result = set()
        # Walk the sorted TOC from the first entry under the directory.
        i = bisect.bisect(self.index, path)
        while i < len(self.index):
            if not self.index[i].startswith(path):
                break
            s = self.index[i][plen:]
            result.add(s.split(os.sep, 1)[0])   # only immediate children
            i += 1
        return result

    def _is_directory(self, path):
        path = path[self.prefix_len:]
        if path and path[-1] != os.sep:
            path += os.sep
        i = bisect.bisect(self.index, path)
        try:
            result = self.index[i].startswith(path)
        except IndexError:
            result = False
        return result

# Maps loader types to finder factories; extensible via register_finder.
_finder_registry = {
    type(None): ResourceFinder,
    zipimport.zipimporter: ZipResourceFinder
}

try:
    import _frozen_importlib
    _finder_registry[_frozen_importlib.SourceFileLoader] = ResourceFinder
    _finder_registry[_frozen_importlib.FileFinder] = ResourceFinder
except (ImportError, AttributeError):
    pass


def register_finder(loader, finder_maker):
    # Register a finder factory for a custom loader type.
    _finder_registry[type(loader)] = finder_maker

# One finder instance per package name.
_finder_cache = {}


def finder(package):
    """
    Return a resource finder for a package.
    :param package: The name of the package.
    :return: A :class:`ResourceFinder` instance for the package.
    """
    if package in _finder_cache:
        result = _finder_cache[package]
    else:
        if package not in sys.modules:
            __import__(package)
        module = sys.modules[package]
        # Only packages (which have __path__) can own resources.
        path = getattr(module, '__path__', None)
        if path is None:
            raise DistlibException('You cannot get a finder for a module, '
                                   'only for a package')
        loader = getattr(module, '__loader__', None)
        finder_maker = _finder_registry.get(type(loader))
        if finder_maker is None:
            raise DistlibException('Unable to locate finder for %r' % package)
        result = finder_maker(module)
        _finder_cache[package] = result
    return result


# Stand-in module reused by finder_for_path for non-package containers.
_dummy_module = types.ModuleType(str('__dummy__'))


def finder_for_path(path):
    """
    Return a resource finder for a path, which should represent a container.

    :param path: The path.
    :return: A :class:`ResourceFinder` instance for the path.
    """
    result = None
    # calls any path hooks, gets importer into cache
    pkgutil.get_importer(path)
    loader = sys.path_importer_cache.get(path)
    finder = _finder_registry.get(type(loader))
    if finder:
        module = _dummy_module
        module.__file__ = os.path.join(path, '')
        module.__loader__ = loader
        result = finder(module)
    return result
gpl-3.0
mjpost/sacreBLEU
sacrebleu/sacrebleu.py
1
17856
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # Copyright 2017--2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You may not # use this file except in compliance with the License. A copy of the License # is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is distributed on # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either # express or implied. See the License for the specific language governing # permissions and limitations under the License. """ SacreBLEU provides hassle-free computation of shareable, comparable, and reproducible BLEU scores. Inspired by Rico Sennrich's `multi-bleu-detok.perl`, it produces the official WMT scores but works with plain text. It also knows all the standard test sets and handles downloading, processing, and tokenization for you. See the [README.md] file for more information. """ import io import sys import logging import pathlib import argparse # Allows calling the script as a standalone utility # See: https://github.com/mjpost/sacrebleu/issues/86 if __package__ is None and __name__ == '__main__': parent = pathlib.Path(__file__).absolute().parents[1] sys.path.insert(0, str(parent)) __package__ = 'sacrebleu' from .tokenizers import TOKENIZERS, DEFAULT_TOKENIZER from .dataset import DATASETS, DOMAINS, COUNTRIES, SUBSETS from .metrics import METRICS from .utils import smart_open, filter_subset, get_available_origlangs, SACREBLEU_DIR from .utils import get_langpairs_for_testset, get_available_testsets from .utils import print_test_set, get_reference_files, download_test_set from . import __version__ as VERSION sacrelogger = logging.getLogger('sacrebleu') try: # SIGPIPE is not available on Windows machines, throwing an exception. from signal import SIGPIPE # If SIGPIPE is available, change behaviour to default instead of ignore. 
from signal import signal, SIG_DFL signal(SIGPIPE, SIG_DFL) except ImportError: sacrelogger.warning('Could not import signal.SIGPIPE (this is expected on Windows machines)') def parse_args(): arg_parser = argparse.ArgumentParser( description='sacreBLEU: Hassle-free computation of shareable BLEU scores.\n' 'Quick usage: score your detokenized output against WMT\'14 EN-DE:\n' ' cat output.detok.de | sacrebleu -t wmt14 -l en-de', formatter_class=argparse.RawDescriptionHelpFormatter) arg_parser.add_argument('--citation', '--cite', default=False, action='store_true', help='dump the bibtex citation and quit.') arg_parser.add_argument('--list', default=False, action='store_true', help='print a list of all available test sets.') arg_parser.add_argument('--test-set', '-t', type=str, default=None, help='the test set to use (see also --list) or a comma-separated list of test sets to be concatenated') arg_parser.add_argument('--language-pair', '-l', dest='langpair', default=None, help='source-target language pair (2-char ISO639-1 codes)') arg_parser.add_argument('--origlang', '-ol', dest='origlang', default=None, help='use a subset of sentences with a given original language (2-char ISO639-1 codes), "non-" prefix means negation') arg_parser.add_argument('--subset', dest='subset', default=None, help='use a subset of sentences whose document annotation matches a give regex (see SUBSETS in the source code)') arg_parser.add_argument('--download', type=str, default=None, help='download a test set and quit') arg_parser.add_argument('--echo', choices=['src', 'ref', 'both'], type=str, default=None, help='output the source (src), reference (ref), or both (both, pasted) to STDOUT and quit') # I/O related arguments arg_parser.add_argument('--input', '-i', type=str, default='-', help='Read input from a file instead of STDIN') arg_parser.add_argument('refs', nargs='*', default=[], help='optional list of references (for backwards-compatibility with older scripts)') 
arg_parser.add_argument('--num-refs', '-nr', type=int, default=1, help='Split the reference stream on tabs, and expect this many references. Default: %(default)s.') arg_parser.add_argument('--encoding', '-e', type=str, default='utf-8', help='open text files with specified encoding (default: %(default)s)') # Metric selection arg_parser.add_argument('--metrics', '-m', choices=METRICS.keys(), nargs='+', default=['bleu'], help='metrics to compute (default: bleu)') arg_parser.add_argument('--sentence-level', '-sl', action='store_true', help='Output metric on each sentence.') # BLEU-related arguments arg_parser.add_argument('-lc', action='store_true', default=False, help='Use case-insensitive BLEU (default: False)') arg_parser.add_argument('--smooth-method', '-s', choices=METRICS['bleu'].SMOOTH_DEFAULTS.keys(), default='exp', help='smoothing method: exponential decay (default), floor (increment zero counts), add-k (increment num/denom by k for n>1), or none') arg_parser.add_argument('--smooth-value', '-sv', type=float, default=None, help='The value to pass to the smoothing technique, only used for floor and add-k. Default floor: {}, add-k: {}.'.format( METRICS['bleu'].SMOOTH_DEFAULTS['floor'], METRICS['bleu'].SMOOTH_DEFAULTS['add-k'])) arg_parser.add_argument('--tokenize', '-tok', choices=TOKENIZERS.keys(), default=None, help='Tokenization method to use for BLEU. 
If not provided, defaults to `zh` for Chinese, `mecab` for Japanese and `mteval-v13a` otherwise.') arg_parser.add_argument('--force', default=False, action='store_true', help='insist that your tokenized input is actually detokenized') # ChrF-related arguments arg_parser.add_argument('--chrf-order', type=int, default=METRICS['chrf'].ORDER, help='chrf character order (default: %(default)s)') arg_parser.add_argument('--chrf-beta', type=int, default=METRICS['chrf'].BETA, help='chrf BETA parameter (default: %(default)s)') arg_parser.add_argument('--chrf-whitespace', action='store_true', default=False, help='include whitespace in chrF calculation (default: %(default)s)') # Reporting related arguments arg_parser.add_argument('--quiet', '-q', default=False, action='store_true', help='suppress informative output') arg_parser.add_argument('--short', default=False, action='store_true', help='produce a shorter (less human readable) signature') arg_parser.add_argument('--score-only', '-b', default=False, action='store_true', help='output only the BLEU score') arg_parser.add_argument('--width', '-w', type=int, default=1, help='floating point width (default: %(default)s)') arg_parser.add_argument('--detail', '-d', default=False, action='store_true', help='print extra information (split test sets based on origlang)') arg_parser.add_argument('-V', '--version', action='version', version='%(prog)s {}'.format(VERSION)) args = arg_parser.parse_args() return args def main(): args = parse_args() # Explicitly set the encoding sys.stdin = open(sys.stdin.fileno(), mode='r', encoding='utf-8', buffering=True, newline="\n") sys.stdout = open(sys.stdout.fileno(), mode='w', encoding='utf-8', buffering=True) if not args.quiet: logging.basicConfig(level=logging.INFO, format='sacreBLEU: %(message)s') if args.download: download_test_set(args.download, args.langpair) sys.exit(0) if args.list: if args.test_set: print(' '.join(get_langpairs_for_testset(args.test_set))) else: print('The available test 
sets are:') for testset in get_available_testsets(): print('%30s: %s' % (testset, DATASETS[testset].get('description', '').strip())) sys.exit(0) if args.sentence_level and len(args.metrics) > 1: sacrelogger.error('Only one metric can be used with Sentence-level reporting.') sys.exit(1) if args.citation: if not args.test_set: sacrelogger.error('I need a test set (-t).') sys.exit(1) for test_set in args.test_set.split(','): if 'citation' not in DATASETS[test_set]: sacrelogger.error('No citation found for %s', test_set) else: print(DATASETS[test_set]['citation']) sys.exit(0) if args.num_refs != 1 and (args.test_set is not None or len(args.refs) > 1): sacrelogger.error('The --num-refs argument allows you to provide any number of tab-delimited references in a single file.') sacrelogger.error('You can only use it with externaly-provided references, however (i.e., not with `-t`),') sacrelogger.error('and you cannot then provide multiple reference files.') sys.exit(1) if args.test_set is not None: for test_set in args.test_set.split(','): if test_set not in DATASETS: sacrelogger.error('Unknown test set "%s"', test_set) sacrelogger.error('Please run with --list to see the available test sets.') sys.exit(1) if args.test_set is None: if len(args.refs) == 0: sacrelogger.error('I need either a predefined test set (-t) or a list of references') sacrelogger.error(get_available_testsets()) sys.exit(1) elif len(args.refs) > 0: sacrelogger.error('I need exactly one of (a) a predefined test set (-t) or (b) a list of references') sys.exit(1) elif args.langpair is None: sacrelogger.error('I need a language pair (-l).') sys.exit(1) else: for test_set in args.test_set.split(','): langpairs = get_langpairs_for_testset(test_set) if args.langpair not in langpairs: sacrelogger.error('No such language pair "%s"', args.langpair) sacrelogger.error('Available language pairs for test set "%s": %s', test_set, ', '.join(langpairs)) sys.exit(1) if args.echo: if args.langpair is None or args.test_set 
is None: sacrelogger.warning("--echo requires a test set (--t) and a language pair (-l)") sys.exit(1) for test_set in args.test_set.split(','): print_test_set(test_set, args.langpair, args.echo, args.origlang, args.subset) sys.exit(0) if args.test_set is not None and args.tokenize == 'none': sacrelogger.warning("You are turning off sacrebleu's internal tokenization ('--tokenize none'), presumably to supply\n" "your own reference tokenization. Published numbers will not be comparable with other papers.\n") if 'ter' in args.metrics and args.tokenize is not None: logging.warning("Your setting of --tokenize will be ignored when " "computing TER") # Internal tokenizer settings if args.tokenize is None: # set default if args.langpair is not None and args.langpair.split('-')[1] == 'zh': args.tokenize = 'zh' elif args.langpair is not None and args.langpair.split('-')[1] == 'ja': args.tokenize = 'ja-mecab' else: args.tokenize = DEFAULT_TOKENIZER if args.langpair is not None and 'bleu' in args.metrics: if args.langpair.split('-')[1] == 'zh' and args.tokenize != 'zh': sacrelogger.warning('You should also pass "--tok zh" when scoring Chinese...') if args.langpair.split('-')[1] == 'ja' and not args.tokenize.startswith('ja-'): sacrelogger.warning('You should also pass "--tok ja-mecab" when scoring Japanese...') # concat_ref_files is a list of list of reference filenames, for example: # concat_ref_files = [[testset1_refA, testset1_refB], [testset2_refA, testset2_refB]] if args.test_set is None: concat_ref_files = [args.refs] else: concat_ref_files = [] for test_set in args.test_set.split(','): ref_files = get_reference_files(test_set, args.langpair) if len(ref_files) == 0: sacrelogger.warning('No references found for test set {}/{}.'.format(test_set, args.langpair)) concat_ref_files.append(ref_files) # Read references full_refs = [[] for x in range(max(len(concat_ref_files[0]), args.num_refs))] for ref_files in concat_ref_files: for refno, ref_file in enumerate(ref_files): for 
lineno, line in enumerate(smart_open(ref_file, encoding=args.encoding), 1): if args.num_refs != 1: splits = line.rstrip().split(sep='\t', maxsplit=args.num_refs-1) if len(splits) != args.num_refs: sacrelogger.error('FATAL: line {}: expected {} fields, but found {}.'.format(lineno, args.num_refs, len(splits))) sys.exit(17) for refno, split in enumerate(splits): full_refs[refno].append(split) else: full_refs[refno].append(line) # Decide on the number of final references, override the argument args.num_refs = len(full_refs) # Read hypotheses stream if args.input == '-': inputfh = io.TextIOWrapper(sys.stdin.buffer, encoding=args.encoding) else: inputfh = smart_open(args.input, encoding=args.encoding) full_system = inputfh.readlines() # Filter sentences according to a given origlang system, *refs = filter_subset( [full_system, *full_refs], args.test_set, args.langpair, args.origlang, args.subset) if len(system) == 0: message = 'Test set %s contains no sentence' % args.test_set if args.origlang is not None or args.subset is not None: message += ' with' message += '' if args.origlang is None else ' origlang=' + args.origlang message += '' if args.subset is None else ' subset=' + args.subset sacrelogger.error(message) sys.exit(1) # Create metric inventory, let each metric consume relevant args from argparse metrics = [METRICS[met](args) for met in args.metrics] # Handle sentence level and quit if args.sentence_level: # one metric in use for sentence-level metric = metrics[0] for output, *references in zip(system, *refs): score = metric.sentence_score(output, references) print(score.format(args.width, args.score_only, metric.signature)) sys.exit(0) # Else, handle system level for metric in metrics: try: score = metric.corpus_score(system, refs) except EOFError: sacrelogger.error('The input and reference stream(s) were of different lengths.') if args.test_set is not None: sacrelogger.error('\nThis could be a problem with your system output or with sacreBLEU\'s reference 
database.\n' 'If the latter, you can clean out the references cache by typing:\n' '\n' ' rm -r %s/%s\n' '\n' 'They will be downloaded automatically again the next time you run sacreBLEU.', SACREBLEU_DIR, args.test_set) sys.exit(1) else: print(score.format(args.width, args.score_only, metric.signature)) if args.detail: width = args.width sents_digits = len(str(len(full_system))) origlangs = args.origlang if args.origlang else get_available_origlangs(args.test_set, args.langpair) for origlang in origlangs: subsets = [None] if args.subset is not None: subsets += [args.subset] elif all(t in SUBSETS for t in args.test_set.split(',')): subsets += COUNTRIES + DOMAINS for subset in subsets: system, *refs = filter_subset([full_system, *full_refs], args.test_set, args.langpair, origlang, subset) if len(system) == 0: continue if subset in COUNTRIES: subset_str = '%20s' % ('country=' + subset) elif subset in DOMAINS: subset_str = '%20s' % ('domain=' + subset) else: subset_str = '%20s' % '' for metric in metrics: # FIXME: handle this in metrics if metric.name == 'bleu': _refs = refs elif metric.name == 'chrf': _refs = refs[0] score = metric.corpus_score(system, _refs) print('origlang={} {}: sentences={:{}} {}={:{}.{}f}'.format( origlang, subset_str, len(system), sents_digits, score.prefix, score.score, width+4, width)) if __name__ == '__main__': main()
apache-2.0
zofuthan/edx-platform
openedx/core/djangoapps/user_api/accounts/image_helpers.py
42
5427
""" Helper functions for the accounts API. """ import hashlib from django.conf import settings from django.core.exceptions import ObjectDoesNotExist from django.core.files.storage import get_storage_class from staticfiles.storage import staticfiles_storage from microsite_configuration import microsite from student.models import UserProfile from ..errors import UserNotFound PROFILE_IMAGE_FILE_EXTENSION = 'jpg' # All processed profile images are converted to JPEGs PROFILE_IMAGE_SIZES_MAP = { 'full': 500, 'large': 120, 'medium': 50, 'small': 30 } _PROFILE_IMAGE_SIZES = PROFILE_IMAGE_SIZES_MAP.values() def get_profile_image_storage(): """ Configures and returns a django Storage instance that can be used to physically locate, read and write profile images. """ config = settings.PROFILE_IMAGE_BACKEND storage_class = get_storage_class(config['class']) return storage_class(**config['options']) def _make_profile_image_name(username): """ Returns the user-specific part of the image filename, based on a hash of the username. """ return hashlib.md5(settings.PROFILE_IMAGE_SECRET_KEY + username).hexdigest() def _get_profile_image_filename(name, size, file_extension=PROFILE_IMAGE_FILE_EXTENSION): """ Returns the full filename for a profile image, given the name and size. """ return '{name}_{size}.{file_extension}'.format(name=name, size=size, file_extension=file_extension) def _get_profile_image_urls(name, storage, file_extension=PROFILE_IMAGE_FILE_EXTENSION, version=None): """ Returns a dict containing the urls for a complete set of profile images, keyed by "friendly" name (e.g. "full", "large", "medium", "small"). 
""" def _make_url(size): # pylint: disable=missing-docstring url = storage.url( _get_profile_image_filename(name, size, file_extension=file_extension) ) return '{}?v={}'.format(url, version) if version is not None else url return {size_display_name: _make_url(size) for size_display_name, size in PROFILE_IMAGE_SIZES_MAP.items()} def get_profile_image_names(username): """ Returns a dict containing the filenames for a complete set of profile images, keyed by pixel size. """ name = _make_profile_image_name(username) return {size: _get_profile_image_filename(name, size) for size in _PROFILE_IMAGE_SIZES} def get_profile_image_urls_for_user(user, request=None): """ Return a dict {size:url} for each profile image for a given user. Notes: - this function does not determine whether the set of profile images exists, only what the URLs will be if they do exist. It is assumed that callers will use `_get_default_profile_image_urls` instead to provide a set of urls that point to placeholder images, when there are no user- submitted images. - based on the value of django.conf.settings.PROFILE_IMAGE_BACKEND, the URL may be relative, and in that case the caller is responsible for constructing the full URL if needed. Arguments: user (django.contrib.auth.User): the user for whom we are getting urls. Returns: dictionary of {size_display_name: url} for each image. """ if user.profile.has_profile_image: urls = _get_profile_image_urls( _make_profile_image_name(user.username), get_profile_image_storage(), version=user.profile.profile_image_uploaded_at.strftime("%s"), ) else: urls = _get_default_profile_image_urls() if request: for key, value in urls.items(): urls[key] = request.build_absolute_uri(value) return urls def _get_default_profile_image_urls(): """ Returns a dict {size:url} for a complete set of default profile images, used as a placeholder when there are no user-submitted images. TODO The result of this function should be memoized, but not in tests. 
""" return _get_profile_image_urls( microsite.get_value('PROFILE_IMAGE_DEFAULT_FILENAME', settings.PROFILE_IMAGE_DEFAULT_FILENAME), staticfiles_storage, file_extension=settings.PROFILE_IMAGE_DEFAULT_FILE_EXTENSION, ) def set_has_profile_image(username, is_uploaded, upload_dt=None): """ System (not user-facing) API call used to store whether the user has uploaded a profile image, and if so, when. Used by profile_image API. Arguments: username (django.contrib.auth.User.username): references the user who uploaded an image. is_uploaded (bool): whether or not the user has an uploaded profile image. upload_dt (datetime.datetime): If `is_uploaded` is True, this should contain the server-side date+time of the upload. If `is_uploaded` is False, the parameter is optional and will be ignored. Raises: ValueError: is_uploaded was True, but no upload datetime was supplied. UserNotFound: no user with username `username` exists. """ if is_uploaded and upload_dt is None: raise ValueError("No upload datetime was supplied.") elif not is_uploaded: upload_dt = None try: profile = UserProfile.objects.get(user__username=username) except ObjectDoesNotExist: raise UserNotFound() profile.profile_image_uploaded_at = upload_dt profile.save()
agpl-3.0
Austen-G/BuildingBlocks
closure-library/closure/bin/calcdeps.py
42
18576
#!/usr/bin/env python # # Copyright 2006 The Closure Library Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Calculates JavaScript dependencies without requiring Google's build system. This tool is deprecated and is provided for legacy users. See build/closurebuilder.py and build/depswriter.py for the current tools. It iterates over a number of search paths and builds a dependency tree. With the inputs provided, it walks the dependency tree and outputs all the files required for compilation. 
""" try: import distutils.version except ImportError: # distutils is not available in all environments distutils = None import logging import optparse import os import re import subprocess import sys _BASE_REGEX_STRING = '^\s*goog\.%s\(\s*[\'"](.+)[\'"]\s*\)' req_regex = re.compile(_BASE_REGEX_STRING % 'require') prov_regex = re.compile(_BASE_REGEX_STRING % 'provide') ns_regex = re.compile('^ns:((\w+\.)*(\w+))$') version_regex = re.compile('[\.0-9]+') def IsValidFile(ref): """Returns true if the provided reference is a file and exists.""" return os.path.isfile(ref) def IsJsFile(ref): """Returns true if the provided reference is a Javascript file.""" return ref.endswith('.js') def IsNamespace(ref): """Returns true if the provided reference is a namespace.""" return re.match(ns_regex, ref) is not None def IsDirectory(ref): """Returns true if the provided reference is a directory.""" return os.path.isdir(ref) def ExpandDirectories(refs): """Expands any directory references into inputs. Description: Looks for any directories in the provided references. Found directories are recursively searched for .js files, which are then added to the result list. Args: refs: a list of references such as files, directories, and namespaces Returns: A list of references with directories removed and replaced by any .js files that are found in them. Also, the paths will be normalized. 
""" result = [] for ref in refs: if IsDirectory(ref): # Disable 'Unused variable' for subdirs # pylint: disable=unused-variable for (directory, subdirs, filenames) in os.walk(ref): for filename in filenames: if IsJsFile(filename): result.append(os.path.join(directory, filename)) else: result.append(ref) return map(os.path.normpath, result) class DependencyInfo(object): """Represents a dependency that is used to build and walk a tree.""" def __init__(self, filename): self.filename = filename self.provides = [] self.requires = [] def __str__(self): return '%s Provides: %s Requires: %s' % (self.filename, repr(self.provides), repr(self.requires)) def BuildDependenciesFromFiles(files): """Build a list of dependencies from a list of files. Description: Takes a list of files, extracts their provides and requires, and builds out a list of dependency objects. Args: files: a list of files to be parsed for goog.provides and goog.requires. Returns: A list of dependency objects, one for each file in the files argument. """ result = [] filenames = set() for filename in files: if filename in filenames: continue # Python 3 requires the file encoding to be specified if (sys.version_info[0] < 3): file_handle = open(filename, 'r') else: file_handle = open(filename, 'r', encoding='utf8') try: dep = CreateDependencyInfo(filename, file_handle) result.append(dep) finally: file_handle.close() filenames.add(filename) return result def CreateDependencyInfo(filename, source): """Create dependency info. Args: filename: Filename for source. source: File-like object containing source. Returns: A DependencyInfo object with provides and requires filled. 
""" dep = DependencyInfo(filename) for line in source: if re.match(req_regex, line): dep.requires.append(re.search(req_regex, line).group(1)) if re.match(prov_regex, line): dep.provides.append(re.search(prov_regex, line).group(1)) return dep def BuildDependencyHashFromDependencies(deps): """Builds a hash for searching dependencies by the namespaces they provide. Description: Dependency objects can provide multiple namespaces. This method enumerates the provides of each dependency and adds them to a hash that can be used to easily resolve a given dependency by a namespace it provides. Args: deps: a list of dependency objects used to build the hash. Raises: Exception: If a multiple files try to provide the same namepace. Returns: A hash table { namespace: dependency } that can be used to resolve a dependency by a namespace it provides. """ dep_hash = {} for dep in deps: for provide in dep.provides: if provide in dep_hash: raise Exception('Duplicate provide (%s) in (%s, %s)' % ( provide, dep_hash[provide].filename, dep.filename)) dep_hash[provide] = dep return dep_hash def CalculateDependencies(paths, inputs): """Calculates the dependencies for given inputs. Description: This method takes a list of paths (files, directories) and builds a searchable data structure based on the namespaces that each .js file provides. It then parses through each input, resolving dependencies against this data structure. The final output is a list of files, including the inputs, that represent all of the code that is needed to compile the given inputs. Args: paths: the references (files, directories) that are used to build the dependency hash. inputs: the inputs (files, directories, namespaces) that have dependencies that need to be calculated. Raises: Exception: if a provided input is invalid. Returns: A list of all files, including inputs, that are needed to compile the given inputs. 
""" deps = BuildDependenciesFromFiles(paths + inputs) search_hash = BuildDependencyHashFromDependencies(deps) result_list = [] seen_list = [] for input_file in inputs: if IsNamespace(input_file): namespace = re.search(ns_regex, input_file).group(1) if namespace not in search_hash: raise Exception('Invalid namespace (%s)' % namespace) input_file = search_hash[namespace].filename if not IsValidFile(input_file) or not IsJsFile(input_file): raise Exception('Invalid file (%s)' % input_file) seen_list.append(input_file) file_handle = open(input_file, 'r') try: for line in file_handle: if re.match(req_regex, line): require = re.search(req_regex, line).group(1) ResolveDependencies(require, search_hash, result_list, seen_list) finally: file_handle.close() result_list.append(input_file) # All files depend on base.js, so put it first. base_js_path = FindClosureBasePath(paths) if base_js_path: result_list.insert(0, base_js_path) else: logging.warning('Closure Library base.js not found.') return result_list def FindClosureBasePath(paths): """Given a list of file paths, return Closure base.js path, if any. Args: paths: A list of paths. Returns: The path to Closure's base.js file including filename, if found. """ for path in paths: pathname, filename = os.path.split(path) if filename == 'base.js': f = open(path) is_base = False # Sanity check that this is the Closure base file. Check that this # is where goog is defined. This is determined by the @provideGoog # flag. for line in f: if '@provideGoog' in line: is_base = True break f.close() if is_base: return path def ResolveDependencies(require, search_hash, result_list, seen_list): """Takes a given requirement and resolves all of the dependencies for it. Description: A given requirement may require other dependencies. This method recursively resolves all dependencies for the given requirement. Raises: Exception: when require does not exist in the search_hash. Args: require: the namespace to resolve dependencies for. 
search_hash: the data structure used for resolving dependencies. result_list: a list of filenames that have been calculated as dependencies. This variable is the output for this function. seen_list: a list of filenames that have been 'seen'. This is required for the dependency->dependent ordering. """ if require not in search_hash: raise Exception('Missing provider for (%s)' % require) dep = search_hash[require] if not dep.filename in seen_list: seen_list.append(dep.filename) for sub_require in dep.requires: ResolveDependencies(sub_require, search_hash, result_list, seen_list) result_list.append(dep.filename) def GetDepsLine(dep, base_path): """Returns a JS string for a dependency statement in the deps.js file. Args: dep: The dependency that we're printing. base_path: The path to Closure's base.js including filename. """ return 'goog.addDependency("%s", %s, %s);' % ( GetRelpath(dep.filename, base_path), dep.provides, dep.requires) def GetRelpath(path, start): """Return a relative path to |path| from |start|.""" # NOTE: Python 2.6 provides os.path.relpath, which has almost the same # functionality as this function. Since we want to support 2.4, we have # to implement it manually. :( path_list = os.path.abspath(os.path.normpath(path)).split(os.sep) start_list = os.path.abspath( os.path.normpath(os.path.dirname(start))).split(os.sep) common_prefix_count = 0 for i in range(0, min(len(path_list), len(start_list))): if path_list[i] != start_list[i]: break common_prefix_count += 1 # Always use forward slashes, because this will get expanded to a url, # not a file path. return '/'.join(['..'] * (len(start_list) - common_prefix_count) + path_list[common_prefix_count:]) def PrintLine(msg, out): out.write(msg) out.write('\n') def PrintDeps(source_paths, deps, out): """Print out a deps.js file from a list of source paths. Args: source_paths: Paths that we should generate dependency info for. deps: Paths that provide dependency info. 
Their dependency info should not appear in the deps file. out: The output file. Returns: True on success, false if it was unable to find the base path to generate deps relative to. """ base_path = FindClosureBasePath(source_paths + deps) if not base_path: return False PrintLine('// This file was autogenerated by calcdeps.py', out) excludesSet = set(deps) for dep in BuildDependenciesFromFiles(source_paths + deps): if not dep.filename in excludesSet: PrintLine(GetDepsLine(dep, base_path), out) return True def PrintScript(source_paths, out): for index, dep in enumerate(source_paths): PrintLine('// Input %d' % index, out) f = open(dep, 'r') PrintLine(f.read(), out) f.close() def GetJavaVersion(): """Returns the string for the current version of Java installed.""" proc = subprocess.Popen(['java', '-version'], stderr=subprocess.PIPE) proc.wait() version_line = proc.stderr.read().splitlines()[0] return version_regex.search(version_line.decode('utf-8')).group() def FilterByExcludes(options, files): """Filters the given files by the exlusions specified at the command line. Args: options: The flags to calcdeps. files: The files to filter. Returns: A list of files. """ excludes = [] if options.excludes: excludes = ExpandDirectories(options.excludes) excludesSet = set(excludes) return [i for i in files if not i in excludesSet] def GetPathsFromOptions(options): """Generates the path files from flag options. Args: options: The flags to calcdeps. Returns: A list of files in the specified paths. (strings). """ search_paths = options.paths if not search_paths: search_paths = ['.'] # Add default folder if no path is specified. search_paths = ExpandDirectories(search_paths) return FilterByExcludes(options, search_paths) def GetInputsFromOptions(options): """Generates the inputs from flag options. Args: options: The flags to calcdeps. Returns: A list of inputs (strings). """ inputs = options.inputs if not inputs: # Parse stdin logging.info('No inputs specified. 
Reading from stdin...') inputs = filter(None, [line.strip('\n') for line in sys.stdin.readlines()]) logging.info('Scanning files...') inputs = ExpandDirectories(inputs) return FilterByExcludes(options, inputs) def Compile(compiler_jar_path, source_paths, out, flags=None): """Prepares command-line call to Closure compiler. Args: compiler_jar_path: Path to the Closure compiler .jar file. source_paths: Source paths to build, in order. flags: A list of additional flags to pass on to Closure compiler. """ args = ['java', '-jar', compiler_jar_path] for path in source_paths: args += ['--js', path] if flags: args += flags logging.info('Compiling with the following command: %s', ' '.join(args)) proc = subprocess.Popen(args, stdout=subprocess.PIPE) (stdoutdata, stderrdata) = proc.communicate() if proc.returncode != 0: logging.error('JavaScript compilation failed.') sys.exit(1) else: out.write(stdoutdata.decode('utf-8')) def main(): """The entrypoint for this script.""" logging.basicConfig(format='calcdeps.py: %(message)s', level=logging.INFO) usage = 'usage: %prog [options] arg' parser = optparse.OptionParser(usage) parser.add_option('-i', '--input', dest='inputs', action='append', help='The inputs to calculate dependencies for. Valid ' 'values can be files, directories, or namespaces ' '(ns:goog.net.XhrIo). Only relevant to "list" and ' '"script" output.') parser.add_option('-p', '--path', dest='paths', action='append', help='The paths that should be traversed to build the ' 'dependencies.') parser.add_option('-d', '--dep', dest='deps', action='append', help='Directories or files that should be traversed to ' 'find required dependencies for the deps file. ' 'Does not generate dependency information for names ' 'provided by these files. 
Only useful in "deps" mode.') parser.add_option('-e', '--exclude', dest='excludes', action='append', help='Files or directories to exclude from the --path ' 'and --input flags') parser.add_option('-o', '--output_mode', dest='output_mode', action='store', default='list', help='The type of output to generate from this script. ' 'Options are "list" for a list of filenames, "script" ' 'for a single script containing the contents of all the ' 'file, "deps" to generate a deps.js file for all ' 'paths, or "compiled" to produce compiled output with ' 'the Closure compiler.') parser.add_option('-c', '--compiler_jar', dest='compiler_jar', action='store', help='The location of the Closure compiler .jar file.') parser.add_option('-f', '--compiler_flag', '--compiler_flags', # for backwards compatibility dest='compiler_flags', action='append', help='Additional flag to pass to the Closure compiler. ' 'May be specified multiple times to pass multiple flags.') parser.add_option('--output_file', dest='output_file', action='store', help=('If specified, write output to this path instead of ' 'writing to standard output.')) (options, args) = parser.parse_args() search_paths = GetPathsFromOptions(options) if options.output_file: out = open(options.output_file, 'w') else: out = sys.stdout if options.output_mode == 'deps': result = PrintDeps(search_paths, ExpandDirectories(options.deps or []), out) if not result: logging.error('Could not find Closure Library in the specified paths') sys.exit(1) return inputs = GetInputsFromOptions(options) logging.info('Finding Closure dependencies...') deps = CalculateDependencies(search_paths, inputs) output_mode = options.output_mode if output_mode == 'script': PrintScript(deps, out) elif output_mode == 'list': # Just print out a dep per line for dep in deps: PrintLine(dep, out) elif output_mode == 'compiled': # Make sure a .jar is specified. 
if not options.compiler_jar: logging.error('--compiler_jar flag must be specified if --output is ' '"compiled"') sys.exit(1) # User friendly version check. if distutils and not (distutils.version.LooseVersion(GetJavaVersion()) > distutils.version.LooseVersion('1.6')): logging.error('Closure Compiler requires Java 1.6 or higher.') logging.error('Please visit http://www.java.com/getjava') sys.exit(1) Compile(options.compiler_jar, deps, out, options.compiler_flags) else: logging.error('Invalid value for --output flag.') sys.exit(1) if __name__ == '__main__': main()
cc0-1.0
Nu3001/external_chromium_org
tools/telemetry/telemetry/unittest/system_stub.py
35
4383
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Provides stubs for os, sys and subprocess for testing

This test allows one to test code that itself uses os, sys, and subprocess.
"""

import os
import re
import shlex
import sys as real_sys


class Override(object):
    """Temporarily replaces named attributes of a module with stub objects.

    Restore() must be called before the instance is garbage collected;
    __del__ asserts that it was.
    """

    def __init__(self, base_module, module_list):
        available_stubs = {
            'adb_commands': AdbCommandsModuleStub,
            'os': OsModuleStub,
            'subprocess': SubprocessModuleStub,
            'sys': SysModuleStub,
        }
        self.adb_commands = None
        self.os = None
        self.subprocess = None
        self.sys = None

        self._base_module = base_module
        self._overrides = {}

        for name in module_list:
            # Remember the real attribute so Restore() can put it back.
            self._overrides[name] = getattr(base_module, name)
            setattr(self, name, available_stubs[name]())
            setattr(base_module, name, getattr(self, name))

        if self.os and self.sys:
            # Let the os.path stub consult the stubbed platform string.
            self.os.path.sys = self.sys

    def __del__(self):
        # Catch callers that forgot to call Restore().
        assert not len(self._overrides)

    def Restore(self):
        """Puts every overridden attribute back on the base module."""
        for name, real_module in self._overrides.iteritems():
            setattr(self._base_module, name, real_module)
        self._overrides = {}


class AdbCommandsModuleStub(object):
    # adb not even found
    # android_browser_finder not returning

    class AdbCommandsStub(object):
        """Stands in for a single adb connection to one device."""

        def __init__(self, module, device):
            self._module = module
            self._device = device
            self.is_root_enabled = True

        def RunShellCommand(self, args):
            # Accept either a raw command string or an argv list.
            if isinstance(args, basestring):
                args = shlex.split(args)
            handler = self._module.shell_command_handlers[args[0]]
            return handler(args)

        def IsRootEnabled(self):
            return self.is_root_enabled

    def __init__(self):
        self.attached_devices = []
        self.shell_command_handlers = {}

        def AdbCommandsStubConstructor(device=None):
            return AdbCommandsModuleStub.AdbCommandsStub(self, device)
        # Mimics the module-level AdbCommands factory of the real module.
        self.AdbCommands = AdbCommandsStubConstructor

    @staticmethod
    def IsAndroidSupported():
        return True

    def GetAttachedDevices(self):
        return self.attached_devices

    @staticmethod
    def HasForwarder(_=None):
        return True


class OsModuleStub(object):
    """Stub for the os module with a controllable fake filesystem/env."""

    class OsPathModuleStub(object):
        def __init__(self, sys_module):
            self.sys = sys_module
            self.files = []

        def exists(self, path):
            return path in self.files

        def join(self, *paths):
            platform_is_win = self.sys.platform.startswith('win')

            def _is_absolute(component):
                if platform_is_win:
                    return re.match('[a-zA-Z]:\\\\', component)
                return component.startswith('/')

            # Per Python specification, if any component is an absolute path,
            # discard previous components.
            for position, component in reversed(list(enumerate(paths))):
                if _is_absolute(component):
                    paths = paths[position:]
                    break

            joined = os.path.join(*paths)
            if platform_is_win:
                return joined.replace('/', '\\')
            return joined.replace('\\', '/')

        def expanduser(self, filename):
            return os.path.expanduser(filename)

        def dirname(self, filename):  # pylint: disable=R0201
            return os.path.dirname(filename)

    def __init__(self, sys_module=real_sys):
        self.path = OsModuleStub.OsPathModuleStub(sys_module)
        self.display = ':0'
        self.local_app_data = None
        self.program_files = None
        self.program_files_x86 = None
        self.devnull = os.devnull

    def getenv(self, name):
        # Only the variables telemetry actually reads are supported.
        supported = {
            'DISPLAY': self.display,
            'LOCALAPPDATA': self.local_app_data,
            'PROGRAMFILES': self.program_files,
            'PROGRAMFILES(X86)': self.program_files_x86,
        }
        if name in supported:
            return supported[name]
        raise Exception('Unsupported getenv')


class SubprocessModuleStub(object):
    """Stub for subprocess whose Popen returns canned communicate() output."""

    class PopenStub(object):
        def __init__(self):
            self.communicate_result = ('', '')

        def __call__(self, args, **kwargs):
            return self

        def communicate(self):
            return self.communicate_result

    def __init__(self):
        self.Popen = SubprocessModuleStub.PopenStub()
        self.PIPE = None

    def call(self, *args, **kwargs):
        raise NotImplementedError()


class SysModuleStub(object):
    """Stub for sys exposing only a settable platform string."""

    def __init__(self):
        self.platform = ''
bsd-3-clause
KevinKazama/game
jeutennis/management/commands/tournoi2.py
1
12445
from django.core.management.base import BaseCommand, CommandError
from jeutennis.models import table_joueurs, table_match, table_tournoi
from django.utils import timezone
import datetime
import random
import time
from collections import OrderedDict

# NOTE(review): module-level mutable state -- these lists survive across
# handle() calls in one process, so repeated invocations accumulate entries.
# Kept as-is because handle() reads them as globals; confirm before reuse.
list_tournoi = []
id_tournoi = []
list_part = []
domicile = []
exterieur = []
date_time = datetime.datetime.now()


class Command(BaseCommand):
    help = 'Gestion tournoi'

    def handle(self, *args, **options):
        """Simulate the next three tournaments and persist the results.

        For each upcoming tournament (date_tournoi >= now, newest first),
        reads participant ids one per line from a text file named after the
        tournament, pairs them up (even index = home, odd index = away),
        simulates each match set by set, updates both players' win/loss
        records and points, and stores one table_match row per match.
        """
        if len(args) == 0:
            print('no args')
        try:
            req_part = table_tournoi.objects.filter(
                date_tournoi__gte=date_time).order_by('-date_tournoi')[:3]
            for s in req_part:
                print(s.nom)
                list_tournoi.append(s.nom)
                id_tournoi.append(s.id)
                # One participant id per line in the tournament file.
                fichier = open("/kevin/python/Projets/tennis/jeu/jeutennis/tournois/"+str(s.nom)+".txt", "r")
                line = fichier.read().splitlines()
                for x in line:
                    list_part.append(x)
                i = 0
                while i < len(list_part):
                    if i % 2 == 0:
                        domicile.append(list_part[i])
                    else:
                        exterieur.append(list_part[i])
                    i += 1
                j = 0
                while j < len(domicile):
                    joueur1 = table_joueurs.objects.get(id=domicile[j])
                    joueur2 = table_joueurs.objects.get(id=exterieur[j])
                    j1 = str(joueur1.prenom+" "+joueur1.nom)
                    j2 = str(joueur2.prenom+" "+joueur2.nom)
                    # Games won in the current set.
                    nb1 = 0
                    nb2 = 0
                    # Sets played so far / sets won by each player.
                    sets = 0
                    set1 = 0
                    set2 = 0
                    # Per-game running score of the current set, and the five
                    # per-set copies of it.
                    sj1 = []
                    sj2 = []
                    s1j1 = []
                    s2j1 = []
                    s3j1 = []
                    s4j1 = []
                    s5j1 = []
                    s1j2 = []
                    s2j2 = []
                    s3j2 = []
                    s4j2 = []
                    s5j2 = []
                    # Final game count of each completed set.
                    res1 = []
                    res2 = []
                    # Serving alternates with the turn counter's parity.
                    tour = 0
                    # Working copies of the stats; each one decays with the
                    # opponent's endurance as the match goes on.
                    serv1 = joueur1.service
                    serv2 = joueur2.service
                    ret1 = joueur1.retour
                    ret2 = joueur2.retour
                    end1 = joueur1.endurance
                    end2 = joueur2.endurance
                    con1 = joueur1.concentration
                    con2 = joueur2.concentration
                    diff = 0
                    comm = []
                    message = []
                    nbtour = []
                    comptset = 0
                    # Best of five: first player to take three sets.
                    while (set1 < 3) and (set2 < 3):
                        nb1 = 0
                        nb2 = 0
                        # One set: first player to six games (tie-break below).
                        while (nb1 < 6) and (nb2 < 6):
                            tour += 1
                            nbtour.append(tour)
                            if tour % 2 == 0:
                                diff = serv1 - ret2
                            else:
                                diff = ret1 - serv2
                            alea = int(random.randrange(0, 100))
                            if alea < 50+diff:
                                nb1 += 1
                            else:
                                nb2 += 1
                            # Stat decay, clamped at zero.
                            if serv1 < 1:
                                serv1 = 0
                            else:
                                serv1 = serv1 - end2/100
                            if ret1 < 1:
                                ret1 = 0
                            else:
                                ret1 = ret1 - end2/100
                            # BUG FIX: was `if con1:` (truthiness), which
                            # zeroed player 1's concentration on the very
                            # first game since any positive value is truthy.
                            # Now clamps like every other stat.
                            if con1 < 1:
                                con1 = 0
                            else:
                                con1 = con1 - end2/100
                            if serv2 < 1:
                                serv2 = 0
                            else:
                                serv2 = serv2 - end1/100
                            if ret2 < 1:
                                ret2 = 0
                            else:
                                ret2 = ret2 - end1/100
                            if con2 < 1:
                                con2 = 0
                            else:
                                con2 = con2 - end1/100
                            sj1.append(str(nb1))
                            sj2.append(str(nb2))
                        # Tie-break when the set reaches 6-5 (11 games):
                        # first to seven with a two-game lead.
                        if nb1 + nb2 == 11:
                            while ((nb1 < 7) and (nb2 < 7)) and (abs(nb1-nb2) != 2):
                                tour += 1
                                nbtour.append(tour)
                                if tour % 2 == 0:
                                    diff = serv1 + con1 - ret2 - con2
                                else:
                                    diff = ret1 + con1 - ret2 - con2
                                alea = int(random.randrange(100))
                                if alea < 50+diff:
                                    nb1 += 1
                                else:
                                    nb2 += 1
                                # Same decay as in the regular game loop.
                                if serv1 < 1:
                                    serv1 = 0
                                else:
                                    serv1 = serv1 - end2/100
                                if ret1 < 1:
                                    ret1 = 0
                                else:
                                    ret1 = ret1 - end2/100
                                if con1 < 1:
                                    con1 = 0
                                else:
                                    con1 = con1 - end2/100
                                if serv2 < 1:
                                    serv2 = 0
                                else:
                                    serv2 = serv2 - end1/100
                                if ret2 < 1:
                                    ret2 = 0
                                else:
                                    ret2 = ret2 - end1/100
                                if con2 < 1:
                                    con2 = 0
                                else:
                                    con2 = con2 - end1/100
                            rendu = j1+" : "+str(nb1)+" | "+j2+" : "+str(nb2)
                            sj1.append(str(nb1))
                            sj2.append(str(nb2))
                            comm.append(rendu)
                        # Record the finished set's score.
                        comm.append("")
                        res1.append(nb1)
                        res2.append(nb2)
                        sets += 1
                        # Copy the per-game progression into the slot of the
                        # set that just finished.
                        if sets == 1:
                            for x in sj1:
                                s1j1.append(x)
                            for x in sj2:
                                s1j2.append(x)
                        elif sets == 2:
                            for x in sj1:
                                s2j1.append(x)
                            for x in sj2:
                                s2j2.append(x)
                        elif sets == 3:
                            for x in sj1:
                                s3j1.append(x)
                            for x in sj2:
                                s3j2.append(x)
                        elif sets == 4:
                            for x in sj1:
                                s4j1.append(x)
                            for x in sj2:
                                s4j2.append(x)
                        elif sets == 5:
                            for x in sj1:
                                s5j1.append(x)
                            for x in sj2:
                                s5j2.append(x)
                        # Blank the working lists (same length, "." filler)
                        # before the next set starts.
                        while comptset < len(sj1):
                            sj1[comptset] = "."
                            comptset += 1
                        comptset = 0
                        while comptset < len(sj2):
                            sj2[comptset] = "."
                            comptset += 1
                        comptset = 0
                        if nb1 > nb2:
                            set1 += 1
                            mess = j1+" gagne le set "+str(set1+set2)+" : "+str(nb1)+"/"+str(nb2)
                            comm.append(mess)
                        else:
                            set2 += 1
                            mess = j2+" gagne le set "+str(set1+set2)+" : "+str(nb2)+"/"+str(nb1)
                            comm.append(mess)
                    # Build "6/3 - 7/5 - ..." summaries from both viewpoints.
                    nset = len(res1)
                    i = 0
                    win = []
                    win2 = []
                    while i < nset:
                        win.append(str(res1[i])+"/"+str(res2[i]))
                        win2.append(str(res2[i])+"/"+str(res1[i]))
                        i += 1
                    strwin = ' - '.join(win)
                    strwin2 = ' - '.join(win2)
                    # Winner takes 60 points, loser drops 50.
                    if set1 > set2:
                        context2 = j1+" gagne "+strwin+" !"
                        joueur1.victoire += 1
                        joueur2.defaite += 1
                        joueur1.points += 60
                        joueur2.points -= 50
                        winner = 1
                    else:
                        context2 = j2+" gagne "+strwin2+" !"
                        joueur1.defaite += 1
                        joueur2.victoire += 1
                        joueur1.points -= 60
                        joueur2.points += 50
                        winner = 2
                    joueur1.save()
                    joueur2.save()
                    res = []
                    tour = len(nbtour) + 1
                    score = len(nbtour) + 2
                    # Per-set game counts for persistence; sets 4 and 5 may
                    # not have been played, in which case they are None.
                    g1j1 = res1[0]
                    g2j1 = res1[1]
                    g3j1 = res1[2]
                    try:
                        g4j1 = res1[3]
                    except IndexError:
                        g4j1 = None
                    try:
                        g5j1 = res1[4]
                    except IndexError:
                        g5j1 = None
                    g1j2 = res2[0]
                    g2j2 = res2[1]
                    g3j2 = res2[2]
                    try:
                        g4j2 = res2[3]
                    except IndexError:
                        g4j2 = None
                    try:
                        g5j2 = res2[4]
                    except IndexError:
                        g5j2 = None
                    if g4j1 == None:
                        print(j1+" vs "+j2+" : "+str(g1j1)+"/"+str(g1j2)+" - "+str(g2j1)+"/"+str(g2j2)+" - "+str(g3j1)+"/"+str(g3j2))
                    elif g5j1 == None:
                        print(j1+" vs "+j2+" : "+str(g1j1)+"/"+str(g1j2)+" - "+str(g2j1)+"/"+str(g2j2)+" - "+str(g3j1)+"/"+str(g3j2)+" - "+str(g4j1)+"/"+str(g4j2))
                    else:
                        print(j1+" vs "+j2+" : "+str(g1j1)+"/"+str(g1j2)+" - "+str(g2j1)+"/"+str(g2j2)+" - "+str(g3j1)+"/"+str(g3j2)+" - "+str(g4j1)+"/"+str(g4j2)+" - "+str(g5j1)+"/"+str(g5j2))
                    add = table_match.objects.create(
                        s1j1=g1j1, s2j1=g2j1, s3j1=g3j1, s4j1=g4j1, s5j1=g5j1,
                        s1j2=g1j2, s2j2=g2j2, s3j2=g3j2, s4j2=g4j2, s5j2=g5j2,
                        date_match=date_time, j1_id=joueur1.id,
                        j2_id=joueur2.id, winner=winner, idtournoi_id=s.id)
                    add.save()
                    j += 1
        except Exception:
            # Broad on purpose: a missing tournament file or player row
            # aborts the whole run with a short message.  Narrowed from a
            # bare `except:` so Ctrl-C is no longer swallowed.
            print("Pas de tournoi")
gpl-3.0
Supermem/ibis
ibis/__init__.py
6
3678
# Copyright 2014 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# flake8: noqa


__version__ = '0.5.0'

from ibis.filesystems import HDFS, WebHDFS
from ibis.common import IbisError

import ibis.expr.api as api
import ibis.expr.types as ir

# __all__ is defined
from ibis.expr.api import *

import ibis.impala.api as impala
import ibis.sql.sqlite.api as sqlite

import ibis.config_init
from ibis.config import options
import ibis.util as util


# Deprecated
impala_connect = util.deprecate(impala.connect,
                                'impala_connect is deprecated, use'
                                ' ibis.impala.connect instead')


def make_client(db, hdfs_client=None):
    """Attach an optional HDFS client to a database connection.

    Deprecated entry point kept for backwards compatibility; prefer
    ibis.impala.connect with hdfs_client=... instead.

    Parameters
    ----------
    db : Connection
        e.g. produced by ibis.impala.connect
    hdfs_client : ibis HDFS client, optional

    Returns
    -------
    client : IbisClient
        The same connection object, with its HDFS handle set.
    """
    db._hdfs = hdfs_client
    return db

make_client = util.deprecate(
    make_client, ('make_client is deprecated. '
                  'Use ibis.impala.connect '
                  ' with hdfs_client=hdfs_client'))


def hdfs_connect(host='localhost', port=50070, protocol='webhdfs',
                 auth_mechanism='NOSASL', verify=True, **kwds):
    """Create a WebHDFS client for the given NameNode.

    Parameters
    ----------
    host : string
        Host name of the HDFS NameNode
    port : int
        NameNode's WebHDFS port (default 50070)
    protocol : {'webhdfs'}
    auth_mechanism : string
        NOSASL or PLAIN for non-secure clusters; GSSAPI or LDAP for
        Kerberos-secured clusters.
    verify : boolean
        Set to False to turn off verifying SSL certificates (default True).

    Remaining keywords are forwarded to the underlying hdfs library client.

    Returns
    -------
    client : WebHDFS
    """
    import requests

    # Reuse a caller-supplied session if one was passed in kwds.
    session = kwds.setdefault('session', requests.Session())
    session.verify = verify

    kerberized = auth_mechanism in ['GSSAPI', 'LDAP']
    if kerberized:
        try:
            import requests_kerberos
        except ImportError:
            raise IbisError(
                "Unable to import requests-kerberos, which is required for "
                "Kerberos HDFS support. Install it by executing `pip install "
                "requests-kerberos` or `pip install hdfs[kerberos]`.")
        from hdfs.ext.kerberos import KerberosClient
        # Kerberized clusters are reached over SSL.
        endpoint = 'https://{0}:{1}'.format(host, port)
        kwds.setdefault('mutual_auth', 'OPTIONAL')
        raw_client = KerberosClient(endpoint, **kwds)
    else:
        from hdfs.client import InsecureClient
        endpoint = 'http://{0}:{1}'.format(host, port)
        raw_client = InsecureClient(endpoint, **kwds)
    return WebHDFS(raw_client)


def test(impala=False):
    """Run the ibis test suite via pytest, optionally with Impala tests."""
    import pytest
    import ibis
    import os

    ibis_dir, _ = os.path.split(ibis.__file__)
    pytest_args = ['--pyargs', ibis_dir]
    if impala:
        pytest_args.append('--impala')
    pytest.main(pytest_args)
apache-2.0
mermi/bedrock
tests/functional/test_newsletter.py
2
1276
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

import pytest
from selenium.common.exceptions import TimeoutException

from ..pages.newsletter import NewsletterPage


@pytest.mark.sanity
@pytest.mark.nondestructive
def test_default_values(base_url, selenium):
    """The form opens empty with US/English defaults, HTML format,
    and the privacy policy unchecked."""
    newsletter = NewsletterPage(base_url, selenium).open()
    assert newsletter.email == ''
    assert newsletter.country == 'United States'
    assert newsletter.language == 'English'
    assert newsletter.html_format_selected
    assert not newsletter.text_format_selected
    assert not newsletter.privacy_policy_accepted


def test_successful_sign_up(base_url, selenium):
    """Filling every field and accepting the policy completes sign-up."""
    newsletter = NewsletterPage(base_url, selenium).open()
    newsletter.type_email('noreply@mozilla.com')
    newsletter.select_country('United Kingdom')
    newsletter.select_language('Polski')
    newsletter.select_text_format()
    newsletter.accept_privacy_policy()
    newsletter.click_sign_me_up()
    assert newsletter.sign_up_successful


@pytest.mark.nondestructive
def test_sign_up_fails_when_missing_required_fields(base_url, selenium):
    """Submitting the untouched form never reaches the success state."""
    newsletter = NewsletterPage(base_url, selenium).open()
    with pytest.raises(TimeoutException):
        newsletter.click_sign_me_up()
mpl-2.0
drmrd/ansible
lib/ansible/modules/windows/win_dns_client.py
52
2086
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright: (c) 2017, Red Hat, Inc. # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'core'} DOCUMENTATION = r''' --- module: win_dns_client version_added: "2.3" short_description: Configures DNS lookup on Windows hosts description: - The C(win_dns_client) module configures the DNS client on Windows network adapters. options: adapter_names: description: - Adapter name or list of adapter names for which to manage DNS settings ('*' is supported as a wildcard value). The adapter name used is the connection caption in the Network Control Panel or via C(Get-NetAdapter), eg C(Local Area Connection). required: yes ipv4_addresses: description: - Single or ordered list of DNS server IPv4 addresses to configure for lookup. An empty list will configure the adapter to use the DHCP-assigned values on connections where DHCP is enabled, or disable DNS lookup on statically-configured connections. required: yes notes: - When setting an empty list of DNS server addresses on an adapter with DHCP enabled, a change will always be registered, since it is not possible to detect the difference between a DHCP-sourced server value and one that is statically set. author: - Matt Davis (@nitzmahone) ''' EXAMPLES = r''' - name: Set a single address on the adapter named Ethernet win_dns_client: adapter_names: Ethernet ipv4_addresses: 192.168.34.5 - name: Set multiple lookup addresses on all visible adapters (usually physical adapters that are in the Up state), with debug logging to a file win_dns_client: adapter_names: '*' ipv4_addresses: - 192.168.34.5 - 192.168.34.6 log_path: C:\dns_log.txt - name: Configure all adapters whose names begin with Ethernet to use DHCP-assigned DNS values win_dns_client: adapter_names: 'Ethernet*' ipv4_addresses: [] ''' RETURN = ''' '''
gpl-3.0
WillGuan105/django
tests/sessions_tests/tests.py
116
30900
import base64 import os import shutil import string import sys import tempfile import unittest from datetime import timedelta from django.conf import settings from django.contrib.sessions.backends.cache import SessionStore as CacheSession from django.contrib.sessions.backends.cached_db import \ SessionStore as CacheDBSession from django.contrib.sessions.backends.db import SessionStore as DatabaseSession from django.contrib.sessions.backends.file import SessionStore as FileSession from django.contrib.sessions.backends.signed_cookies import \ SessionStore as CookieSession from django.contrib.sessions.exceptions import InvalidSessionKey from django.contrib.sessions.middleware import SessionMiddleware from django.contrib.sessions.models import Session from django.contrib.sessions.serializers import ( JSONSerializer, PickleSerializer, ) from django.core import management from django.core.cache import caches from django.core.cache.backends.base import InvalidCacheBackendError from django.core.exceptions import ImproperlyConfigured from django.http import HttpResponse from django.test import ( RequestFactory, TestCase, ignore_warnings, override_settings, ) from django.test.utils import patch_logger from django.utils import six, timezone from django.utils.encoding import force_text from django.utils.six.moves import http_cookies from .custom_db_backend import SessionStore as CustomDatabaseSession class SessionTestsMixin(object): # This does not inherit from TestCase to avoid any tests being run with this # class, which wouldn't work, and to allow different TestCase subclasses to # be used. 
backend = None # subclasses must specify def setUp(self): self.session = self.backend() def tearDown(self): # NB: be careful to delete any sessions created; stale sessions fill up # the /tmp (with some backends) and eventually overwhelm it after lots # of runs (think buildbots) self.session.delete() def test_new_session(self): self.assertFalse(self.session.modified) self.assertFalse(self.session.accessed) def test_get_empty(self): self.assertEqual(self.session.get('cat'), None) def test_store(self): self.session['cat'] = "dog" self.assertTrue(self.session.modified) self.assertEqual(self.session.pop('cat'), 'dog') def test_pop(self): self.session['some key'] = 'exists' # Need to reset these to pretend we haven't accessed it: self.accessed = False self.modified = False self.assertEqual(self.session.pop('some key'), 'exists') self.assertTrue(self.session.accessed) self.assertTrue(self.session.modified) self.assertEqual(self.session.get('some key'), None) def test_pop_default(self): self.assertEqual(self.session.pop('some key', 'does not exist'), 'does not exist') self.assertTrue(self.session.accessed) self.assertFalse(self.session.modified) def test_setdefault(self): self.assertEqual(self.session.setdefault('foo', 'bar'), 'bar') self.assertEqual(self.session.setdefault('foo', 'baz'), 'bar') self.assertTrue(self.session.accessed) self.assertTrue(self.session.modified) def test_update(self): self.session.update({'update key': 1}) self.assertTrue(self.session.accessed) self.assertTrue(self.session.modified) self.assertEqual(self.session.get('update key', None), 1) def test_has_key(self): self.session['some key'] = 1 self.session.modified = False self.session.accessed = False self.assertIn('some key', self.session) self.assertTrue(self.session.accessed) self.assertFalse(self.session.modified) def test_values(self): self.assertEqual(list(self.session.values()), []) self.assertTrue(self.session.accessed) self.session['some key'] = 1 
self.assertEqual(list(self.session.values()), [1]) def test_iterkeys(self): self.session['x'] = 1 self.session.modified = False self.session.accessed = False i = six.iterkeys(self.session) self.assertTrue(hasattr(i, '__iter__')) self.assertTrue(self.session.accessed) self.assertFalse(self.session.modified) self.assertEqual(list(i), ['x']) def test_itervalues(self): self.session['x'] = 1 self.session.modified = False self.session.accessed = False i = six.itervalues(self.session) self.assertTrue(hasattr(i, '__iter__')) self.assertTrue(self.session.accessed) self.assertFalse(self.session.modified) self.assertEqual(list(i), [1]) def test_iteritems(self): self.session['x'] = 1 self.session.modified = False self.session.accessed = False i = six.iteritems(self.session) self.assertTrue(hasattr(i, '__iter__')) self.assertTrue(self.session.accessed) self.assertFalse(self.session.modified) self.assertEqual(list(i), [('x', 1)]) def test_clear(self): self.session['x'] = 1 self.session.modified = False self.session.accessed = False self.assertEqual(list(self.session.items()), [('x', 1)]) self.session.clear() self.assertEqual(list(self.session.items()), []) self.assertTrue(self.session.accessed) self.assertTrue(self.session.modified) def test_save(self): if (hasattr(self.session, '_cache') and 'DummyCache' in settings.CACHES[settings.SESSION_CACHE_ALIAS]['BACKEND']): raise unittest.SkipTest("Session saving tests require a real cache backend") self.session.save() self.assertTrue(self.session.exists(self.session.session_key)) def test_delete(self): self.session.save() self.session.delete(self.session.session_key) self.assertFalse(self.session.exists(self.session.session_key)) def test_flush(self): self.session['foo'] = 'bar' self.session.save() prev_key = self.session.session_key self.session.flush() self.assertFalse(self.session.exists(prev_key)) self.assertNotEqual(self.session.session_key, prev_key) self.assertIsNone(self.session.session_key) 
self.assertTrue(self.session.modified) self.assertTrue(self.session.accessed) def test_cycle(self): self.session['a'], self.session['b'] = 'c', 'd' self.session.save() prev_key = self.session.session_key prev_data = list(self.session.items()) self.session.cycle_key() self.assertNotEqual(self.session.session_key, prev_key) self.assertEqual(list(self.session.items()), prev_data) def test_save_doesnt_clear_data(self): self.session['a'] = 'b' self.session.save() self.assertEqual(self.session['a'], 'b') def test_invalid_key(self): # Submitting an invalid session key (either by guessing, or if the db has # removed the key) results in a new key being generated. try: session = self.backend('1') try: session.save() except AttributeError: self.fail( "The session object did not save properly. " "Middleware may be saving cache items without namespaces." ) self.assertNotEqual(session.session_key, '1') self.assertEqual(session.get('cat'), None) session.delete() finally: # Some backends leave a stale cache entry for the invalid # session key; make sure that entry is manually deleted session.delete('1') def test_session_key_empty_string_invalid(self): """Falsey values (Such as an empty string) are rejected.""" self.session._session_key = '' self.assertIsNone(self.session.session_key) def test_session_key_too_short_invalid(self): """Strings shorter than 8 characters are rejected.""" self.session._session_key = '1234567' self.assertIsNone(self.session.session_key) def test_session_key_valid_string_saved(self): """Strings of length 8 and up are accepted and stored.""" self.session._session_key = '12345678' self.assertEqual(self.session.session_key, '12345678') def test_session_key_is_read_only(self): def set_session_key(session): session.session_key = session._get_new_session_key() self.assertRaises(AttributeError, set_session_key, self.session) # Custom session expiry def test_default_expiry(self): # A normal session has a max age equal to settings 
self.assertEqual(self.session.get_expiry_age(), settings.SESSION_COOKIE_AGE) # So does a custom session with an idle expiration time of 0 (but it'll # expire at browser close) self.session.set_expiry(0) self.assertEqual(self.session.get_expiry_age(), settings.SESSION_COOKIE_AGE) def test_custom_expiry_seconds(self): modification = timezone.now() self.session.set_expiry(10) date = self.session.get_expiry_date(modification=modification) self.assertEqual(date, modification + timedelta(seconds=10)) age = self.session.get_expiry_age(modification=modification) self.assertEqual(age, 10) def test_custom_expiry_timedelta(self): modification = timezone.now() # Mock timezone.now, because set_expiry calls it on this code path. original_now = timezone.now try: timezone.now = lambda: modification self.session.set_expiry(timedelta(seconds=10)) finally: timezone.now = original_now date = self.session.get_expiry_date(modification=modification) self.assertEqual(date, modification + timedelta(seconds=10)) age = self.session.get_expiry_age(modification=modification) self.assertEqual(age, 10) def test_custom_expiry_datetime(self): modification = timezone.now() self.session.set_expiry(modification + timedelta(seconds=10)) date = self.session.get_expiry_date(modification=modification) self.assertEqual(date, modification + timedelta(seconds=10)) age = self.session.get_expiry_age(modification=modification) self.assertEqual(age, 10) def test_custom_expiry_reset(self): self.session.set_expiry(None) self.session.set_expiry(10) self.session.set_expiry(None) self.assertEqual(self.session.get_expiry_age(), settings.SESSION_COOKIE_AGE) def test_get_expire_at_browser_close(self): # Tests get_expire_at_browser_close with different settings and different # set_expiry calls with override_settings(SESSION_EXPIRE_AT_BROWSER_CLOSE=False): self.session.set_expiry(10) self.assertFalse(self.session.get_expire_at_browser_close()) self.session.set_expiry(0) 
self.assertTrue(self.session.get_expire_at_browser_close()) self.session.set_expiry(None) self.assertFalse(self.session.get_expire_at_browser_close()) with override_settings(SESSION_EXPIRE_AT_BROWSER_CLOSE=True): self.session.set_expiry(10) self.assertFalse(self.session.get_expire_at_browser_close()) self.session.set_expiry(0) self.assertTrue(self.session.get_expire_at_browser_close()) self.session.set_expiry(None) self.assertTrue(self.session.get_expire_at_browser_close()) def test_decode(self): # Ensure we can decode what we encode data = {'a test key': 'a test value'} encoded = self.session.encode(data) self.assertEqual(self.session.decode(encoded), data) def test_decode_failure_logged_to_security(self): bad_encode = base64.b64encode(b'flaskdj:alkdjf') with patch_logger('django.security.SuspiciousSession', 'warning') as calls: self.assertEqual({}, self.session.decode(bad_encode)) # check that the failed decode is logged self.assertEqual(len(calls), 1) self.assertIn('corrupted', calls[0]) def test_actual_expiry(self): # this doesn't work with JSONSerializer (serializing timedelta) with override_settings(SESSION_SERIALIZER='django.contrib.sessions.serializers.PickleSerializer'): self.session = self.backend() # reinitialize after overriding settings # Regression test for #19200 old_session_key = None new_session_key = None try: self.session['foo'] = 'bar' self.session.set_expiry(-timedelta(seconds=10)) self.session.save() old_session_key = self.session.session_key # With an expiry date in the past, the session expires instantly. new_session = self.backend(self.session.session_key) new_session_key = new_session.session_key self.assertNotIn('foo', new_session) finally: self.session.delete(old_session_key) self.session.delete(new_session_key) def test_session_load_does_not_create_record(self): """ Loading an unknown session key does not create a session record. Creating session records on load is a DOS vulnerability. 
""" if self.backend is CookieSession: raise unittest.SkipTest("Cookie backend doesn't have an external store to create records in.") session = self.backend('someunknownkey') session.load() self.assertFalse(session.exists(session.session_key)) # provided unknown key was cycled, not reused self.assertNotEqual(session.session_key, 'someunknownkey') class DatabaseSessionTests(SessionTestsMixin, TestCase): backend = DatabaseSession session_engine = 'django.contrib.sessions.backends.db' @property def model(self): return self.backend.get_model_class() def test_session_str(self): "Session repr should be the session key." self.session['x'] = 1 self.session.save() session_key = self.session.session_key s = self.model.objects.get(session_key=session_key) self.assertEqual(force_text(s), session_key) def test_session_get_decoded(self): """ Test we can use Session.get_decoded to retrieve data stored in normal way """ self.session['x'] = 1 self.session.save() s = self.model.objects.get(session_key=self.session.session_key) self.assertEqual(s.get_decoded(), {'x': 1}) def test_sessionmanager_save(self): """ Test SessionManager.save method """ # Create a session self.session['y'] = 1 self.session.save() s = self.model.objects.get(session_key=self.session.session_key) # Change it self.model.objects.save(s.session_key, {'y': 2}, s.expire_date) # Clear cache, so that it will be retrieved from DB del self.session._session_cache self.assertEqual(self.session['y'], 2) def test_clearsessions_command(self): """ Test clearsessions command for clearing expired sessions. """ self.assertEqual(0, self.model.objects.count()) # One object in the future self.session['foo'] = 'bar' self.session.set_expiry(3600) self.session.save() # One object in the past other_session = self.backend() other_session['foo'] = 'bar' other_session.set_expiry(-3600) other_session.save() # Two sessions are in the database before clearsessions... 
self.assertEqual(2, self.model.objects.count()) with override_settings(SESSION_ENGINE=self.session_engine): management.call_command('clearsessions') # ... and one is deleted. self.assertEqual(1, self.model.objects.count()) @override_settings(USE_TZ=True) class DatabaseSessionWithTimeZoneTests(DatabaseSessionTests): pass class CustomDatabaseSessionTests(DatabaseSessionTests): backend = CustomDatabaseSession session_engine = 'sessions_tests.custom_db_backend' def test_extra_session_field(self): # Set the account ID to be picked up by a custom session storage # and saved to a custom session model database column. self.session['_auth_user_id'] = 42 self.session.save() # Make sure that the customized create_model_instance() was called. s = self.model.objects.get(session_key=self.session.session_key) self.assertEqual(s.account_id, 42) # Make the session "anonymous". self.session.pop('_auth_user_id') self.session.save() # Make sure that save() on an existing session did the right job. s = self.model.objects.get(session_key=self.session.session_key) self.assertEqual(s.account_id, None) class CacheDBSessionTests(SessionTestsMixin, TestCase): backend = CacheDBSession @unittest.skipIf('DummyCache' in settings.CACHES[settings.SESSION_CACHE_ALIAS]['BACKEND'], "Session saving tests require a real cache backend") def test_exists_searches_cache_first(self): self.session.save() with self.assertNumQueries(0): self.assertTrue(self.session.exists(self.session.session_key)) # Some backends might issue a warning @ignore_warnings(module="django.core.cache.backends.base") def test_load_overlong_key(self): self.session._session_key = (string.ascii_letters + string.digits) * 20 self.assertEqual(self.session.load(), {}) @override_settings(SESSION_CACHE_ALIAS='sessions') def test_non_default_cache(self): # 21000 - CacheDB backend should respect SESSION_CACHE_ALIAS. 
self.assertRaises(InvalidCacheBackendError, self.backend) @override_settings(USE_TZ=True) class CacheDBSessionWithTimeZoneTests(CacheDBSessionTests): pass # Don't need DB flushing for these tests, so can use unittest.TestCase as base class class FileSessionTests(SessionTestsMixin, unittest.TestCase): backend = FileSession def setUp(self): # Do file session tests in an isolated directory, and kill it after we're done. self.original_session_file_path = settings.SESSION_FILE_PATH self.temp_session_store = settings.SESSION_FILE_PATH = tempfile.mkdtemp() # Reset the file session backend's internal caches if hasattr(self.backend, '_storage_path'): del self.backend._storage_path super(FileSessionTests, self).setUp() def tearDown(self): super(FileSessionTests, self).tearDown() settings.SESSION_FILE_PATH = self.original_session_file_path shutil.rmtree(self.temp_session_store) @override_settings( SESSION_FILE_PATH="/if/this/directory/exists/you/have/a/weird/computer") def test_configuration_check(self): del self.backend._storage_path # Make sure the file backend checks for a good storage dir self.assertRaises(ImproperlyConfigured, self.backend) def test_invalid_key_backslash(self): # Ensure we don't allow directory-traversal. # This is tested directly on _key_to_file, as load() will swallow # a SuspiciousOperation in the same way as an IOError - by creating # a new session, making it unclear whether the slashes were detected. self.assertRaises(InvalidSessionKey, self.backend()._key_to_file, "a\\b\\c") def test_invalid_key_forwardslash(self): # Ensure we don't allow directory-traversal self.assertRaises(InvalidSessionKey, self.backend()._key_to_file, "a/b/c") @override_settings(SESSION_ENGINE="django.contrib.sessions.backends.file") def test_clearsessions_command(self): """ Test clearsessions command for clearing expired sessions. 
""" storage_path = self.backend._get_storage_path() file_prefix = settings.SESSION_COOKIE_NAME def count_sessions(): return len([session_file for session_file in os.listdir(storage_path) if session_file.startswith(file_prefix)]) self.assertEqual(0, count_sessions()) # One object in the future self.session['foo'] = 'bar' self.session.set_expiry(3600) self.session.save() # One object in the past other_session = self.backend() other_session['foo'] = 'bar' other_session.set_expiry(-3600) other_session.save() # Two sessions are in the filesystem before clearsessions... self.assertEqual(2, count_sessions()) management.call_command('clearsessions') # ... and one is deleted. self.assertEqual(1, count_sessions()) class CacheSessionTests(SessionTestsMixin, unittest.TestCase): backend = CacheSession # Some backends might issue a warning @ignore_warnings(module="django.core.cache.backends.base") def test_load_overlong_key(self): self.session._session_key = (string.ascii_letters + string.digits) * 20 self.assertEqual(self.session.load(), {}) def test_default_cache(self): self.session.save() self.assertNotEqual(caches['default'].get(self.session.cache_key), None) @override_settings(CACHES={ 'default': { 'BACKEND': 'django.core.cache.backends.dummy.DummyCache', }, 'sessions': { 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', 'LOCATION': 'session', }, }, SESSION_CACHE_ALIAS='sessions') def test_non_default_cache(self): # Re-initialize the session backend to make use of overridden settings. 
self.session = self.backend() self.session.save() self.assertEqual(caches['default'].get(self.session.cache_key), None) self.assertNotEqual(caches['sessions'].get(self.session.cache_key), None) class SessionMiddlewareTests(TestCase): @override_settings(SESSION_COOKIE_SECURE=True) def test_secure_session_cookie(self): request = RequestFactory().get('/') response = HttpResponse('Session test') middleware = SessionMiddleware() # Simulate a request the modifies the session middleware.process_request(request) request.session['hello'] = 'world' # Handle the response through the middleware response = middleware.process_response(request, response) self.assertTrue( response.cookies[settings.SESSION_COOKIE_NAME]['secure']) @override_settings(SESSION_COOKIE_HTTPONLY=True) def test_httponly_session_cookie(self): request = RequestFactory().get('/') response = HttpResponse('Session test') middleware = SessionMiddleware() # Simulate a request the modifies the session middleware.process_request(request) request.session['hello'] = 'world' # Handle the response through the middleware response = middleware.process_response(request, response) self.assertTrue( response.cookies[settings.SESSION_COOKIE_NAME]['httponly']) self.assertIn(http_cookies.Morsel._reserved['httponly'], str(response.cookies[settings.SESSION_COOKIE_NAME])) @override_settings(SESSION_COOKIE_HTTPONLY=False) def test_no_httponly_session_cookie(self): request = RequestFactory().get('/') response = HttpResponse('Session test') middleware = SessionMiddleware() # Simulate a request the modifies the session middleware.process_request(request) request.session['hello'] = 'world' # Handle the response through the middleware response = middleware.process_response(request, response) self.assertFalse(response.cookies[settings.SESSION_COOKIE_NAME]['httponly']) self.assertNotIn(http_cookies.Morsel._reserved['httponly'], str(response.cookies[settings.SESSION_COOKIE_NAME])) def test_session_save_on_500(self): request = 
RequestFactory().get('/') response = HttpResponse('Horrible error') response.status_code = 500 middleware = SessionMiddleware() # Simulate a request the modifies the session middleware.process_request(request) request.session['hello'] = 'world' # Handle the response through the middleware response = middleware.process_response(request, response) # Check that the value wasn't saved above. self.assertNotIn('hello', request.session.load()) def test_session_delete_on_end(self): request = RequestFactory().get('/') response = HttpResponse('Session test') middleware = SessionMiddleware() # Before deleting, there has to be an existing cookie request.COOKIES[settings.SESSION_COOKIE_NAME] = 'abc' # Simulate a request that ends the session middleware.process_request(request) request.session.flush() # Handle the response through the middleware response = middleware.process_response(request, response) # Check that the cookie was deleted, not recreated. # A deleted cookie header looks like: # Set-Cookie: sessionid=; expires=Thu, 01-Jan-1970 00:00:00 GMT; Max-Age=0; Path=/ self.assertEqual( 'Set-Cookie: {}={}; expires=Thu, 01-Jan-1970 00:00:00 GMT; ' 'Max-Age=0; Path=/'.format( settings.SESSION_COOKIE_NAME, '""' if sys.version_info >= (3, 5) else '', ), str(response.cookies[settings.SESSION_COOKIE_NAME]) ) @override_settings(SESSION_COOKIE_DOMAIN='.example.local') def test_session_delete_on_end_with_custom_domain(self): request = RequestFactory().get('/') response = HttpResponse('Session test') middleware = SessionMiddleware() # Before deleting, there has to be an existing cookie request.COOKIES[settings.SESSION_COOKIE_NAME] = 'abc' # Simulate a request that ends the session middleware.process_request(request) request.session.flush() # Handle the response through the middleware response = middleware.process_response(request, response) # Check that the cookie was deleted, not recreated. 
# A deleted cookie header with a custom domain looks like: # Set-Cookie: sessionid=; Domain=.example.local; # expires=Thu, 01-Jan-1970 00:00:00 GMT; Max-Age=0; Path=/ self.assertEqual( 'Set-Cookie: {}={}; Domain=.example.local; expires=Thu, ' '01-Jan-1970 00:00:00 GMT; Max-Age=0; Path=/'.format( settings.SESSION_COOKIE_NAME, '""' if sys.version_info >= (3, 5) else '', ), str(response.cookies[settings.SESSION_COOKIE_NAME]) ) def test_flush_empty_without_session_cookie_doesnt_set_cookie(self): request = RequestFactory().get('/') response = HttpResponse('Session test') middleware = SessionMiddleware() # Simulate a request that ends the session middleware.process_request(request) request.session.flush() # Handle the response through the middleware response = middleware.process_response(request, response) # A cookie should not be set. self.assertEqual(response.cookies, {}) # The session is accessed so "Vary: Cookie" should be set. self.assertEqual(response['Vary'], 'Cookie') def test_empty_session_saved(self): """" If a session is emptied of data but still has a key, it should still be updated. """ request = RequestFactory().get('/') response = HttpResponse('Session test') middleware = SessionMiddleware() # Set a session key and some data. middleware.process_request(request) request.session['foo'] = 'bar' # Handle the response through the middleware. response = middleware.process_response(request, response) self.assertEqual(tuple(request.session.items()), (('foo', 'bar'),)) # A cookie should be set, along with Vary: Cookie. self.assertIn( 'Set-Cookie: sessionid=%s' % request.session.session_key, str(response.cookies) ) self.assertEqual(response['Vary'], 'Cookie') # Empty the session data. del request.session['foo'] # Handle the response through the middleware. 
response = HttpResponse('Session test') response = middleware.process_response(request, response) self.assertEqual(dict(request.session.values()), {}) session = Session.objects.get(session_key=request.session.session_key) self.assertEqual(session.get_decoded(), {}) # While the session is empty, it hasn't been flushed so a cookie should # still be set, along with Vary: Cookie. self.assertGreater(len(request.session.session_key), 8) self.assertIn( 'Set-Cookie: sessionid=%s' % request.session.session_key, str(response.cookies) ) self.assertEqual(response['Vary'], 'Cookie') # Don't need DB flushing for these tests, so can use unittest.TestCase as base class class CookieSessionTests(SessionTestsMixin, unittest.TestCase): backend = CookieSession def test_save(self): """ This test tested exists() in the other session backends, but that doesn't make sense for us. """ pass def test_cycle(self): """ This test tested cycle_key() which would create a new session key for the same session data. But we can't invalidate previously signed cookies (other than letting them expire naturally) so testing for this behavior is meaningless. """ pass @unittest.expectedFailure def test_actual_expiry(self): # The cookie backend doesn't handle non-default expiry dates, see #19201 super(CookieSessionTests, self).test_actual_expiry() def test_unpickling_exception(self): # signed_cookies backend should handle unpickle exceptions gracefully # by creating a new session self.assertEqual(self.session.serializer, JSONSerializer) self.session.save() self.session.serializer = PickleSerializer self.session.load()
bsd-3-clause
fireeye/flare-wmi
python-cim/samples/dump_keys.py
1
1204
import logging from cim.common import LoggingObject from cim import CIM from cim import is_index_page_number_valid logging.basicConfig(level=logging.DEBUG) g_logger = logging.getLogger("cim.printer") class Printer(LoggingObject): def __init__(self, cim): super(Printer, self).__init__() self._cim = cim def _printPageRec(self, page): for i in range(page.key_count): key = page.get_key(i) print(key.human_format) keyCount = page.key_count for i in range(keyCount + 1): childIndex = page.get_child(i) if not is_index_page_number_valid(childIndex): continue i = self._cim.logical_index_store self._printPageRec(i.get_page(childIndex)) def printKeys(self): i = self._cim.logical_index_store self._printPageRec(i.root_page) def main(type_, path, pageNum=None): if type_ not in ("xp", "win7"): raise RuntimeError("Invalid mapping type: {:s}".format(type_)) c = CIM(type_, path) p = Printer(c) p.printKeys() if __name__ == "__main__": logging.basicConfig(level=logging.INFO) import sys main(*sys.argv[1:])
apache-2.0
DavidNorman/tensorflow
tensorflow/python/keras/premade/wide_deep_test.py
5
13485
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for Keras Premade WideNDeep models.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.eager import context from tensorflow.python.feature_column import dense_features_v2 from tensorflow.python.feature_column import feature_column_v2 as fc from tensorflow.python.keras import keras_parameterized from tensorflow.python.keras import testing_utils from tensorflow.python.keras.engine import input_layer from tensorflow.python.keras.engine import sequential from tensorflow.python.keras.engine import training from tensorflow.python.keras.layers import core from tensorflow.python.keras.optimizer_v2 import gradient_descent from tensorflow.python.keras.premade import linear from tensorflow.python.keras.premade import wide_deep from tensorflow.python.ops import array_ops from tensorflow.python.ops import variables from tensorflow.python.platform import test @keras_parameterized.run_all_keras_modes(always_skip_v1=True) class WideDeepModelTest(keras_parameterized.TestCase): def test_wide_deep_model(self): linear_model = linear.LinearModel(units=1) dnn_model = sequential.Sequential([core.Dense(units=1, input_dim=3)]) wide_deep_model = wide_deep.WideDeepModel(linear_model, dnn_model) linear_inp = 
np.random.uniform(low=-5, high=5, size=(64, 2)) dnn_inp = np.random.uniform(low=-5, high=5, size=(64, 3)) inputs = [linear_inp, dnn_inp] output = .3 * linear_inp[:, 0] + .2 * dnn_inp[:, 1] wide_deep_model.compile( optimizer=['sgd', 'adam'], loss='mse', metrics=[], run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) wide_deep_model.fit(inputs, output, epochs=5) self.assertTrue(wide_deep_model.built) def test_wide_deep_model_backprop(self): with self.cached_session(): linear_model = linear.LinearModel(units=1, kernel_initializer='zeros') dnn_model = sequential.Sequential( [core.Dense(units=1, kernel_initializer='zeros')]) wide_deep_model = wide_deep.WideDeepModel(linear_model, dnn_model) linear_inp = np.array([1.]) dnn_inp = np.array([1.]) inputs = [linear_inp, dnn_inp] output = linear_inp + 2 * dnn_inp linear_opt = gradient_descent.SGD(learning_rate=.1) dnn_opt = gradient_descent.SGD(learning_rate=.3) wide_deep_model.compile( optimizer=[linear_opt, dnn_opt], loss='mse', metrics=[], run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) self.evaluate(variables.global_variables_initializer()) wide_deep_model.fit(inputs, output, epochs=1) self.assertAllClose( [[0.3]], self.evaluate(wide_deep_model.linear_model.dense_layers[0].kernel)) self.assertAllClose([[0.9]], self.evaluate( wide_deep_model.dnn_model.layers[0].kernel)) def test_wide_deep_model_with_single_input(self): linear_model = linear.LinearModel(units=1) dnn_model = sequential.Sequential([core.Dense(units=1, input_dim=3)]) wide_deep_model = wide_deep.WideDeepModel(linear_model, dnn_model) inputs = np.random.uniform(low=-5, high=5, size=(64, 3)) output = .3 * inputs[:, 0] wide_deep_model.compile( optimizer=['sgd', 'adam'], loss='mse', metrics=[], run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) 
wide_deep_model.fit(inputs, output, epochs=5) def test_wide_deep_model_with_multi_outputs(self): with context.eager_mode(): inp = input_layer.Input(shape=(1,), name='linear') l = linear.LinearModel(units=2, use_bias=False)(inp) l1, l2 = array_ops.split(l, num_or_size_splits=2, axis=1) linear_model = training.Model(inp, [l1, l2]) linear_model.set_weights([np.asarray([[0.5, 0.3]])]) h = core.Dense(units=2, use_bias=False)(inp) h1, h2 = array_ops.split(h, num_or_size_splits=2, axis=1) dnn_model = training.Model(inp, [h1, h2]) dnn_model.set_weights([np.asarray([[0.1, -0.5]])]) wide_deep_model = wide_deep.WideDeepModel(linear_model, dnn_model) inp_np = np.asarray([[1.]]) out1, out2 = wide_deep_model(inp_np) # output should be 0.5 * (0.5 + 0.1), and 0.5 * (0.3 - 0.5) self.assertAllClose([[0.3]], out1) self.assertAllClose([[-0.1]], out2) wide_deep_model = wide_deep.WideDeepModel( linear_model, dnn_model, activation='relu') out1, out2 = wide_deep_model(inp_np) # output should be relu(0.5 * (0.5 + 0.1)), and relu(0.5 * (0.3 - 0.5)) self.assertAllClose([[0.3]], out1) self.assertAllClose([[0.]], out2) def test_wide_deep_model_with_single_optimizer(self): linear_model = linear.LinearModel(units=1) dnn_model = sequential.Sequential([core.Dense(units=1, input_dim=3)]) wide_deep_model = wide_deep.WideDeepModel(linear_model, dnn_model) linear_inp = np.random.uniform(low=-5, high=5, size=(64, 2)) dnn_inp = np.random.uniform(low=-5, high=5, size=(64, 3)) inputs = [linear_inp, dnn_inp] output = .3 * linear_inp[:, 0] + .2 * dnn_inp[:, 1] wide_deep_model.compile( optimizer='sgd', loss='mse', metrics=[], run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) wide_deep_model.fit(inputs, output, epochs=5) self.assertTrue(wide_deep_model.built) def test_wide_deep_model_as_layer(self): linear_model = linear.LinearModel(units=1) dnn_model = sequential.Sequential([core.Dense(units=1)]) linear_input = input_layer.Input(shape=(3,), 
name='linear') dnn_input = input_layer.Input(shape=(5,), name='dnn') wide_deep_model = wide_deep.WideDeepModel(linear_model, dnn_model) wide_deep_output = wide_deep_model((linear_input, dnn_input)) input_b = input_layer.Input(shape=(1,), name='b') output_b = core.Dense(units=1)(input_b) model = training.Model( inputs=[linear_input, dnn_input, input_b], outputs=[wide_deep_output + output_b]) linear_input_np = np.random.uniform(low=-5, high=5, size=(64, 3)) dnn_input_np = np.random.uniform(low=-5, high=5, size=(64, 5)) input_b_np = np.random.uniform(low=-5, high=5, size=(64,)) output_np = linear_input_np[:, 0] + .2 * dnn_input_np[:, 1] + input_b_np model.compile( optimizer='sgd', loss='mse', metrics=[], run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) model.fit([linear_input_np, dnn_input_np, input_b_np], output_np, epochs=5) def test_wide_deep_model_with_sub_model_trained(self): linear_model = linear.LinearModel(units=1) dnn_model = sequential.Sequential([core.Dense(units=1, input_dim=3)]) wide_deep_model = wide_deep.WideDeepModel( linear.LinearModel(units=1), sequential.Sequential([core.Dense(units=1, input_dim=3)])) linear_inp = np.random.uniform(low=-5, high=5, size=(64, 2)) dnn_inp = np.random.uniform(low=-5, high=5, size=(64, 3)) inputs = [linear_inp, dnn_inp] output = .3 * linear_inp[:, 0] + .2 * dnn_inp[:, 1] linear_model.compile( optimizer='sgd', loss='mse', metrics=[], run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) dnn_model.compile( optimizer='adam', loss='mse', metrics=[], run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) linear_model.fit(linear_inp, output, epochs=50) dnn_model.fit(dnn_inp, output, epochs=50) wide_deep_model.compile( optimizer=['sgd', 'adam'], loss='mse', metrics=[], run_eagerly=testing_utils.should_run_eagerly(), 
experimental_run_tf_function=testing_utils.should_run_tf_function()) wide_deep_model.fit(inputs, output, epochs=50) # This test is an example for cases where linear and dnn model accepts # same raw input and same transformed inputs, i.e., the raw input is # categorical, and both linear and dnn model accept one hot encoding. def test_wide_deep_model_with_single_feature_column(self): vocab_list = ['alpha', 'beta', 'gamma'] vocab_val = [0.4, 0.6, 0.9] data = np.random.choice(vocab_list, size=256) y = np.zeros_like(data, dtype=np.float32) for vocab, val in zip(vocab_list, vocab_val): indices = np.where(data == vocab) y[indices] = val + np.random.uniform( low=-0.01, high=0.01, size=indices[0].shape) cat_column = fc.categorical_column_with_vocabulary_list( key='symbol', vocabulary_list=vocab_list) ind_column = fc.indicator_column(cat_column) dense_feature_layer = dense_features_v2.DenseFeatures([ind_column]) linear_model = linear.LinearModel( use_bias=False, kernel_initializer='zeros') dnn_model = sequential.Sequential([core.Dense(units=1)]) wide_deep_model = wide_deep.WideDeepModel(linear_model, dnn_model) combined = sequential.Sequential([dense_feature_layer, wide_deep_model]) opt = gradient_descent.SGD(learning_rate=0.1) combined.compile( opt, 'mse', [], run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) combined.fit(x={'symbol': data}, y=y, batch_size=32, epochs=10) # This test is an example for cases where linear and dnn model accepts # same raw input but different transformed inputs, i.e,. the raw input is # categorical, and linear model accepts one hot encoding, while dnn model # accepts embedding encoding. 
def test_wide_deep_model_with_two_feature_columns(self): vocab_list = ['alpha', 'beta', 'gamma'] vocab_val = [0.4, 0.6, 0.9] data = np.random.choice(vocab_list, size=256) y = np.zeros_like(data, dtype=np.float32) for vocab, val in zip(vocab_list, vocab_val): indices = np.where(data == vocab) y[indices] = val + np.random.uniform( low=-0.01, high=0.01, size=indices[0].shape) cat_column = fc.categorical_column_with_vocabulary_list( key='symbol', vocabulary_list=vocab_list) ind_column = fc.indicator_column(cat_column) emb_column = fc.embedding_column(cat_column, dimension=5) linear_feature_layer = dense_features_v2.DenseFeatures([ind_column]) linear_model = linear.LinearModel( use_bias=False, kernel_initializer='zeros') combined_linear = sequential.Sequential( [linear_feature_layer, linear_model]) dnn_model = sequential.Sequential([core.Dense(units=1)]) dnn_feature_layer = dense_features_v2.DenseFeatures([emb_column]) combined_dnn = sequential.Sequential([dnn_feature_layer, dnn_model]) wide_deep_model = wide_deep.WideDeepModel(combined_linear, combined_dnn) opt = gradient_descent.SGD(learning_rate=0.1) wide_deep_model.compile( opt, 'mse', [], run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) wide_deep_model.fit(x={'symbol': data}, y=y, batch_size=32, epochs=10) self.assertEqual(3, linear_model.inputs[0].shape[1]) self.assertEqual(5, dnn_model.inputs[0].shape[1]) def test_config(self): linear_model = linear.LinearModel(units=1) dnn_model = sequential.Sequential([core.Dense(units=1, input_dim=3)]) wide_deep_model = wide_deep.WideDeepModel(linear_model, dnn_model) config = wide_deep_model.get_config() cloned_wide_deep_model = wide_deep.WideDeepModel.from_config(config) self.assertEqual(linear_model.units, cloned_wide_deep_model.linear_model.units) self.assertEqual(dnn_model.layers[0].units, cloned_wide_deep_model.dnn_model.layers[0].units) def test_config_with_custom_objects(self): def my_activation(x): 
return x linear_model = linear.LinearModel(units=1) dnn_model = sequential.Sequential([core.Dense(units=1, input_dim=3)]) wide_deep_model = wide_deep.WideDeepModel( linear_model, dnn_model, activation=my_activation) config = wide_deep_model.get_config() cloned_wide_deep_model = wide_deep.WideDeepModel.from_config( config, custom_objects={'my_activation': my_activation}) self.assertEqual(cloned_wide_deep_model.activation, my_activation) if __name__ == '__main__': test.main()
apache-2.0
moorescloud/holideck
examples/tow.py
3
6430
#/usr/bin/python # """Try to open a filtered search stream And grab the #qanda tagged tweets Then spit out the timestamp of the tweet. Which can then be used as an input to another script. BECAUSE MODADS. Homepage and documentation: http://dev.moorescloud.com/ Copyright (c) 2012, Mark Pesce. License: MIT (see LICENSE for details)""" __author__ = 'Mark Pesce' __version__ = '0.01.dev' __license__ = 'MIT' import sys, os, json, time, stat, threading, string, requests from twitter.oauth_dance import oauth_dance import twitter from tweepy.streaming import StreamListener from tweepy import OAuthHandler from tweepy import Stream import requests import holiday finished = False # File name for the oauth info # # This will work for *NIX systems, not sure for Windows. # fn = os.path.join(os.path.expanduser('~'),'.qanda-oauth') #consumer_secret=con_secret = "pG9hrZAUURqyDTfBbJcgAMdpemBmgAdZDL92ErVELY" #consumer_key=con_key = "JwCegsVjfjfK0GvsQkpUw" # New codes specific for the Twug-of-War twitter application consumer_secret=con_secret = "3dna5y8MUSXfIrtkuhqF8yDFwjRINirpoeuMomzHaU" consumer_key=con_key = "qxHfm0JSS5eZNmqqcuw" # Do we have the correct OAuth credentials? # If credentials exist, test them. # If they fail, delete them. # If they do not exist or fail, create them. # def check_twitter_auth(): authorized = False if os.path.isfile(fn): # Does the token file exist? tokens = twitter.oauth.read_token_file(fn) #print 'OAuth tokens exist, will try to authorize with them...' twapi = twitter.Twitter(auth = twitter.OAuth(token=tokens[0], token_secret=tokens[1], consumer_secret=con_secret, consumer_key=con_key)) try: result = twapi.account.verify_credentials() twitter_id = result['id'] twitter_handle = result['screen_name'] #print 'Good, we seem to be authorized for username %s with id %d' % (twitter_handle, int(twitter_id)) authorized = twapi except twitter.TwitterError as e: print "Call failed, we don't seem to be authorized with existing credentials. Deleting..." 
print e os.remove(fn) if authorized == False: # If not authorized, do the OAuth dance print 'Authorizing the app...' tokens = oauth_dance(app_name='CrypTweet', consumer_key=con_key, consumer_secret=con_secret, token_filename=fn) os.chmod(fn, stat.S_IRUSR | stat.S_IWUSR) # Read/write, user-only # # Get an open API object for Twitter # twapi = twitter.Twitter(auth = twitter.OAuth(token=tokens[0], token_secret=tokens[1], consumer_secret=con_secret, consumer_key=con_key)) try: # Is this going to work? result = twapi.account.verify_credentials() twitter_id = result['id'] twitter_handle = result['screen_name'] print 'Good, we seem to be authorized for username %s with id %d' % (twitter_handle, int(twitter_id)) authorized = twapi except twitter.TwitterError as e: # Something bad happening, abort, abort! print "Call failed, we don't seem to be authorized with new credentials. Deleting..." print e os.remove(fn) return authorized class StdOutListener(StreamListener): """ A listener handles tweets are the received from the stream. This is a basic listener that just prints received tweets to stdout. """ def on_data(self, data): global hashterm, towval, finished if finished: sys.exit(0) #print "Got data" djt = json.loads(data) try: msg = djt['text'] #print msg msglow = string.lower(msg) # Convert to lowercase for matching if string.find(msglow, hashterm[0]) == -1: if string.find(msglow, hashterm[1]) == -1: #print "No match in string, curious." pass else: towval -= 1 else: towval += 1 except KeyError: #print "KeyError, skipping..." 
pass tow_render() return True def on_error(self, status): print status class Listener(threading.Thread): """If you want to run the Twitter listener on its own thread, use this""" def start(self, term='NONE'): self.searchterm = term "Search term: %s" % self.searchterm super(Listener, self).start() def run(self): print "Listener.run %s" % self.searchterm global hashterm, auth, l stream = Stream(auth, l) stream.filter(track=[self.searchterm]) # Blocking call. We do not come back. def tow_render(): """Using the current tug-of-war values, render to the Holiday array""" global hol, towval, finished print "towval %d" % towval # Right side green, left side blue, center point yellow. # If the center point goes to zero or NUM_GLOBES, we exit, because we're done! # First, make the center hol.setglobe(towval, 0xFF, 0xFF, 0x00) # Yellow in the middle hol.setglobe(towval+1, 0xFF, 0xFF, 0x00) loopy = 0 while loopy < towval: hol.setglobe(loopy, 0x00, 0xff, 0x00) # Green to the left loopy += 1 loopy = towval + 2 while loopy < hol.NUM_GLOBES: hol.setglobe(loopy, 0x00, 0x00, 0xff) # Blue to the right loopy = loopy +1 hol.render() # And render to the Holiday # Finally, if we need to exit, do it here. 
if (towval < 0) or (towval >= hol.NUM_GLOBES): finished = True sys.exit(0) # And we're done return if __name__ == '__main__': """The two strings passed after command invocation are the two search terms for the tug-of-war""" if len(sys.argv) > 3: the_hostname = sys.argv[3] print the_hostname else: the_hostname = 'localhost:8080' hashterm = [] if len(sys.argv) > 2: hashterm.append(string.lower(sys.argv[1])) hashterm.append(string.lower(sys.argv[2])) print "using %s" % hashterm else: hashterm.append("snowden") hashterm.append("bieber") # You asked for it print "Using default: %s" % hashterm # Initialize the tug-of-war hol = holiday.Holiday(remote=True,addr=the_hostname) towval = hol.NUM_GLOBES / 2 tow_render() if (check_twitter_auth() == False): sys.exit() print "Authorized" tokens = twitter.oauth.read_token_file(fn) l = StdOutListener() auth = OAuthHandler(consumer_key, consumer_secret) auth.set_access_token(tokens[0], tokens[1]) # Make a thread for the stream listener print hashterm[0] macher1 = Listener() macher1.start(term=hashterm[0]) # Make a thread for the stream listener print hashterm[1] macher2 = Listener() macher2.start(term=hashterm[1]) #stream = Stream(auth, l) #stream.filter(track=[hashterm]) # Blocking call. We do not come back. # while True: # Process updates # time.sleep(.1)
mit
Safihre/cherrypy
cherrypy/lib/auth_basic.py
6
4421
# This file is part of CherryPy <http://www.cherrypy.org/> # -*- coding: utf-8 -*- # vim:ts=4:sw=4:expandtab:fileencoding=utf-8 """HTTP Basic Authentication tool. This module provides a CherryPy 3.x tool which implements the server-side of HTTP Basic Access Authentication, as described in :rfc:`2617`. Example usage, using the built-in checkpassword_dict function which uses a dict as the credentials store:: userpassdict = {'bird' : 'bebop', 'ornette' : 'wayout'} checkpassword = cherrypy.lib.auth_basic.checkpassword_dict(userpassdict) basic_auth = {'tools.auth_basic.on': True, 'tools.auth_basic.realm': 'earth', 'tools.auth_basic.checkpassword': checkpassword, 'tools.auth_basic.accept_charset': 'UTF-8', } app_config = { '/' : basic_auth } """ import binascii import unicodedata import base64 import cherrypy from cherrypy._cpcompat import ntou, tonative __author__ = 'visteya' __date__ = 'April 2009' def checkpassword_dict(user_password_dict): """Returns a checkpassword function which checks credentials against a dictionary of the form: {username : password}. If you want a simple dictionary-based authentication scheme, use checkpassword_dict(my_credentials_dict) as the value for the checkpassword argument to basic_auth(). """ def checkpassword(realm, user, password): p = user_password_dict.get(user) return p and p == password or False return checkpassword def basic_auth(realm, checkpassword, debug=False, accept_charset='utf-8'): """A CherryPy tool which hooks at before_handler to perform HTTP Basic Access Authentication, as specified in :rfc:`2617` and :rfc:`7617`. If the request has an 'authorization' header with a 'Basic' scheme, this tool attempts to authenticate the credentials supplied in that header. If the request has no 'authorization' header, or if it does but the scheme is not 'Basic', or if authentication fails, the tool sends a 401 response with a 'WWW-Authenticate' Basic header. realm A string containing the authentication realm. 
checkpassword A callable which checks the authentication credentials. Its signature is checkpassword(realm, username, password). where username and password are the values obtained from the request's 'authorization' header. If authentication succeeds, checkpassword returns True, else it returns False. """ fallback_charset = 'ISO-8859-1' if '"' in realm: raise ValueError('Realm cannot contain the " (quote) character.') request = cherrypy.serving.request auth_header = request.headers.get('authorization') if auth_header is not None: # split() error, base64.decodestring() error msg = 'Bad Request' with cherrypy.HTTPError.handle((ValueError, binascii.Error), 400, msg): scheme, params = auth_header.split(' ', 1) if scheme.lower() == 'basic': charsets = accept_charset, fallback_charset decoded_params = base64.b64decode(params.encode('ascii')) decoded_params = _try_decode(decoded_params, charsets) decoded_params = ntou(decoded_params) decoded_params = unicodedata.normalize('NFC', decoded_params) decoded_params = tonative(decoded_params) username, password = decoded_params.split(':', 1) if checkpassword(realm, username, password): if debug: cherrypy.log('Auth succeeded', 'TOOLS.AUTH_BASIC') request.login = username return # successful authentication charset = accept_charset.upper() charset_declaration = ( (', charset="%s"' % charset) if charset != fallback_charset else '' ) # Respond with 401 status and a WWW-Authenticate header cherrypy.serving.response.headers['www-authenticate'] = ( 'Basic realm="%s"%s' % (realm, charset_declaration) ) raise cherrypy.HTTPError( 401, 'You are not authorized to access that resource') def _try_decode(subject, charsets): for charset in charsets[:-1]: try: return tonative(subject, charset) except ValueError: pass return tonative(subject, charsets[-1])
bsd-3-clause
invisiblek/python-for-android
python3-alpha/python3-src/Lib/encodings/cp874.py
272
12595
""" Python Character Mapping Codec cp874 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP874.TXT' with gencodec.py. """#" import codecs ### Codec APIs class Codec(codecs.Codec): def encode(self,input,errors='strict'): return codecs.charmap_encode(input,errors,encoding_table) def decode(self,input,errors='strict'): return codecs.charmap_decode(input,errors,decoding_table) class IncrementalEncoder(codecs.IncrementalEncoder): def encode(self, input, final=False): return codecs.charmap_encode(input,self.errors,encoding_table)[0] class IncrementalDecoder(codecs.IncrementalDecoder): def decode(self, input, final=False): return codecs.charmap_decode(input,self.errors,decoding_table)[0] class StreamWriter(Codec,codecs.StreamWriter): pass class StreamReader(Codec,codecs.StreamReader): pass ### encodings module API def getregentry(): return codecs.CodecInfo( name='cp874', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter, ) ### Decoding Table decoding_table = ( '\x00' # 0x00 -> NULL '\x01' # 0x01 -> START OF HEADING '\x02' # 0x02 -> START OF TEXT '\x03' # 0x03 -> END OF TEXT '\x04' # 0x04 -> END OF TRANSMISSION '\x05' # 0x05 -> ENQUIRY '\x06' # 0x06 -> ACKNOWLEDGE '\x07' # 0x07 -> BELL '\x08' # 0x08 -> BACKSPACE '\t' # 0x09 -> HORIZONTAL TABULATION '\n' # 0x0A -> LINE FEED '\x0b' # 0x0B -> VERTICAL TABULATION '\x0c' # 0x0C -> FORM FEED '\r' # 0x0D -> CARRIAGE RETURN '\x0e' # 0x0E -> SHIFT OUT '\x0f' # 0x0F -> SHIFT IN '\x10' # 0x10 -> DATA LINK ESCAPE '\x11' # 0x11 -> DEVICE CONTROL ONE '\x12' # 0x12 -> DEVICE CONTROL TWO '\x13' # 0x13 -> DEVICE CONTROL THREE '\x14' # 0x14 -> DEVICE CONTROL FOUR '\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE '\x16' # 0x16 -> SYNCHRONOUS IDLE '\x17' # 0x17 -> END OF TRANSMISSION BLOCK '\x18' # 0x18 -> CANCEL '\x19' # 0x19 -> END OF MEDIUM '\x1a' # 0x1A -> SUBSTITUTE '\x1b' # 0x1B -> ESCAPE '\x1c' # 0x1C -> FILE SEPARATOR 
'\x1d' # 0x1D -> GROUP SEPARATOR '\x1e' # 0x1E -> RECORD SEPARATOR '\x1f' # 0x1F -> UNIT SEPARATOR ' ' # 0x20 -> SPACE '!' # 0x21 -> EXCLAMATION MARK '"' # 0x22 -> QUOTATION MARK '#' # 0x23 -> NUMBER SIGN '$' # 0x24 -> DOLLAR SIGN '%' # 0x25 -> PERCENT SIGN '&' # 0x26 -> AMPERSAND "'" # 0x27 -> APOSTROPHE '(' # 0x28 -> LEFT PARENTHESIS ')' # 0x29 -> RIGHT PARENTHESIS '*' # 0x2A -> ASTERISK '+' # 0x2B -> PLUS SIGN ',' # 0x2C -> COMMA '-' # 0x2D -> HYPHEN-MINUS '.' # 0x2E -> FULL STOP '/' # 0x2F -> SOLIDUS '0' # 0x30 -> DIGIT ZERO '1' # 0x31 -> DIGIT ONE '2' # 0x32 -> DIGIT TWO '3' # 0x33 -> DIGIT THREE '4' # 0x34 -> DIGIT FOUR '5' # 0x35 -> DIGIT FIVE '6' # 0x36 -> DIGIT SIX '7' # 0x37 -> DIGIT SEVEN '8' # 0x38 -> DIGIT EIGHT '9' # 0x39 -> DIGIT NINE ':' # 0x3A -> COLON ';' # 0x3B -> SEMICOLON '<' # 0x3C -> LESS-THAN SIGN '=' # 0x3D -> EQUALS SIGN '>' # 0x3E -> GREATER-THAN SIGN '?' # 0x3F -> QUESTION MARK '@' # 0x40 -> COMMERCIAL AT 'A' # 0x41 -> LATIN CAPITAL LETTER A 'B' # 0x42 -> LATIN CAPITAL LETTER B 'C' # 0x43 -> LATIN CAPITAL LETTER C 'D' # 0x44 -> LATIN CAPITAL LETTER D 'E' # 0x45 -> LATIN CAPITAL LETTER E 'F' # 0x46 -> LATIN CAPITAL LETTER F 'G' # 0x47 -> LATIN CAPITAL LETTER G 'H' # 0x48 -> LATIN CAPITAL LETTER H 'I' # 0x49 -> LATIN CAPITAL LETTER I 'J' # 0x4A -> LATIN CAPITAL LETTER J 'K' # 0x4B -> LATIN CAPITAL LETTER K 'L' # 0x4C -> LATIN CAPITAL LETTER L 'M' # 0x4D -> LATIN CAPITAL LETTER M 'N' # 0x4E -> LATIN CAPITAL LETTER N 'O' # 0x4F -> LATIN CAPITAL LETTER O 'P' # 0x50 -> LATIN CAPITAL LETTER P 'Q' # 0x51 -> LATIN CAPITAL LETTER Q 'R' # 0x52 -> LATIN CAPITAL LETTER R 'S' # 0x53 -> LATIN CAPITAL LETTER S 'T' # 0x54 -> LATIN CAPITAL LETTER T 'U' # 0x55 -> LATIN CAPITAL LETTER U 'V' # 0x56 -> LATIN CAPITAL LETTER V 'W' # 0x57 -> LATIN CAPITAL LETTER W 'X' # 0x58 -> LATIN CAPITAL LETTER X 'Y' # 0x59 -> LATIN CAPITAL LETTER Y 'Z' # 0x5A -> LATIN CAPITAL LETTER Z '[' # 0x5B -> LEFT SQUARE BRACKET '\\' # 0x5C -> REVERSE SOLIDUS ']' # 0x5D -> RIGHT 
SQUARE BRACKET '^' # 0x5E -> CIRCUMFLEX ACCENT '_' # 0x5F -> LOW LINE '`' # 0x60 -> GRAVE ACCENT 'a' # 0x61 -> LATIN SMALL LETTER A 'b' # 0x62 -> LATIN SMALL LETTER B 'c' # 0x63 -> LATIN SMALL LETTER C 'd' # 0x64 -> LATIN SMALL LETTER D 'e' # 0x65 -> LATIN SMALL LETTER E 'f' # 0x66 -> LATIN SMALL LETTER F 'g' # 0x67 -> LATIN SMALL LETTER G 'h' # 0x68 -> LATIN SMALL LETTER H 'i' # 0x69 -> LATIN SMALL LETTER I 'j' # 0x6A -> LATIN SMALL LETTER J 'k' # 0x6B -> LATIN SMALL LETTER K 'l' # 0x6C -> LATIN SMALL LETTER L 'm' # 0x6D -> LATIN SMALL LETTER M 'n' # 0x6E -> LATIN SMALL LETTER N 'o' # 0x6F -> LATIN SMALL LETTER O 'p' # 0x70 -> LATIN SMALL LETTER P 'q' # 0x71 -> LATIN SMALL LETTER Q 'r' # 0x72 -> LATIN SMALL LETTER R 's' # 0x73 -> LATIN SMALL LETTER S 't' # 0x74 -> LATIN SMALL LETTER T 'u' # 0x75 -> LATIN SMALL LETTER U 'v' # 0x76 -> LATIN SMALL LETTER V 'w' # 0x77 -> LATIN SMALL LETTER W 'x' # 0x78 -> LATIN SMALL LETTER X 'y' # 0x79 -> LATIN SMALL LETTER Y 'z' # 0x7A -> LATIN SMALL LETTER Z '{' # 0x7B -> LEFT CURLY BRACKET '|' # 0x7C -> VERTICAL LINE '}' # 0x7D -> RIGHT CURLY BRACKET '~' # 0x7E -> TILDE '\x7f' # 0x7F -> DELETE '\u20ac' # 0x80 -> EURO SIGN '\ufffe' # 0x81 -> UNDEFINED '\ufffe' # 0x82 -> UNDEFINED '\ufffe' # 0x83 -> UNDEFINED '\ufffe' # 0x84 -> UNDEFINED '\u2026' # 0x85 -> HORIZONTAL ELLIPSIS '\ufffe' # 0x86 -> UNDEFINED '\ufffe' # 0x87 -> UNDEFINED '\ufffe' # 0x88 -> UNDEFINED '\ufffe' # 0x89 -> UNDEFINED '\ufffe' # 0x8A -> UNDEFINED '\ufffe' # 0x8B -> UNDEFINED '\ufffe' # 0x8C -> UNDEFINED '\ufffe' # 0x8D -> UNDEFINED '\ufffe' # 0x8E -> UNDEFINED '\ufffe' # 0x8F -> UNDEFINED '\ufffe' # 0x90 -> UNDEFINED '\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK '\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK '\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK '\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK '\u2022' # 0x95 -> BULLET '\u2013' # 0x96 -> EN DASH '\u2014' # 0x97 -> EM DASH '\ufffe' # 0x98 -> UNDEFINED '\ufffe' # 0x99 -> UNDEFINED '\ufffe' # 0x9A -> 
UNDEFINED '\ufffe' # 0x9B -> UNDEFINED '\ufffe' # 0x9C -> UNDEFINED '\ufffe' # 0x9D -> UNDEFINED '\ufffe' # 0x9E -> UNDEFINED '\ufffe' # 0x9F -> UNDEFINED '\xa0' # 0xA0 -> NO-BREAK SPACE '\u0e01' # 0xA1 -> THAI CHARACTER KO KAI '\u0e02' # 0xA2 -> THAI CHARACTER KHO KHAI '\u0e03' # 0xA3 -> THAI CHARACTER KHO KHUAT '\u0e04' # 0xA4 -> THAI CHARACTER KHO KHWAI '\u0e05' # 0xA5 -> THAI CHARACTER KHO KHON '\u0e06' # 0xA6 -> THAI CHARACTER KHO RAKHANG '\u0e07' # 0xA7 -> THAI CHARACTER NGO NGU '\u0e08' # 0xA8 -> THAI CHARACTER CHO CHAN '\u0e09' # 0xA9 -> THAI CHARACTER CHO CHING '\u0e0a' # 0xAA -> THAI CHARACTER CHO CHANG '\u0e0b' # 0xAB -> THAI CHARACTER SO SO '\u0e0c' # 0xAC -> THAI CHARACTER CHO CHOE '\u0e0d' # 0xAD -> THAI CHARACTER YO YING '\u0e0e' # 0xAE -> THAI CHARACTER DO CHADA '\u0e0f' # 0xAF -> THAI CHARACTER TO PATAK '\u0e10' # 0xB0 -> THAI CHARACTER THO THAN '\u0e11' # 0xB1 -> THAI CHARACTER THO NANGMONTHO '\u0e12' # 0xB2 -> THAI CHARACTER THO PHUTHAO '\u0e13' # 0xB3 -> THAI CHARACTER NO NEN '\u0e14' # 0xB4 -> THAI CHARACTER DO DEK '\u0e15' # 0xB5 -> THAI CHARACTER TO TAO '\u0e16' # 0xB6 -> THAI CHARACTER THO THUNG '\u0e17' # 0xB7 -> THAI CHARACTER THO THAHAN '\u0e18' # 0xB8 -> THAI CHARACTER THO THONG '\u0e19' # 0xB9 -> THAI CHARACTER NO NU '\u0e1a' # 0xBA -> THAI CHARACTER BO BAIMAI '\u0e1b' # 0xBB -> THAI CHARACTER PO PLA '\u0e1c' # 0xBC -> THAI CHARACTER PHO PHUNG '\u0e1d' # 0xBD -> THAI CHARACTER FO FA '\u0e1e' # 0xBE -> THAI CHARACTER PHO PHAN '\u0e1f' # 0xBF -> THAI CHARACTER FO FAN '\u0e20' # 0xC0 -> THAI CHARACTER PHO SAMPHAO '\u0e21' # 0xC1 -> THAI CHARACTER MO MA '\u0e22' # 0xC2 -> THAI CHARACTER YO YAK '\u0e23' # 0xC3 -> THAI CHARACTER RO RUA '\u0e24' # 0xC4 -> THAI CHARACTER RU '\u0e25' # 0xC5 -> THAI CHARACTER LO LING '\u0e26' # 0xC6 -> THAI CHARACTER LU '\u0e27' # 0xC7 -> THAI CHARACTER WO WAEN '\u0e28' # 0xC8 -> THAI CHARACTER SO SALA '\u0e29' # 0xC9 -> THAI CHARACTER SO RUSI '\u0e2a' # 0xCA -> THAI CHARACTER SO SUA '\u0e2b' # 0xCB -> THAI 
CHARACTER HO HIP '\u0e2c' # 0xCC -> THAI CHARACTER LO CHULA '\u0e2d' # 0xCD -> THAI CHARACTER O ANG '\u0e2e' # 0xCE -> THAI CHARACTER HO NOKHUK '\u0e2f' # 0xCF -> THAI CHARACTER PAIYANNOI '\u0e30' # 0xD0 -> THAI CHARACTER SARA A '\u0e31' # 0xD1 -> THAI CHARACTER MAI HAN-AKAT '\u0e32' # 0xD2 -> THAI CHARACTER SARA AA '\u0e33' # 0xD3 -> THAI CHARACTER SARA AM '\u0e34' # 0xD4 -> THAI CHARACTER SARA I '\u0e35' # 0xD5 -> THAI CHARACTER SARA II '\u0e36' # 0xD6 -> THAI CHARACTER SARA UE '\u0e37' # 0xD7 -> THAI CHARACTER SARA UEE '\u0e38' # 0xD8 -> THAI CHARACTER SARA U '\u0e39' # 0xD9 -> THAI CHARACTER SARA UU '\u0e3a' # 0xDA -> THAI CHARACTER PHINTHU '\ufffe' # 0xDB -> UNDEFINED '\ufffe' # 0xDC -> UNDEFINED '\ufffe' # 0xDD -> UNDEFINED '\ufffe' # 0xDE -> UNDEFINED '\u0e3f' # 0xDF -> THAI CURRENCY SYMBOL BAHT '\u0e40' # 0xE0 -> THAI CHARACTER SARA E '\u0e41' # 0xE1 -> THAI CHARACTER SARA AE '\u0e42' # 0xE2 -> THAI CHARACTER SARA O '\u0e43' # 0xE3 -> THAI CHARACTER SARA AI MAIMUAN '\u0e44' # 0xE4 -> THAI CHARACTER SARA AI MAIMALAI '\u0e45' # 0xE5 -> THAI CHARACTER LAKKHANGYAO '\u0e46' # 0xE6 -> THAI CHARACTER MAIYAMOK '\u0e47' # 0xE7 -> THAI CHARACTER MAITAIKHU '\u0e48' # 0xE8 -> THAI CHARACTER MAI EK '\u0e49' # 0xE9 -> THAI CHARACTER MAI THO '\u0e4a' # 0xEA -> THAI CHARACTER MAI TRI '\u0e4b' # 0xEB -> THAI CHARACTER MAI CHATTAWA '\u0e4c' # 0xEC -> THAI CHARACTER THANTHAKHAT '\u0e4d' # 0xED -> THAI CHARACTER NIKHAHIT '\u0e4e' # 0xEE -> THAI CHARACTER YAMAKKAN '\u0e4f' # 0xEF -> THAI CHARACTER FONGMAN '\u0e50' # 0xF0 -> THAI DIGIT ZERO '\u0e51' # 0xF1 -> THAI DIGIT ONE '\u0e52' # 0xF2 -> THAI DIGIT TWO '\u0e53' # 0xF3 -> THAI DIGIT THREE '\u0e54' # 0xF4 -> THAI DIGIT FOUR '\u0e55' # 0xF5 -> THAI DIGIT FIVE '\u0e56' # 0xF6 -> THAI DIGIT SIX '\u0e57' # 0xF7 -> THAI DIGIT SEVEN '\u0e58' # 0xF8 -> THAI DIGIT EIGHT '\u0e59' # 0xF9 -> THAI DIGIT NINE '\u0e5a' # 0xFA -> THAI CHARACTER ANGKHANKHU '\u0e5b' # 0xFB -> THAI CHARACTER KHOMUT '\ufffe' # 0xFC -> UNDEFINED '\ufffe' # 0xFD 
-> UNDEFINED '\ufffe' # 0xFE -> UNDEFINED '\ufffe' # 0xFF -> UNDEFINED ) ### Encoding table encoding_table=codecs.charmap_build(decoding_table)
apache-2.0
fujunwei/chromium-crosswalk
third_party/cython/src/Cython/Compiler/AutoDocTransforms.py
96
8536
from Cython.Compiler.Visitor import CythonTransform from Cython.Compiler.StringEncoding import EncodedString from Cython.Compiler import Options from Cython.Compiler import PyrexTypes, ExprNodes class EmbedSignature(CythonTransform): def __init__(self, context): super(EmbedSignature, self).__init__(context) self.denv = None # XXX self.class_name = None self.class_node = None unop_precedence = 11 binop_precedence = { 'or': 1, 'and': 2, 'not': 3, 'in': 4, 'not in': 4, 'is': 4, 'is not': 4, '<': 4, '<=': 4, '>': 4, '>=': 4, '!=': 4, '==': 4, '|': 5, '^': 6, '&': 7, '<<': 8, '>>': 8, '+': 9, '-': 9, '*': 10, '/': 10, '//': 10, '%': 10, # unary: '+': 11, '-': 11, '~': 11 '**': 12} def _fmt_expr_node(self, node, precedence=0): if isinstance(node, ExprNodes.BinopNode) and not node.inplace: new_prec = self.binop_precedence.get(node.operator, 0) result = '%s %s %s' % (self._fmt_expr_node(node.operand1, new_prec), node.operator, self._fmt_expr_node(node.operand2, new_prec)) if precedence > new_prec: result = '(%s)' % result elif isinstance(node, ExprNodes.UnopNode): result = '%s%s' % (node.operator, self._fmt_expr_node(node.operand, self.unop_precedence)) if precedence > self.unop_precedence: result = '(%s)' % result elif isinstance(node, ExprNodes.AttributeNode): result = '%s.%s' % (self._fmt_expr_node(node.obj), node.attribute) else: result = node.name return result def _fmt_arg_defv(self, arg): default_val = arg.default if not default_val: return None try: denv = self.denv # XXX ctval = default_val.compile_time_value(self.denv) repr_val = repr(ctval) if isinstance(default_val, ExprNodes.UnicodeNode): if repr_val[:1] != 'u': return u'u%s' % repr_val elif isinstance(default_val, ExprNodes.BytesNode): if repr_val[:1] != 'b': return u'b%s' % repr_val elif isinstance(default_val, ExprNodes.StringNode): if repr_val[:1] in 'ub': return repr_val[1:] return repr_val except Exception: try: return self._fmt_expr_node(default_val) except AttributeError, e: return '<???>' def 
_fmt_arg(self, arg): if arg.type is PyrexTypes.py_object_type or arg.is_self_arg: doc = arg.name else: doc = arg.type.declaration_code(arg.name, for_display=1) if arg.default: arg_defv = self._fmt_arg_defv(arg) if arg_defv: doc = doc + ('=%s' % arg_defv) return doc def _fmt_arglist(self, args, npargs=0, pargs=None, nkargs=0, kargs=None, hide_self=False): arglist = [] for arg in args: if not hide_self or not arg.entry.is_self_arg: arg_doc = self._fmt_arg(arg) arglist.append(arg_doc) if pargs: arglist.insert(npargs, '*%s' % pargs.name) elif nkargs: arglist.insert(npargs, '*') if kargs: arglist.append('**%s' % kargs.name) return arglist def _fmt_ret_type(self, ret): if ret is PyrexTypes.py_object_type: return None else: return ret.declaration_code("", for_display=1) def _fmt_signature(self, cls_name, func_name, args, npargs=0, pargs=None, nkargs=0, kargs=None, return_type=None, hide_self=False): arglist = self._fmt_arglist(args, npargs, pargs, nkargs, kargs, hide_self=hide_self) arglist_doc = ', '.join(arglist) func_doc = '%s(%s)' % (func_name, arglist_doc) if cls_name: func_doc = '%s.%s' % (cls_name, func_doc) if return_type: ret_doc = self._fmt_ret_type(return_type) if ret_doc: func_doc = '%s -> %s' % (func_doc, ret_doc) return func_doc def _embed_signature(self, signature, node_doc): if node_doc: return "%s\n%s" % (signature, node_doc) else: return signature def __call__(self, node): if not Options.docstrings: return node else: return super(EmbedSignature, self).__call__(node) def visit_ClassDefNode(self, node): oldname = self.class_name oldclass = self.class_node self.class_node = node try: # PyClassDefNode self.class_name = node.name except AttributeError: # CClassDefNode self.class_name = node.class_name self.visitchildren(node) self.class_name = oldname self.class_node = oldclass return node def visit_DefNode(self, node): if not self.current_directives['embedsignature']: return node is_constructor = False hide_self = False if node.entry.is_special: 
is_constructor = self.class_node and node.name == '__init__' if not is_constructor: return node class_name, func_name = None, self.class_name hide_self = True else: class_name, func_name = self.class_name, node.name nkargs = getattr(node, 'num_kwonly_args', 0) npargs = len(node.args) - nkargs signature = self._fmt_signature( class_name, func_name, node.args, npargs, node.star_arg, nkargs, node.starstar_arg, return_type=None, hide_self=hide_self) if signature: if is_constructor: doc_holder = self.class_node.entry.type.scope else: doc_holder = node.entry if doc_holder.doc is not None: old_doc = doc_holder.doc elif not is_constructor and getattr(node, 'py_func', None) is not None: old_doc = node.py_func.entry.doc else: old_doc = None new_doc = self._embed_signature(signature, old_doc) doc_holder.doc = EncodedString(new_doc) if not is_constructor and getattr(node, 'py_func', None) is not None: node.py_func.entry.doc = EncodedString(new_doc) return node def visit_CFuncDefNode(self, node): if not self.current_directives['embedsignature']: return node if not node.overridable: # not cpdef FOO(...): return node signature = self._fmt_signature( self.class_name, node.declarator.base.name, node.declarator.args, return_type=node.return_type) if signature: if node.entry.doc is not None: old_doc = node.entry.doc elif getattr(node, 'py_func', None) is not None: old_doc = node.py_func.entry.doc else: old_doc = None new_doc = self._embed_signature(signature, old_doc) node.entry.doc = EncodedString(new_doc) if hasattr(node, 'py_func') and node.py_func is not None: node.py_func.entry.doc = EncodedString(new_doc) return node def visit_PropertyNode(self, node): if not self.current_directives['embedsignature']: return node entry = node.entry if entry.visibility == 'public': # property synthesised from a cdef public attribute type_name = entry.type.declaration_code("", for_display=1) if not entry.type.is_pyobject: type_name = "'%s'" % type_name elif entry.type.is_extension_type: type_name 
= entry.type.module_name + '.' + type_name signature = '%s: %s' % (entry.name, type_name) new_doc = self._embed_signature(signature, entry.doc) entry.doc = EncodedString(new_doc) return node
bsd-3-clause
WebSpider/headphones
lib/musicbrainzngs/mbxml.py
11
24580
# This file is part of the musicbrainzngs library # Copyright (C) Alastair Porter, Adrian Sampson, and others # This file is distributed under a BSD-2-Clause type license. # See the COPYING file for more information. import re import xml.etree.ElementTree as ET import logging from musicbrainzngs import util try: from ET import fixtag except: # Python < 2.7 def fixtag(tag, namespaces): # given a decorated tag (of the form {uri}tag), return prefixed # tag and namespace declaration, if any if isinstance(tag, ET.QName): tag = tag.text namespace_uri, tag = tag[1:].split("}", 1) prefix = namespaces.get(namespace_uri) if prefix is None: prefix = "ns%d" % len(namespaces) namespaces[namespace_uri] = prefix if prefix == "xml": xmlns = None else: xmlns = ("xmlns:%s" % prefix, namespace_uri) else: xmlns = None return "%s:%s" % (prefix, tag), xmlns NS_MAP = {"http://musicbrainz.org/ns/mmd-2.0#": "ws2", "http://musicbrainz.org/ns/ext#-2.0": "ext"} _log = logging.getLogger("musicbrainzngs") def get_error_message(error): """ Given an error XML message from the webservice containing <error><text>x</text><text>y</text></error>, return a list of [x, y]""" try: tree = util.bytes_to_elementtree(error) root = tree.getroot() errors = [] if root.tag == "error": for ch in root: if ch.tag == "text": errors.append(ch.text) return errors except ET.ParseError: return None def make_artist_credit(artists): names = [] for artist in artists: if isinstance(artist, dict): if "name" in artist: names.append(artist.get("name", "")) else: names.append(artist.get("artist", {}).get("name", "")) else: names.append(artist) return "".join(names) def parse_elements(valid_els, inner_els, element): """ Extract single level subelements from an element. For example, given the element: <element> <subelement>Text</subelement> </element> and a list valid_els that contains "subelement", return a dict {'subelement': 'Text'} Delegate the parsing of multi-level subelements to another function. 
For example, given the element: <element> <subelement> <a>Foo</a><b>Bar</b> </subelement> </element> and a dictionary {'subelement': parse_subelement}, call parse_subelement(<subelement>) and return a dict {'subelement': <result>} if parse_subelement returns a tuple of the form ('subelement-key', <result>) then return a dict {'subelement-key': <result>} instead """ result = {} for sub in element: t = fixtag(sub.tag, NS_MAP)[0] if ":" in t: t = t.split(":")[1] if t in valid_els: result[t] = sub.text or "" elif t in inner_els.keys(): inner_result = inner_els[t](sub) if isinstance(inner_result, tuple): result[inner_result[0]] = inner_result[1] else: result[t] = inner_result # add counts for lists when available m = re.match(r'([a-z0-9-]+)-list', t) if m and "count" in sub.attrib: result["%s-count" % m.group(1)] = int(sub.attrib["count"]) else: _log.info("in <%s>, uncaught <%s>", fixtag(element.tag, NS_MAP)[0], t) return result def parse_attributes(attributes, element): """ Extract attributes from an element. 
For example, given the element: <element type="Group" /> and a list attributes that contains "type", return a dict {'type': 'Group'} """ result = {} for attr in element.attrib: if "{" in attr: a = fixtag(attr, NS_MAP)[0] else: a = attr if a in attributes: result[a] = element.attrib[attr] else: _log.info("in <%s>, uncaught attribute %s", fixtag(element.tag, NS_MAP)[0], attr) return result def parse_message(message): tree = util.bytes_to_elementtree(message) root = tree.getroot() result = {} valid_elements = {"area": parse_area, "artist": parse_artist, "label": parse_label, "place": parse_place, "release": parse_release, "release-group": parse_release_group, "series": parse_series, "recording": parse_recording, "work": parse_work, "url": parse_url, "disc": parse_disc, "cdstub": parse_cdstub, "isrc": parse_isrc, "annotation-list": parse_annotation_list, "area-list": parse_area_list, "artist-list": parse_artist_list, "label-list": parse_label_list, "place-list": parse_place_list, "release-list": parse_release_list, "release-group-list": parse_release_group_list, "series-list": parse_series_list, "recording-list": parse_recording_list, "work-list": parse_work_list, "url-list": parse_url_list, "collection-list": parse_collection_list, "collection": parse_collection, "message": parse_response_message } result.update(parse_elements([], valid_elements, root)) return result def parse_response_message(message): return parse_elements(["text"], {}, message) def parse_collection_list(cl): return [parse_collection(c) for c in cl] def parse_collection(collection): result = {} attribs = ["id"] elements = ["name", "editor"] inner_els = {"release-list": parse_release_list} result.update(parse_attributes(attribs, collection)) result.update(parse_elements(elements, inner_els, collection)) return result def parse_annotation_list(al): return [parse_annotation(a) for a in al] def parse_annotation(annotation): result = {} attribs = ["type", "ext:score"] elements = ["entity", "name", 
"text"] result.update(parse_attributes(attribs, annotation)) result.update(parse_elements(elements, {}, annotation)) return result def parse_lifespan(lifespan): parts = parse_elements(["begin", "end", "ended"], {}, lifespan) return parts def parse_area_list(al): return [parse_area(a) for a in al] def parse_area(area): result = {} attribs = ["id", "type", "ext:score"] elements = ["name", "sort-name", "disambiguation"] inner_els = {"life-span": parse_lifespan, "alias-list": parse_alias_list, "relation-list": parse_relation_list, "annotation": parse_annotation, "iso-3166-1-code-list": parse_element_list, "iso-3166-2-code-list": parse_element_list, "iso-3166-3-code-list": parse_element_list} result.update(parse_attributes(attribs, area)) result.update(parse_elements(elements, inner_els, area)) return result def parse_artist_list(al): return [parse_artist(a) for a in al] def parse_artist(artist): result = {} attribs = ["id", "type", "ext:score"] elements = ["name", "sort-name", "country", "user-rating", "disambiguation", "gender", "ipi"] inner_els = {"area": parse_area, "begin-area": parse_area, "end-area": parse_area, "life-span": parse_lifespan, "recording-list": parse_recording_list, "relation-list": parse_relation_list, "release-list": parse_release_list, "release-group-list": parse_release_group_list, "work-list": parse_work_list, "tag-list": parse_tag_list, "user-tag-list": parse_tag_list, "rating": parse_rating, "ipi-list": parse_element_list, "isni-list": parse_element_list, "alias-list": parse_alias_list, "annotation": parse_annotation} result.update(parse_attributes(attribs, artist)) result.update(parse_elements(elements, inner_els, artist)) return result def parse_coordinates(c): return parse_elements(['latitude', 'longitude'], {}, c) def parse_place_list(pl): return [parse_place(p) for p in pl] def parse_place(place): result = {} attribs = ["id", "type", "ext:score"] elements = ["name", "address", "ipi", "disambiguation"] inner_els = {"area": parse_area, 
"coordinates": parse_coordinates, "life-span": parse_lifespan, "tag-list": parse_tag_list, "user-tag-list": parse_tag_list, "alias-list": parse_alias_list, "relation-list": parse_relation_list, "annotation": parse_annotation} result.update(parse_attributes(attribs, place)) result.update(parse_elements(elements, inner_els, place)) return result def parse_label_list(ll): return [parse_label(l) for l in ll] def parse_label(label): result = {} attribs = ["id", "type", "ext:score"] elements = ["name", "sort-name", "country", "label-code", "user-rating", "ipi", "disambiguation"] inner_els = {"area": parse_area, "life-span": parse_lifespan, "release-list": parse_release_list, "tag-list": parse_tag_list, "user-tag-list": parse_tag_list, "rating": parse_rating, "ipi-list": parse_element_list, "alias-list": parse_alias_list, "relation-list": parse_relation_list, "annotation": parse_annotation} result.update(parse_attributes(attribs, label)) result.update(parse_elements(elements, inner_els, label)) return result def parse_relation_target(tgt): attributes = parse_attributes(['id'], tgt) if 'id' in attributes: return ('target-id', attributes['id']) else: return ('target-id', tgt.text) def parse_relation_list(rl): attribs = ["target-type"] ttype = parse_attributes(attribs, rl) key = "%s-relation-list" % ttype["target-type"] return (key, [parse_relation(r) for r in rl]) def parse_relation(relation): result = {} attribs = ["type", "type-id"] elements = ["target", "direction", "begin", "end", "ended", "ordering-key"] inner_els = {"area": parse_area, "artist": parse_artist, "label": parse_label, "place": parse_place, "recording": parse_recording, "release": parse_release, "release-group": parse_release_group, "series": parse_series, "attribute-list": parse_element_list, "work": parse_work, "target": parse_relation_target } result.update(parse_attributes(attribs, relation)) result.update(parse_elements(elements, inner_els, relation)) return result def parse_release(release): result = 
{} attribs = ["id", "ext:score"] elements = ["title", "status", "disambiguation", "quality", "country", "barcode", "date", "packaging", "asin"] inner_els = {"text-representation": parse_text_representation, "artist-credit": parse_artist_credit, "label-info-list": parse_label_info_list, "medium-list": parse_medium_list, "release-group": parse_release_group, "tag-list": parse_tag_list, "user-tag-list": parse_tag_list, "relation-list": parse_relation_list, "annotation": parse_annotation, "cover-art-archive": parse_caa, "release-event-list": parse_release_event_list} result.update(parse_attributes(attribs, release)) result.update(parse_elements(elements, inner_els, release)) if "artist-credit" in result: result["artist-credit-phrase"] = make_artist_credit( result["artist-credit"]) return result def parse_medium_list(ml): return [parse_medium(m) for m in ml] def parse_release_event_list(rel): return [parse_release_event(re) for re in rel] def parse_release_event(event): result = {} elements = ["date"] inner_els = {"area": parse_area} result.update(parse_elements(elements, inner_els, event)) return result def parse_medium(medium): result = {} elements = ["position", "format", "title"] inner_els = {"disc-list": parse_disc_list, "track-list": parse_track_list} result.update(parse_elements(elements, inner_els, medium)) return result def parse_disc_list(dl): return [parse_disc(d) for d in dl] def parse_text_representation(textr): return parse_elements(["language", "script"], {}, textr) def parse_release_group(rg): result = {} attribs = ["id", "type", "ext:score"] elements = ["title", "user-rating", "first-release-date", "primary-type", "disambiguation"] inner_els = {"artist-credit": parse_artist_credit, "release-list": parse_release_list, "tag-list": parse_tag_list, "user-tag-list": parse_tag_list, "secondary-type-list": parse_element_list, "relation-list": parse_relation_list, "rating": parse_rating, "annotation": parse_annotation} result.update(parse_attributes(attribs, 
rg)) result.update(parse_elements(elements, inner_els, rg)) if "artist-credit" in result: result["artist-credit-phrase"] = make_artist_credit(result["artist-credit"]) return result def parse_recording(recording): result = {} attribs = ["id", "ext:score"] elements = ["title", "length", "user-rating", "disambiguation", "video"] inner_els = {"artist-credit": parse_artist_credit, "release-list": parse_release_list, "tag-list": parse_tag_list, "user-tag-list": parse_tag_list, "rating": parse_rating, "isrc-list": parse_external_id_list, "echoprint-list": parse_external_id_list, "relation-list": parse_relation_list, "annotation": parse_annotation} result.update(parse_attributes(attribs, recording)) result.update(parse_elements(elements, inner_els, recording)) if "artist-credit" in result: result["artist-credit-phrase"] = make_artist_credit(result["artist-credit"]) return result def parse_series_list(sl): return [parse_series(s) for s in sl] def parse_series(series): result = {} attribs = ["id", "type", "ext:score"] elements = ["name", "disambiguation"] inner_els = {"alias-list": parse_alias_list, "relation-list": parse_relation_list, "annotation": parse_annotation} result.update(parse_attributes(attribs, series)) result.update(parse_elements(elements, inner_els, series)) return result def parse_external_id_list(pl): return [parse_attributes(["id"], p)["id"] for p in pl] def parse_element_list(el): return [e.text for e in el] def parse_work_list(wl): return [parse_work(w) for w in wl] def parse_work(work): result = {} attribs = ["id", "ext:score", "type"] elements = ["title", "user-rating", "language", "iswc", "disambiguation"] inner_els = {"tag-list": parse_tag_list, "user-tag-list": parse_tag_list, "rating": parse_rating, "alias-list": parse_alias_list, "iswc-list": parse_element_list, "relation-list": parse_relation_list, "annotation": parse_response_message, "attribute-list": parse_work_attribute_list } result.update(parse_attributes(attribs, work)) 
result.update(parse_elements(elements, inner_els, work)) return result def parse_work_attribute_list(wal): return [parse_work_attribute(wa) for wa in wal] def parse_work_attribute(wa): result = {} attribs = ["type"] result.update(parse_attributes(attribs, wa)) result["attribute"] = wa.text return result def parse_url_list(ul): return [parse_url(u) for u in ul] def parse_url(url): result = {} attribs = ["id"] elements = ["resource"] inner_els = {"relation-list": parse_relation_list} result.update(parse_attributes(attribs, url)) result.update(parse_elements(elements, inner_els, url)) return result def parse_disc(disc): result = {} attribs = ["id"] elements = ["sectors"] inner_els = {"release-list": parse_release_list} result.update(parse_attributes(attribs, disc)) result.update(parse_elements(elements, inner_els, disc)) return result def parse_cdstub(cdstub): result = {} attribs = ["id"] elements = ["title", "artist", "barcode"] inner_els = {"track-list": parse_track_list} result.update(parse_attributes(attribs, cdstub)) result.update(parse_elements(elements, inner_els, cdstub)) return result def parse_release_list(rl): result = [] for r in rl: result.append(parse_release(r)) return result def parse_release_group_list(rgl): result = [] for rg in rgl: result.append(parse_release_group(rg)) return result def parse_isrc(isrc): result = {} attribs = ["id"] inner_els = {"recording-list": parse_recording_list} result.update(parse_attributes(attribs, isrc)) result.update(parse_elements([], inner_els, isrc)) return result def parse_recording_list(recs): result = [] for r in recs: result.append(parse_recording(r)) return result def parse_artist_credit(ac): result = [] for namecredit in ac: result.append(parse_name_credit(namecredit)) join = parse_attributes(["joinphrase"], namecredit) if "joinphrase" in join: result.append(join["joinphrase"]) return result def parse_name_credit(nc): result = {} elements = ["name"] inner_els = {"artist": parse_artist} 
result.update(parse_elements(elements, inner_els, nc)) return result def parse_label_info_list(lil): result = [] for li in lil: result.append(parse_label_info(li)) return result def parse_label_info(li): result = {} elements = ["catalog-number"] inner_els = {"label": parse_label} result.update(parse_elements(elements, inner_els, li)) return result def parse_track_list(tl): result = [] for t in tl: result.append(parse_track(t)) return result def parse_track(track): result = {} attribs = ["id"] elements = ["number", "position", "title", "length"] inner_els = {"recording": parse_recording, "artist-credit": parse_artist_credit} result.update(parse_attributes(attribs, track)) result.update(parse_elements(elements, inner_els, track)) if "artist-credit" in result.get("recording", {}) and "artist-credit" not in result: result["artist-credit"] = result["recording"]["artist-credit"] if "artist-credit" in result: result["artist-credit-phrase"] = make_artist_credit(result["artist-credit"]) # Make a length field that contains track length or recording length track_or_recording = None if "length" in result: track_or_recording = result["length"] elif result.get("recording", {}).get("length"): track_or_recording = result.get("recording", {}).get("length") if track_or_recording: result["track_or_recording_length"] = track_or_recording return result def parse_tag_list(tl): return [parse_tag(t) for t in tl] def parse_tag(tag): result = {} attribs = ["count"] elements = ["name"] result.update(parse_attributes(attribs, tag)) result.update(parse_elements(elements, {}, tag)) return result def parse_rating(rating): result = {} attribs = ["votes-count"] result.update(parse_attributes(attribs, rating)) result["rating"] = rating.text return result def parse_alias_list(al): return [parse_alias(a) for a in al] def parse_alias(alias): result = {} attribs = ["locale", "sort-name", "type", "primary", "begin-date", "end-date"] result.update(parse_attributes(attribs, alias)) result["alias"] = 
alias.text return result def parse_caa(caa_element): result = {} elements = ["artwork", "count", "front", "back", "darkened"] result.update(parse_elements(elements, {}, caa_element)) return result ### def make_barcode_request(release2barcode): NS = "http://musicbrainz.org/ns/mmd-2.0#" root = ET.Element("{%s}metadata" % NS) rel_list = ET.SubElement(root, "{%s}release-list" % NS) for release, barcode in release2barcode.items(): rel_xml = ET.SubElement(rel_list, "{%s}release" % NS) bar_xml = ET.SubElement(rel_xml, "{%s}barcode" % NS) rel_xml.set("{%s}id" % NS, release) bar_xml.text = barcode return ET.tostring(root, "utf-8") def make_tag_request(**kwargs): NS = "http://musicbrainz.org/ns/mmd-2.0#" root = ET.Element("{%s}metadata" % NS) for entity_type in ['artist', 'label', 'place', 'recording', 'release', 'release_group', 'work']: entity_tags = kwargs.pop(entity_type + '_tags', None) if entity_tags is not None: e_list = ET.SubElement(root, "{%s}%s-list" % (NS, entity_type.replace('_', '-'))) for e, tags in entity_tags.items(): e_xml = ET.SubElement(e_list, "{%s}%s" % (NS, entity_type.replace('_', '-'))) e_xml.set("{%s}id" % NS, e) taglist = ET.SubElement(e_xml, "{%s}user-tag-list" % NS) for tag in tags: usertag_xml = ET.SubElement(taglist, "{%s}user-tag" % NS) name_xml = ET.SubElement(usertag_xml, "{%s}name" % NS) name_xml.text = tag if kwargs.keys(): raise TypeError("make_tag_request() got an unexpected keyword argument '%s'" % kwargs.popitem()[0]) return ET.tostring(root, "utf-8") def make_rating_request(**kwargs): NS = "http://musicbrainz.org/ns/mmd-2.0#" root = ET.Element("{%s}metadata" % NS) for entity_type in ['artist', 'label', 'recording', 'release_group', 'work']: entity_ratings = kwargs.pop(entity_type + '_ratings', None) if entity_ratings is not None: e_list = ET.SubElement(root, "{%s}%s-list" % (NS, entity_type.replace('_', '-'))) for e, rating in entity_ratings.items(): e_xml = ET.SubElement(e_list, "{%s}%s" % (NS, entity_type.replace('_', '-'))) 
e_xml.set("{%s}id" % NS, e) rating_xml = ET.SubElement(e_xml, "{%s}user-rating" % NS) rating_xml.text = str(rating) if kwargs.keys(): raise TypeError("make_rating_request() got an unexpected keyword argument '%s'" % kwargs.popitem()[0]) return ET.tostring(root, "utf-8") def make_isrc_request(recording2isrcs): NS = "http://musicbrainz.org/ns/mmd-2.0#" root = ET.Element("{%s}metadata" % NS) rec_list = ET.SubElement(root, "{%s}recording-list" % NS) for rec, isrcs in recording2isrcs.items(): if len(isrcs) > 0: rec_xml = ET.SubElement(rec_list, "{%s}recording" % NS) rec_xml.set("{%s}id" % NS, rec) isrc_list_xml = ET.SubElement(rec_xml, "{%s}isrc-list" % NS) isrc_list_xml.set("{%s}count" % NS, str(len(isrcs))) for isrc in isrcs: isrc_xml = ET.SubElement(isrc_list_xml, "{%s}isrc" % NS) isrc_xml.set("{%s}id" % NS, isrc) return ET.tostring(root, "utf-8")
gpl-3.0
CVL-GitHub/karaage
conf/settings.py
2
9073
# -*- coding: utf-8 -*- # Globally defined Karaage settings # These settings will be used for karaage-admin and karaage-registration. # Some of these values have sensible defaults. Settings that don't have a # sensible default must be configured manually. # Other Django settings are also possible, this list is not a comprehensive # list of all settings. # Copyright 2010-2011, 2014-2015 VPAC # Copyright 2010-2011 The University of Melbourne # # Django settings # # A boolean that turns on/off debug mode. # # Never deploy a site into production with DEBUG turned on. # # Did you catch that? NEVER deploy a site into production with DEBUG turned on. # # One of the main features of debug mode is the display of detailed error # pages. If your app raises an exception when DEBUG is True, Django will # display a detailed traceback, including a lot of metadata about your # environment, such as all the currently defined Django settings (from # settings.py). # # default: DEBUG = False # # DEBUG = True # FQDN host, used in default settings for :setting:`ALLOWED_HOSTS`, # :setting:`REGISTRATION_BASE_URL`, and :setting:`ADMIN_BASE_URL`. # # default: HTTP_HOST = FQDN hostname # # HTTP_HOST = "localhost" # A list of strings representing the host/domain names that this Django site # can serve. This is a security measure to prevent an attacker from poisoning # caches and password reset emails with links to malicious hosts by submitting # requests with a fake HTTP Host header, which is possible even under many # seemingly-safe web server configurations. # # %(HOST) will be substituted with the HTTP_HOST setting. # # default: ALLOWED_HOSTS = ["%(HOST)s"] # # ALLOWED_HOSTS = ["www.example.org"] # Whether to use a secure cookie for the session cookie. If this is set to # True, the cookie will be marked as “secure,” which means browsers may ensure # that the cookie is only sent under an HTTPS connection. 
# # default: SESSION_COOKIE_SECURE = True # # SESSION_COOKIE_SECURE = False # A tuple that lists people who get code error notifications. When DEBUG=False # and a view raises an exception, Django will email these people with the full # exception information. Each member of the tuple should be a tuple of (Full # name, email address). ADMINS = ( # ('Your Name', 'your_email@domain.com'), ) # A tuple in the same format as ADMINS that specifies who should get broken # link notifications when BrokenLinkEmailsMiddleware is enabled. MANAGERS = ADMINS # A dictionary containing the settings for all databases to be used with # Django. It is a nested dictionary whose contents maps database aliases to a # dictionary containing the options for an individual database. DATABASES = { 'default': { 'ENGINE': 'django.db.backends.dummy', 'NAME': '', 'USER': '', 'PASSWORD': '', 'HOST': '', 'PORT': '', 'ATOMIC_REQUESTS': True, } } # The email address that error messages come from, such as those sent to ADMINS # and MANAGERS. SERVER_EMAIL = 'karaage@example.org' # The host to use for sending email. EMAIL_HOST = 'localhost' # Subject-line prefix for email messages sent with django.core.mail.mail_admins # or django.core.mail.mail_managers. You’ll probably want to include the # trailing space. EMAIL_SUBJECT_PREFIX = '[Karaage] - ' # Local time zone for this installation. Choices can be found here: # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name # although not all choices may be available on all operating systems. # If running in a Windows environment this must be set to the same as your # system time zone. TIME_ZONE = 'Australia/Melbourne' # Language code for this installation. All choices can be found here: # http://www.i18nguy.com/unicode/language-identifiers.html LANGUAGE_CODE = 'en-au' # A secret key for a particular Django installation. This is used to provide # cryptographic signing, and should be set to a unique, unpredictable value. 
SECRET_KEY = '' # A data structure containing configuration information. The contents of this # data structure will be passed as the argument to the configuration method # described in LOGGING_CONFIG. LOGGING = { 'version': 1, 'disable_existing_loggers': False, 'formatters': { 'verbose': { 'format': '%(levelname)s %(asctime)s ' '%(module)s %(process)d %(thread)d %(message)s' }, 'simple': { 'format': '%(levelname)s %(message)s' }, }, 'handlers': { 'mail_admins': { 'level': 'ERROR', 'class': 'django.utils.log.AdminEmailHandler' }, 'django_file': { 'level': 'WARNING', 'class': 'karaage.common.logging.FileHandler', 'filename': '/var/log/karaage3/django.log', 'formatter': 'verbose', 'owner': ['www-data', 'www-data'], }, 'karaage_file': { 'level': 'WARNING', 'class': 'karaage.common.logging.FileHandler', 'filename': '/var/log/karaage3/karaage.log', 'formatter': 'verbose', 'owner': ['www-data', 'www-data'], }, # 'ldap_file': { # 'level': 'DEBUG', # 'class': 'karaage.common.logging.FileHandler', # 'filename': '/var/log/karaage3/ldap.log', # 'formatter': 'verbose', # 'owner': ['www-data', 'www-data'], # }, # 'mam_file': { # 'level': 'DEBUG', # 'class': 'karaage.common.logging.FileHandler', # 'filename': '/var/log/karaage3/mam.log', # 'formatter': 'verbose', # 'owner': ['www-data', 'www-data'], # }, # 'slurm_file': { # 'level': 'DEBUG', # 'class': 'karaage.common.logging.FileHandler', # 'filename': '/var/log/karaage3/slurm.log', # 'formatter': 'verbose', # 'owner': ['www-data', 'www-data'], # }, }, 'loggers': { '': { 'handlers': ['mail_admins'], 'level': 'ERROR', 'propagate': True, }, 'django': { 'handlers': ['django_file'], 'level': 'DEBUG', 'propagate': True, }, 'karaage': { 'handlers': ['karaage_file'], 'level': 'DEBUG', 'propagate': True, }, # 'karaage.datastores.ldap': { # 'handlers': ['ldap_file'], # 'level': 'DEBUG', # 'propagate': True, # }, # 'karaage.datastores.mam': { # 'handlers': ['mam_file'], # 'level': 'DEBUG', # 'propagate': True, # }, # 
'karaage.datastores.slurm': { # 'handlers': ['slurm_file'], # 'level': 'DEBUG', # 'propagate': True, # }, }, } # # Karaage settings # # Users are advised to contact this address if having problems. # This is also used as the from address in outgoing emails. ACCOUNTS_EMAIL = 'accounts@example.com' # This organisation name, used in outgoing emails. ACCOUNTS_ORG_NAME = 'Example' # Registration base URL - Used in email templates # Uncomment to override default # # %(HOST) will be substituted with the HTTP_HOST setting. # # default: REGISTRATION_BASE_URL = 'https://%(HOST)s/users' # # REGISTRATION_BASE_URL = 'https://accounts.example.org/users' # Admin base URL - Used in email templates # Uncomment to override default # # %(HOST) will be substituted with the HTTP_HOST setting. # # default: ADMIN_BASE_URL = 'https://%(HOST)s/kgadmin' # # ADMIN_BASE_URL = 'https://accounts.example.org/kgadmin' # Is Shibboleth supported? # # default: SHIB_SUPPORTED = False # # SHIB_SUPPORTED = True # Path to AUP policy. Note that setting this will not disable the Karaage # default page, it might be better to replace the AUP with a file in # the templates directory ``karaage/common/aup-detail.html`` if required. # # default: Django template ``karaage/common/aup-detail.html`` # # AUP_URL = "https://site.example.org/users/aup/" # Do we allow anonymous users to request accounts? # # default: ALLOW_REGISTRATIONS = False # # ALLOW_REGISTRATIONS = True # Do we allow any logged in user to access all usage information? # # default: USAGE_IS_PUBLIC = True # # USAGE_IS_PUBLIC = False # Settings to restrict the valid list of email addresses we allow in # applications. EMAIL_MATCH_TYPE can be "include" or "exclude". If "include" # then the email address must match one of the RE entries in EMAIL_MATCH_LIST. # If "exclude" then then email address must not match of the the RE entries in # EMAIL_MATCH_LIST. 
# # default: allow any email address # # EMAIL_MATCH_TYPE="include" # EMAIL_MATCH_LIST=["@vpac.org$", "@v3.org.au$", "^tux@.*au$"] # List of Karaage plugins # # default: PLUGINS = [] # # PLUGINS = [ # 'karaage.plugins.kgapplications.plugin', # 'karaage.plugins.kgsoftware.plugin', # 'karaage.plugins.kgsoftware.applications.plugin', # 'karaage.plugins.kgusage.plugin', # ]
gpl-3.0
dengshaodong/docker-apprtc
src/third_party/oauth2client/client.py
122
44282
# Copyright (C) 2010 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """An OAuth 2.0 client. Tools for interacting with OAuth 2.0 protected resources. """ __author__ = 'jcgregorio@google.com (Joe Gregorio)' import base64 import clientsecrets import copy import datetime import httplib2 import logging import os import sys import time import urllib import urlparse from oauth2client import GOOGLE_AUTH_URI from oauth2client import GOOGLE_REVOKE_URI from oauth2client import GOOGLE_TOKEN_URI from oauth2client import util from oauth2client.anyjson import simplejson HAS_OPENSSL = False HAS_CRYPTO = False try: from oauth2client import crypt HAS_CRYPTO = True if crypt.OpenSSLVerifier is not None: HAS_OPENSSL = True except ImportError: pass try: from urlparse import parse_qsl except ImportError: from cgi import parse_qsl logger = logging.getLogger(__name__) # Expiry is stored in RFC3339 UTC format EXPIRY_FORMAT = '%Y-%m-%dT%H:%M:%SZ' # Which certs to use to validate id_tokens received. ID_TOKEN_VERIFICATON_CERTS = 'https://www.googleapis.com/oauth2/v1/certs' # Constant to use for the out of band OAuth 2.0 flow. OOB_CALLBACK_URN = 'urn:ietf:wg:oauth:2.0:oob' # Google Data client libraries may need to set this to [401, 403]. 
REFRESH_STATUS_CODES = [401] class Error(Exception): """Base error for this module.""" class FlowExchangeError(Error): """Error trying to exchange an authorization grant for an access token.""" class AccessTokenRefreshError(Error): """Error trying to refresh an expired access token.""" class TokenRevokeError(Error): """Error trying to revoke a token.""" class UnknownClientSecretsFlowError(Error): """The client secrets file called for an unknown type of OAuth 2.0 flow. """ class AccessTokenCredentialsError(Error): """Having only the access_token means no refresh is possible.""" class VerifyJwtTokenError(Error): """Could on retrieve certificates for validation.""" class NonAsciiHeaderError(Error): """Header names and values must be ASCII strings.""" def _abstract(): raise NotImplementedError('You need to override this function') class MemoryCache(object): """httplib2 Cache implementation which only caches locally.""" def __init__(self): self.cache = {} def get(self, key): return self.cache.get(key) def set(self, key, value): self.cache[key] = value def delete(self, key): self.cache.pop(key, None) class Credentials(object): """Base class for all Credentials objects. Subclasses must define an authorize() method that applies the credentials to an HTTP transport. Subclasses must also specify a classmethod named 'from_json' that takes a JSON string as input and returns an instaniated Credentials object. """ NON_SERIALIZED_MEMBERS = ['store'] def authorize(self, http): """Take an httplib2.Http instance (or equivalent) and authorizes it. Authorizes it for the set of credentials, usually by replacing http.request() with a method that adds in the appropriate headers and then delegates to the original Http.request() method. Args: http: httplib2.Http, an http object to be used to make the refresh request. """ _abstract() def refresh(self, http): """Forces a refresh of the access_token. Args: http: httplib2.Http, an http object to be used to make the refresh request. 
""" _abstract() def revoke(self, http): """Revokes a refresh_token and makes the credentials void. Args: http: httplib2.Http, an http object to be used to make the revoke request. """ _abstract() def apply(self, headers): """Add the authorization to the headers. Args: headers: dict, the headers to add the Authorization header to. """ _abstract() def _to_json(self, strip): """Utility function that creates JSON repr. of a Credentials object. Args: strip: array, An array of names of members to not include in the JSON. Returns: string, a JSON representation of this instance, suitable to pass to from_json(). """ t = type(self) d = copy.copy(self.__dict__) for member in strip: if member in d: del d[member] if 'token_expiry' in d and isinstance(d['token_expiry'], datetime.datetime): d['token_expiry'] = d['token_expiry'].strftime(EXPIRY_FORMAT) # Add in information we will need later to reconsistitue this instance. d['_class'] = t.__name__ d['_module'] = t.__module__ return simplejson.dumps(d) def to_json(self): """Creating a JSON representation of an instance of Credentials. Returns: string, a JSON representation of this instance, suitable to pass to from_json(). """ return self._to_json(Credentials.NON_SERIALIZED_MEMBERS) @classmethod def new_from_json(cls, s): """Utility class method to instantiate a Credentials subclass from a JSON representation produced by to_json(). Args: s: string, JSON from to_json(). Returns: An instance of the subclass of Credentials that was serialized with to_json(). """ data = simplejson.loads(s) # Find and call the right classmethod from_json() to restore the object. 
module = data['_module'] try: m = __import__(module) except ImportError: # In case there's an object from the old package structure, update it module = module.replace('.apiclient', '') m = __import__(module) m = __import__(module, fromlist=module.split('.')[:-1]) kls = getattr(m, data['_class']) from_json = getattr(kls, 'from_json') return from_json(s) @classmethod def from_json(cls, s): """Instantiate a Credentials object from a JSON description of it. The JSON should have been produced by calling .to_json() on the object. Args: data: dict, A deserialized JSON object. Returns: An instance of a Credentials subclass. """ return Credentials() class Flow(object): """Base class for all Flow objects.""" pass class Storage(object): """Base class for all Storage objects. Store and retrieve a single credential. This class supports locking such that multiple processes and threads can operate on a single store. """ def acquire_lock(self): """Acquires any lock necessary to access this Storage. This lock is not reentrant. """ pass def release_lock(self): """Release the Storage lock. Trying to release a lock that isn't held will result in a RuntimeError. """ pass def locked_get(self): """Retrieve credential. The Storage lock must be held when this is called. Returns: oauth2client.client.Credentials """ _abstract() def locked_put(self, credentials): """Write a credential. The Storage lock must be held when this is called. Args: credentials: Credentials, the credentials to store. """ _abstract() def locked_delete(self): """Delete a credential. The Storage lock must be held when this is called. """ _abstract() def get(self): """Retrieve credential. The Storage lock must *not* be held when this is called. Returns: oauth2client.client.Credentials """ self.acquire_lock() try: return self.locked_get() finally: self.release_lock() def put(self, credentials): """Write a credential. The Storage lock must be held when this is called. 
Args: credentials: Credentials, the credentials to store. """ self.acquire_lock() try: self.locked_put(credentials) finally: self.release_lock() def delete(self): """Delete credential. Frees any resources associated with storing the credential. The Storage lock must *not* be held when this is called. Returns: None """ self.acquire_lock() try: return self.locked_delete() finally: self.release_lock() def clean_headers(headers): """Forces header keys and values to be strings, i.e not unicode. The httplib module just concats the header keys and values in a way that may make the message header a unicode string, which, if it then tries to contatenate to a binary request body may result in a unicode decode error. Args: headers: dict, A dictionary of headers. Returns: The same dictionary but with all the keys converted to strings. """ clean = {} try: for k, v in headers.iteritems(): clean[str(k)] = str(v) except UnicodeEncodeError: raise NonAsciiHeaderError(k + ': ' + v) return clean def _update_query_params(uri, params): """Updates a URI with new query parameters. Args: uri: string, A valid URI, with potential existing query parameters. params: dict, A dictionary of query parameters. Returns: The same URI but with the new query parameters added. """ parts = list(urlparse.urlparse(uri)) query_params = dict(parse_qsl(parts[4])) # 4 is the index of the query part query_params.update(params) parts[4] = urllib.urlencode(query_params) return urlparse.urlunparse(parts) class OAuth2Credentials(Credentials): """Credentials object for OAuth 2.0. Credentials can be applied to an httplib2.Http object using the authorize() method, which then adds the OAuth 2.0 access token to each request. OAuth2Credentials objects may be safely pickled and unpickled. """ @util.positional(8) def __init__(self, access_token, client_id, client_secret, refresh_token, token_expiry, token_uri, user_agent, revoke_uri=None, id_token=None, token_response=None): """Create an instance of OAuth2Credentials. 
This constructor is not usually called by the user, instead OAuth2Credentials objects are instantiated by the OAuth2WebServerFlow. Args: access_token: string, access token. client_id: string, client identifier. client_secret: string, client secret. refresh_token: string, refresh token. token_expiry: datetime, when the access_token expires. token_uri: string, URI of token endpoint. user_agent: string, The HTTP User-Agent to provide for this application. revoke_uri: string, URI for revoke endpoint. Defaults to None; a token can't be revoked if this is None. id_token: object, The identity of the resource owner. token_response: dict, the decoded response to the token request. None if a token hasn't been requested yet. Stored because some providers (e.g. wordpress.com) include extra fields that clients may want. Notes: store: callable, A callable that when passed a Credential will store the credential back to where it came from. This is needed to store the latest access_token if it has expired and been refreshed. """ self.access_token = access_token self.client_id = client_id self.client_secret = client_secret self.refresh_token = refresh_token self.store = None self.token_expiry = token_expiry self.token_uri = token_uri self.user_agent = user_agent self.revoke_uri = revoke_uri self.id_token = id_token self.token_response = token_response # True if the credentials have been revoked or expired and can't be # refreshed. self.invalid = False def authorize(self, http): """Authorize an httplib2.Http instance with these credentials. The modified http.request method will add authentication headers to each request and will refresh access_tokens when a 401 is received on a request. In addition the http.request method has a credentials property, http.request.credentials, which is the Credentials object that authorized it. Args: http: An instance of httplib2.Http or something that acts like it. Returns: A modified instance of http that was passed in. 
Example: h = httplib2.Http() h = credentials.authorize(h) You can't create a new OAuth subclass of httplib2.Authenication because it never gets passed the absolute URI, which is needed for signing. So instead we have to overload 'request' with a closure that adds in the Authorization header and then calls the original version of 'request()'. """ request_orig = http.request # The closure that will replace 'httplib2.Http.request'. @util.positional(1) def new_request(uri, method='GET', body=None, headers=None, redirections=httplib2.DEFAULT_MAX_REDIRECTS, connection_type=None): if not self.access_token: logger.info('Attempting refresh to obtain initial access_token') self._refresh(request_orig) # Modify the request headers to add the appropriate # Authorization header. if headers is None: headers = {} self.apply(headers) if self.user_agent is not None: if 'user-agent' in headers: headers['user-agent'] = self.user_agent + ' ' + headers['user-agent'] else: headers['user-agent'] = self.user_agent resp, content = request_orig(uri, method, body, clean_headers(headers), redirections, connection_type) if resp.status in REFRESH_STATUS_CODES: logger.info('Refreshing due to a %s' % str(resp.status)) self._refresh(request_orig) self.apply(headers) return request_orig(uri, method, body, clean_headers(headers), redirections, connection_type) else: return (resp, content) # Replace the request method with our own closure. http.request = new_request # Set credentials as a property of the request method. setattr(http.request, 'credentials', self) return http def refresh(self, http): """Forces a refresh of the access_token. Args: http: httplib2.Http, an http object to be used to make the refresh request. """ self._refresh(http.request) def revoke(self, http): """Revokes a refresh_token and makes the credentials void. Args: http: httplib2.Http, an http object to be used to make the revoke request. 
""" self._revoke(http.request) def apply(self, headers): """Add the authorization to the headers. Args: headers: dict, the headers to add the Authorization header to. """ headers['Authorization'] = 'Bearer ' + self.access_token def to_json(self): return self._to_json(Credentials.NON_SERIALIZED_MEMBERS) @classmethod def from_json(cls, s): """Instantiate a Credentials object from a JSON description of it. The JSON should have been produced by calling .to_json() on the object. Args: data: dict, A deserialized JSON object. Returns: An instance of a Credentials subclass. """ data = simplejson.loads(s) if 'token_expiry' in data and not isinstance(data['token_expiry'], datetime.datetime): try: data['token_expiry'] = datetime.datetime.strptime( data['token_expiry'], EXPIRY_FORMAT) except: data['token_expiry'] = None retval = cls( data['access_token'], data['client_id'], data['client_secret'], data['refresh_token'], data['token_expiry'], data['token_uri'], data['user_agent'], revoke_uri=data.get('revoke_uri', None), id_token=data.get('id_token', None), token_response=data.get('token_response', None)) retval.invalid = data['invalid'] return retval @property def access_token_expired(self): """True if the credential is expired or invalid. If the token_expiry isn't set, we assume the token doesn't expire. """ if self.invalid: return True if not self.token_expiry: return False now = datetime.datetime.utcnow() if now >= self.token_expiry: logger.info('access_token is expired. Now: %s, token_expiry: %s', now, self.token_expiry) return True return False def set_store(self, store): """Set the Storage for the credential. Args: store: Storage, an implementation of Stroage object. This is needed to store the latest access_token if it has expired and been refreshed. This implementation uses locking to check for updates before updating the access_token. 
""" self.store = store def _updateFromCredential(self, other): """Update this Credential from another instance.""" self.__dict__.update(other.__getstate__()) def __getstate__(self): """Trim the state down to something that can be pickled.""" d = copy.copy(self.__dict__) del d['store'] return d def __setstate__(self, state): """Reconstitute the state of the object from being pickled.""" self.__dict__.update(state) self.store = None def _generate_refresh_request_body(self): """Generate the body that will be used in the refresh request.""" body = urllib.urlencode({ 'grant_type': 'refresh_token', 'client_id': self.client_id, 'client_secret': self.client_secret, 'refresh_token': self.refresh_token, }) return body def _generate_refresh_request_headers(self): """Generate the headers that will be used in the refresh request.""" headers = { 'content-type': 'application/x-www-form-urlencoded', } if self.user_agent is not None: headers['user-agent'] = self.user_agent return headers def _refresh(self, http_request): """Refreshes the access_token. This method first checks by reading the Storage object if available. If a refresh is still needed, it holds the Storage lock until the refresh is completed. Args: http_request: callable, a callable that matches the method signature of httplib2.Http.request, used to make the refresh request. Raises: AccessTokenRefreshError: When the refresh fails. """ if not self.store: self._do_refresh_request(http_request) else: self.store.acquire_lock() try: new_cred = self.store.locked_get() if (new_cred and not new_cred.invalid and new_cred.access_token != self.access_token): logger.info('Updated access_token read from Storage') self._updateFromCredential(new_cred) else: self._do_refresh_request(http_request) finally: self.store.release_lock() def _do_refresh_request(self, http_request): """Refresh the access_token using the refresh_token. 
Args: http_request: callable, a callable that matches the method signature of httplib2.Http.request, used to make the refresh request. Raises: AccessTokenRefreshError: When the refresh fails. """ body = self._generate_refresh_request_body() headers = self._generate_refresh_request_headers() logger.info('Refreshing access_token') resp, content = http_request( self.token_uri, method='POST', body=body, headers=headers) if resp.status == 200: # TODO(jcgregorio) Raise an error if loads fails? d = simplejson.loads(content) self.token_response = d self.access_token = d['access_token'] self.refresh_token = d.get('refresh_token', self.refresh_token) if 'expires_in' in d: self.token_expiry = datetime.timedelta( seconds=int(d['expires_in'])) + datetime.datetime.utcnow() else: self.token_expiry = None if self.store: self.store.locked_put(self) else: # An {'error':...} response body means the token is expired or revoked, # so we flag the credentials as such. logger.info('Failed to retrieve access token: %s' % content) error_msg = 'Invalid response %s.' % resp['status'] try: d = simplejson.loads(content) if 'error' in d: error_msg = d['error'] self.invalid = True if self.store: self.store.locked_put(self) except StandardError: pass raise AccessTokenRefreshError(error_msg) def _revoke(self, http_request): """Revokes the refresh_token and deletes the store if available. Args: http_request: callable, a callable that matches the method signature of httplib2.Http.request, used to make the revoke request. """ self._do_revoke(http_request, self.refresh_token) def _do_revoke(self, http_request, token): """Revokes the credentials and deletes the store if available. Args: http_request: callable, a callable that matches the method signature of httplib2.Http.request, used to make the refresh request. token: A string used as the token to be revoked. Can be either an access_token or refresh_token. Raises: TokenRevokeError: If the revoke request does not return with a 200 OK. 
""" logger.info('Revoking token') query_params = {'token': token} token_revoke_uri = _update_query_params(self.revoke_uri, query_params) resp, content = http_request(token_revoke_uri) if resp.status == 200: self.invalid = True else: error_msg = 'Invalid response %s.' % resp.status try: d = simplejson.loads(content) if 'error' in d: error_msg = d['error'] except StandardError: pass raise TokenRevokeError(error_msg) if self.store: self.store.delete() class AccessTokenCredentials(OAuth2Credentials): """Credentials object for OAuth 2.0. Credentials can be applied to an httplib2.Http object using the authorize() method, which then signs each request from that object with the OAuth 2.0 access token. This set of credentials is for the use case where you have acquired an OAuth 2.0 access_token from another place such as a JavaScript client or another web application, and wish to use it from Python. Because only the access_token is present it can not be refreshed and will in time expire. AccessTokenCredentials objects may be safely pickled and unpickled. Usage: credentials = AccessTokenCredentials('<an access token>', 'my-user-agent/1.0') http = httplib2.Http() http = credentials.authorize(http) Exceptions: AccessTokenCredentialsExpired: raised when the access_token expires or is revoked. """ def __init__(self, access_token, user_agent, revoke_uri=None): """Create an instance of OAuth2Credentials This is one of the few types if Credentials that you should contrust, Credentials objects are usually instantiated by a Flow. Args: access_token: string, access token. user_agent: string, The HTTP User-Agent to provide for this application. revoke_uri: string, URI for revoke endpoint. Defaults to None; a token can't be revoked if this is None. 
""" super(AccessTokenCredentials, self).__init__( access_token, None, None, None, None, None, user_agent, revoke_uri=revoke_uri) @classmethod def from_json(cls, s): data = simplejson.loads(s) retval = AccessTokenCredentials( data['access_token'], data['user_agent']) return retval def _refresh(self, http_request): raise AccessTokenCredentialsError( 'The access_token is expired or invalid and can\'t be refreshed.') def _revoke(self, http_request): """Revokes the access_token and deletes the store if available. Args: http_request: callable, a callable that matches the method signature of httplib2.Http.request, used to make the revoke request. """ self._do_revoke(http_request, self.access_token) class AssertionCredentials(OAuth2Credentials): """Abstract Credentials object used for OAuth 2.0 assertion grants. This credential does not require a flow to instantiate because it represents a two legged flow, and therefore has all of the required information to generate and refresh its own access tokens. It must be subclassed to generate the appropriate assertion string. AssertionCredentials objects may be safely pickled and unpickled. """ @util.positional(2) def __init__(self, assertion_type, user_agent=None, token_uri=GOOGLE_TOKEN_URI, revoke_uri=GOOGLE_REVOKE_URI, **unused_kwargs): """Constructor for AssertionFlowCredentials. Args: assertion_type: string, assertion type that will be declared to the auth server user_agent: string, The HTTP User-Agent to provide for this application. token_uri: string, URI for token endpoint. For convenience defaults to Google's endpoints but any OAuth 2.0 provider can be used. revoke_uri: string, URI for revoke endpoint. 
""" super(AssertionCredentials, self).__init__( None, None, None, None, None, token_uri, user_agent, revoke_uri=revoke_uri) self.assertion_type = assertion_type def _generate_refresh_request_body(self): assertion = self._generate_assertion() body = urllib.urlencode({ 'assertion': assertion, 'grant_type': 'urn:ietf:params:oauth:grant-type:jwt-bearer', }) return body def _generate_assertion(self): """Generate the assertion string that will be used in the access token request. """ _abstract() def _revoke(self, http_request): """Revokes the access_token and deletes the store if available. Args: http_request: callable, a callable that matches the method signature of httplib2.Http.request, used to make the revoke request. """ self._do_revoke(http_request, self.access_token) if HAS_CRYPTO: # PyOpenSSL and PyCrypto are not prerequisites for oauth2client, so if it is # missing then don't create the SignedJwtAssertionCredentials or the # verify_id_token() method. class SignedJwtAssertionCredentials(AssertionCredentials): """Credentials object used for OAuth 2.0 Signed JWT assertion grants. This credential does not require a flow to instantiate because it represents a two legged flow, and therefore has all of the required information to generate and refresh its own access tokens. SignedJwtAssertionCredentials requires either PyOpenSSL, or PyCrypto 2.6 or later. For App Engine you may also consider using AppAssertionCredentials. """ MAX_TOKEN_LIFETIME_SECS = 3600 # 1 hour in seconds @util.positional(4) def __init__(self, service_account_name, private_key, scope, private_key_password='notasecret', user_agent=None, token_uri=GOOGLE_TOKEN_URI, revoke_uri=GOOGLE_REVOKE_URI, **kwargs): """Constructor for SignedJwtAssertionCredentials. Args: service_account_name: string, id for account, usually an email address. private_key: string, private key in PKCS12 or PEM format. scope: string or iterable of strings, scope(s) of the credentials being requested. 
private_key_password: string, password for private_key, unused if private_key is in PEM format. user_agent: string, HTTP User-Agent to provide for this application. token_uri: string, URI for token endpoint. For convenience defaults to Google's endpoints but any OAuth 2.0 provider can be used. revoke_uri: string, URI for revoke endpoint. kwargs: kwargs, Additional parameters to add to the JWT token, for example sub=joe@xample.org.""" super(SignedJwtAssertionCredentials, self).__init__( None, user_agent=user_agent, token_uri=token_uri, revoke_uri=revoke_uri, ) self.scope = util.scopes_to_string(scope) # Keep base64 encoded so it can be stored in JSON. self.private_key = base64.b64encode(private_key) self.private_key_password = private_key_password self.service_account_name = service_account_name self.kwargs = kwargs @classmethod def from_json(cls, s): data = simplejson.loads(s) retval = SignedJwtAssertionCredentials( data['service_account_name'], base64.b64decode(data['private_key']), data['scope'], private_key_password=data['private_key_password'], user_agent=data['user_agent'], token_uri=data['token_uri'], **data['kwargs'] ) retval.invalid = data['invalid'] retval.access_token = data['access_token'] return retval def _generate_assertion(self): """Generate the assertion that will be used in the request.""" now = long(time.time()) payload = { 'aud': self.token_uri, 'scope': self.scope, 'iat': now, 'exp': now + SignedJwtAssertionCredentials.MAX_TOKEN_LIFETIME_SECS, 'iss': self.service_account_name } payload.update(self.kwargs) logger.debug(str(payload)) private_key = base64.b64decode(self.private_key) return crypt.make_signed_jwt(crypt.Signer.from_string( private_key, self.private_key_password), payload) # Only used in verify_id_token(), which is always calling to the same URI # for the certs. 
_cached_http = httplib2.Http(MemoryCache()) @util.positional(2) def verify_id_token(id_token, audience, http=None, cert_uri=ID_TOKEN_VERIFICATON_CERTS): """Verifies a signed JWT id_token. This function requires PyOpenSSL and because of that it does not work on App Engine. Args: id_token: string, A Signed JWT. audience: string, The audience 'aud' that the token should be for. http: httplib2.Http, instance to use to make the HTTP request. Callers should supply an instance that has caching enabled. cert_uri: string, URI of the certificates in JSON format to verify the JWT against. Returns: The deserialized JSON in the JWT. Raises: oauth2client.crypt.AppIdentityError if the JWT fails to verify. """ if http is None: http = _cached_http resp, content = http.request(cert_uri) if resp.status == 200: certs = simplejson.loads(content) return crypt.verify_signed_jwt_with_certs(id_token, certs, audience) else: raise VerifyJwtTokenError('Status code: %d' % resp.status) def _urlsafe_b64decode(b64string): # Guard against unicode strings, which base64 can't handle. b64string = b64string.encode('ascii') padded = b64string + '=' * (4 - len(b64string) % 4) return base64.urlsafe_b64decode(padded) def _extract_id_token(id_token): """Extract the JSON payload from a JWT. Does the extraction w/o checking the signature. Args: id_token: string, OAuth 2.0 id_token. Returns: object, The deserialized JSON payload. """ segments = id_token.split('.') if (len(segments) != 3): raise VerifyJwtTokenError( 'Wrong number of segments in token: %s' % id_token) return simplejson.loads(_urlsafe_b64decode(segments[1])) def _parse_exchange_token_response(content): """Parses response of an exchange token request. Most providers return JSON but some (e.g. Facebook) return a url-encoded string. Args: content: The body of a response Returns: Content as a dictionary object. Note that the dict could be empty, i.e. {}. That basically indicates a failure. 
""" resp = {} try: resp = simplejson.loads(content) except StandardError: # different JSON libs raise different exceptions, # so we just do a catch-all here resp = dict(parse_qsl(content)) # some providers respond with 'expires', others with 'expires_in' if resp and 'expires' in resp: resp['expires_in'] = resp.pop('expires') return resp @util.positional(4) def credentials_from_code(client_id, client_secret, scope, code, redirect_uri='postmessage', http=None, user_agent=None, token_uri=GOOGLE_TOKEN_URI, auth_uri=GOOGLE_AUTH_URI, revoke_uri=GOOGLE_REVOKE_URI): """Exchanges an authorization code for an OAuth2Credentials object. Args: client_id: string, client identifier. client_secret: string, client secret. scope: string or iterable of strings, scope(s) to request. code: string, An authroization code, most likely passed down from the client redirect_uri: string, this is generally set to 'postmessage' to match the redirect_uri that the client specified http: httplib2.Http, optional http instance to use to do the fetch token_uri: string, URI for token endpoint. For convenience defaults to Google's endpoints but any OAuth 2.0 provider can be used. auth_uri: string, URI for authorization endpoint. For convenience defaults to Google's endpoints but any OAuth 2.0 provider can be used. revoke_uri: string, URI for revoke endpoint. For convenience defaults to Google's endpoints but any OAuth 2.0 provider can be used. Returns: An OAuth2Credentials object. 
Raises: FlowExchangeError if the authorization code cannot be exchanged for an access token """ flow = OAuth2WebServerFlow(client_id, client_secret, scope, redirect_uri=redirect_uri, user_agent=user_agent, auth_uri=auth_uri, token_uri=token_uri, revoke_uri=revoke_uri) credentials = flow.step2_exchange(code, http=http) return credentials @util.positional(3) def credentials_from_clientsecrets_and_code(filename, scope, code, message = None, redirect_uri='postmessage', http=None, cache=None): """Returns OAuth2Credentials from a clientsecrets file and an auth code. Will create the right kind of Flow based on the contents of the clientsecrets file or will raise InvalidClientSecretsError for unknown types of Flows. Args: filename: string, File name of clientsecrets. scope: string or iterable of strings, scope(s) to request. code: string, An authorization code, most likely passed down from the client message: string, A friendly string to display to the user if the clientsecrets file is missing or invalid. If message is provided then sys.exit will be called in the case of an error. If message in not provided then clientsecrets.InvalidClientSecretsError will be raised. redirect_uri: string, this is generally set to 'postmessage' to match the redirect_uri that the client specified http: httplib2.Http, optional http instance to use to do the fetch cache: An optional cache service client that implements get() and set() methods. See clientsecrets.loadfile() for details. Returns: An OAuth2Credentials object. Raises: FlowExchangeError if the authorization code cannot be exchanged for an access token UnknownClientSecretsFlowError if the file describes an unknown kind of Flow. clientsecrets.InvalidClientSecretsError if the clientsecrets file is invalid. 
""" flow = flow_from_clientsecrets(filename, scope, message=message, cache=cache, redirect_uri=redirect_uri) credentials = flow.step2_exchange(code, http=http) return credentials class OAuth2WebServerFlow(Flow): """Does the Web Server Flow for OAuth 2.0. OAuth2WebServerFlow objects may be safely pickled and unpickled. """ @util.positional(4) def __init__(self, client_id, client_secret, scope, redirect_uri=None, user_agent=None, auth_uri=GOOGLE_AUTH_URI, token_uri=GOOGLE_TOKEN_URI, revoke_uri=GOOGLE_REVOKE_URI, **kwargs): """Constructor for OAuth2WebServerFlow. The kwargs argument is used to set extra query parameters on the auth_uri. For example, the access_type and approval_prompt query parameters can be set via kwargs. Args: client_id: string, client identifier. client_secret: string client secret. scope: string or iterable of strings, scope(s) of the credentials being requested. redirect_uri: string, Either the string 'urn:ietf:wg:oauth:2.0:oob' for a non-web-based application, or a URI that handles the callback from the authorization server. user_agent: string, HTTP User-Agent to provide for this application. auth_uri: string, URI for authorization endpoint. For convenience defaults to Google's endpoints but any OAuth 2.0 provider can be used. token_uri: string, URI for token endpoint. For convenience defaults to Google's endpoints but any OAuth 2.0 provider can be used. revoke_uri: string, URI for revoke endpoint. For convenience defaults to Google's endpoints but any OAuth 2.0 provider can be used. **kwargs: dict, The keyword arguments are all optional and required parameters for the OAuth calls. 
""" self.client_id = client_id self.client_secret = client_secret self.scope = util.scopes_to_string(scope) self.redirect_uri = redirect_uri self.user_agent = user_agent self.auth_uri = auth_uri self.token_uri = token_uri self.revoke_uri = revoke_uri self.params = { 'access_type': 'offline', 'response_type': 'code', } self.params.update(kwargs) @util.positional(1) def step1_get_authorize_url(self, redirect_uri=None): """Returns a URI to redirect to the provider. Args: redirect_uri: string, Either the string 'urn:ietf:wg:oauth:2.0:oob' for a non-web-based application, or a URI that handles the callback from the authorization server. This parameter is deprecated, please move to passing the redirect_uri in via the constructor. Returns: A URI as a string to redirect the user to begin the authorization flow. """ if redirect_uri is not None: logger.warning(('The redirect_uri parameter for' 'OAuth2WebServerFlow.step1_get_authorize_url is deprecated. Please' 'move to passing the redirect_uri in via the constructor.')) self.redirect_uri = redirect_uri if self.redirect_uri is None: raise ValueError('The value of redirect_uri must not be None.') query_params = { 'client_id': self.client_id, 'redirect_uri': self.redirect_uri, 'scope': self.scope, } query_params.update(self.params) return _update_query_params(self.auth_uri, query_params) @util.positional(2) def step2_exchange(self, code, http=None): """Exhanges a code for OAuth2Credentials. Args: code: string or dict, either the code as a string, or a dictionary of the query parameters to the redirect_uri, which contains the code. http: httplib2.Http, optional http instance to use to do the fetch Returns: An OAuth2Credentials object that can be used to authorize requests. Raises: FlowExchangeError if a problem occured exchanging the code for a refresh_token. 
""" if not (isinstance(code, str) or isinstance(code, unicode)): if 'code' not in code: if 'error' in code: error_msg = code['error'] else: error_msg = 'No code was supplied in the query parameters.' raise FlowExchangeError(error_msg) else: code = code['code'] body = urllib.urlencode({ 'grant_type': 'authorization_code', 'client_id': self.client_id, 'client_secret': self.client_secret, 'code': code, 'redirect_uri': self.redirect_uri, 'scope': self.scope, }) headers = { 'content-type': 'application/x-www-form-urlencoded', } if self.user_agent is not None: headers['user-agent'] = self.user_agent if http is None: http = httplib2.Http() resp, content = http.request(self.token_uri, method='POST', body=body, headers=headers) d = _parse_exchange_token_response(content) if resp.status == 200 and 'access_token' in d: access_token = d['access_token'] refresh_token = d.get('refresh_token', None) token_expiry = None if 'expires_in' in d: token_expiry = datetime.datetime.utcnow() + datetime.timedelta( seconds=int(d['expires_in'])) if 'id_token' in d: d['id_token'] = _extract_id_token(d['id_token']) logger.info('Successfully retrieved access token') return OAuth2Credentials(access_token, self.client_id, self.client_secret, refresh_token, token_expiry, self.token_uri, self.user_agent, revoke_uri=self.revoke_uri, id_token=d.get('id_token', None), token_response=d) else: logger.info('Failed to retrieve access token: %s' % content) if 'error' in d: # you never know what those providers got to say error_msg = unicode(d['error']) else: error_msg = 'Invalid response: %s.' % str(resp.status) raise FlowExchangeError(error_msg) @util.positional(2) def flow_from_clientsecrets(filename, scope, redirect_uri=None, message=None, cache=None): """Create a Flow from a clientsecrets file. Will create the right kind of Flow based on the contents of the clientsecrets file or will raise InvalidClientSecretsError for unknown types of Flows. Args: filename: string, File name of client secrets. 
scope: string or iterable of strings, scope(s) to request. redirect_uri: string, Either the string 'urn:ietf:wg:oauth:2.0:oob' for a non-web-based application, or a URI that handles the callback from the authorization server. message: string, A friendly string to display to the user if the clientsecrets file is missing or invalid. If message is provided then sys.exit will be called in the case of an error. If message in not provided then clientsecrets.InvalidClientSecretsError will be raised. cache: An optional cache service client that implements get() and set() methods. See clientsecrets.loadfile() for details. Returns: A Flow object. Raises: UnknownClientSecretsFlowError if the file describes an unknown kind of Flow. clientsecrets.InvalidClientSecretsError if the clientsecrets file is invalid. """ try: client_type, client_info = clientsecrets.loadfile(filename, cache=cache) if client_type in (clientsecrets.TYPE_WEB, clientsecrets.TYPE_INSTALLED): constructor_kwargs = { 'redirect_uri': redirect_uri, 'auth_uri': client_info['auth_uri'], 'token_uri': client_info['token_uri'], } revoke_uri = client_info.get('revoke_uri') if revoke_uri is not None: constructor_kwargs['revoke_uri'] = revoke_uri return OAuth2WebServerFlow( client_info['client_id'], client_info['client_secret'], scope, **constructor_kwargs) except clientsecrets.InvalidClientSecretsError: if message: sys.exit(message) else: raise else: raise UnknownClientSecretsFlowError( 'This OAuth 2.0 flow is unsupported: %r' % client_type)
bsd-3-clause
tsgit/invenio
modules/bibknowledge/lib/bibknowledge_dblayer.py
19
15484
# -*- coding: utf-8 -*- ## ## This file is part of Invenio. ## Copyright (C) 2009, 2010, 2011, 2013 CERN. ## ## Invenio is free software; you can redistribute it and/or ## modify it under the terms of the GNU General Public License as ## published by the Free Software Foundation; either version 2 of the ## License, or (at your option) any later version. ## ## Invenio is distributed in the hope that it will be useful, but ## WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ## General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with Invenio; if not, write to the Free Software Foundation, Inc., ## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. """ Database access related functions for BibKnowledge. """ __revision__ = "$Id$" from invenio.dbquery import run_sql from invenio.memoiseutils import Memoise def get_kbs_info(kbtypeparam="", searchkbname=""): """Returns all kbs as list of dictionaries {id, name, description, kbtype} If the KB is dynamic, the dynamic kb key are added in the dict. 
""" out = [] query = "SELECT id, name, description, kbtype FROM knwKB ORDER BY name" res = run_sql(query) for row in res: doappend = 1 # by default kbid = row[0] name = row[1] description = row[2] kbtype = row[3] dynres = {} if kbtype == 'd': #get the dynamic config dynres = get_kb_dyn_config(kbid) if kbtypeparam: doappend = 0 if (kbtype == kbtypeparam): doappend = 1 if searchkbname: doappend = 0 if (name == searchkbname): doappend = 1 if doappend: mydict = {'id':kbid, 'name':name, 'description':description, 'kbtype':kbtype} mydict.update(dynres) out.append(mydict) return out def get_all_kb_names(): """Returns all knowledge base names @return list of names """ out = [] res = run_sql("""SELECT name FROM knwKB""") for row in res: out.append(row[0]) return out def get_kb_id(kb_name): """Returns the id of the kb with given name""" res = run_sql("""SELECT id FROM knwKB WHERE name LIKE %s""", (kb_name,)) if len(res) > 0: return res[0][0] else: return None get_kb_id_memoised = Memoise(get_kb_id) def get_kb_name(kb_id): """Returns the name of the kb with given id @param kb_id the id @return string """ res = run_sql("""SELECT name FROM knwKB WHERE id=%s""", (kb_id,)) if len(res) > 0: return res[0][0] else: return None def get_kb_type(kb_id): """Returns the type of the kb with given id @param kb_id knowledge base id @return kb_type """ res = run_sql("""SELECT kbtype FROM knwKB WHERE id=%s""", (kb_id,)) if len(res) > 0: return res[0][0] else: return None def get_kb_mappings(kb_name="", sortby="to", keylike="", valuelike="", match_type="s"): """Returns a list of all mappings from the given kb, ordered by key @param kb_name knowledge base name. 
if "", return all @param sortby the sorting criteria ('from' or 'to') @keylike return only entries where key matches this @valuelike return only entries where value matches this """ out = [] k_id = get_kb_id(kb_name) if len(keylike) > 0: if match_type == "s": keylike = "%"+keylike+"%" else: keylike = '%' if len(valuelike) > 0: if match_type == "s": valuelike = "%"+valuelike+"%" else: valuelike = '%' if not kb_name: res = run_sql("""SELECT m.id, m.m_key, m.m_value, m.id_knwKB, k.name FROM knwKBRVAL m, knwKB k where m_key like %s and m_value like %s and m.id_knwKB = k.id""", (keylike, valuelike)) else: res = run_sql("""SELECT m.id, m.m_key, m.m_value, m.id_knwKB, k.name FROM knwKBRVAL m, knwKB k WHERE id_knwKB=%s and m.id_knwKB = k.id and m_key like %s and m_value like %s""", (k_id, keylike, valuelike)) #sort res lres = list(res) if sortby == "from": lres.sort(lambda x, y:cmp(x[1], y[1])) else: lres.sort(lambda x, y:cmp(x[2], y[2])) for row in lres: out.append({'id':row[0], 'key':row[1], 'value': row[2], 'kbid': row[3], 'kbname': row[4]}) return out def get_kb_dyn_config(kb_id): """ Returns a dictionary of 'field'=> y, 'expression'=> z for a knowledge base of type 'd'. The dictionary may have coll_id, collection. @param kb_id the id @return dict """ res = run_sql("""SELECT output_tag, search_expression, id_collection FROM knwKBDDEF where id_knwKB = %s""", (kb_id, )) mydict = {} for row in res: mydict['field'] = row[0] mydict['expression'] = row[1] mydict['coll_id'] = row[2] #put a collection field if collection exists.. 
if mydict.has_key('coll_id'): c_id = mydict['coll_id'] res = run_sql("""SELECT name from collection where id = %s""", (c_id,)) if res: mydict['collection'] = res[0][0] return mydict def save_kb_dyn_config(kb_id, field, expression, collection=""): """Saves a dynamic knowledge base configuration @param kb_id the id @param field the field where values are extracted @param expression ..using this expression @param collection ..in a certain collection (default is all) """ #check that collection exists coll_id = None if collection: res = run_sql("""SELECT id from collection where name = %s""", (collection,)) if res: coll_id = res[0][0] run_sql("""DELETE FROM knwKBDDEF where id_knwKB = %s""", (kb_id, )) run_sql("""INSERT INTO knwKBDDEF (id_knwKB, output_tag, search_expression, id_collection) VALUES (%s,%s,%s,%s)""", (kb_id, field, expression, coll_id)) return "" def get_kb_description(kb_name): """Returns the description of the given kb @param kb_id the id @return string """ k_id = get_kb_id(kb_name) res = run_sql("""SELECT description FROM knwKB WHERE id=%s""", (k_id,)) return res[0][0] def add_kb(kb_name, kb_description, kb_type=None): """ Adds a new kb with given name and description. Returns the id of the kb. 
If name already exists replace old value @param kb_name the name of the kb to create @param kb_description a description for the kb @return the id of the newly created kb """ kb_db = 'w' #the typical written_as - change_to if not kb_type: pass else: if kb_type == 'taxonomy': kb_db = 't' if kb_type == 'dynamic': kb_db = 'd' run_sql("""REPLACE INTO knwKB (name, description, kbtype) VALUES (%s,%s,%s)""", (kb_name, kb_description, kb_db)) return get_kb_id(kb_name) def delete_kb(kb_name): """Deletes the given kb""" k_id = get_kb_id(kb_name) run_sql("""DELETE FROM knwKBRVAL WHERE id_knwKB = %s""", (k_id,)) run_sql("""DELETE FROM knwKB WHERE id = %s""", (k_id,)) #finally, delete from COLL table run_sql("""DELETE FROM knwKBDDEF where id_knwKB = %s""", (k_id,)) return True def kb_exists(kb_name): """Returns True if a kb with the given name exists""" rows = run_sql("""SELECT id FROM knwKB WHERE name = %s""", (kb_name,)) if len(rows) > 0: return True else: return False def update_kb(kb_name, new_name, new_description=''): """Updates given kb with new name and (optionally) new description""" k_id = get_kb_id(kb_name) run_sql("""UPDATE knwKB SET name = %s , description = %s WHERE id = %s""", (new_name, new_description, k_id)) return True def add_kb_mapping(kb_name, key, value): """Adds new mapping key->value in given kb""" k_id = get_kb_id(kb_name) run_sql("""REPLACE INTO knwKBRVAL (m_key, m_value, id_knwKB) VALUES (%s, %s, %s)""", (key, value, k_id)) return True def remove_kb_mapping(kb_name, key): """Removes mapping with given key from given kb""" k_id = get_kb_id(kb_name) run_sql("""DELETE FROM knwKBRVAL WHERE m_key = %s AND id_knwKB = %s""", (key, k_id)) return True def kb_mapping_exists(kb_name, key): """Returns true if the mapping with given key exists in the given kb""" if kb_exists(kb_name): k_id = get_kb_id(kb_name) rows = run_sql("""SELECT id FROM knwKBRVAL WHERE m_key = %s AND id_knwKB = %s""", (key, k_id)) if len(rows) > 0: return True return False def 
kb_key_rules(key): """Returns a list of 4-tuples that have a key->value mapping in some KB The format of the tuples is [kb_id, kb_name,key,value] """ res = run_sql("""SELECT f.id, f.name, m.m_key, m.m_value from knwKBRVAL as m JOIN knwKB as f on m.id_knwKB=f.id WHERE m.m_key = %s""", (key, )) return res def kb_value_rules(value): """Returns a list of 4-tuples that have a key->value mapping in some KB The format of the tuples is [kb_id, kb_name,key,value] """ res = run_sql("""SELECT f.id, f.name, m.m_key, m.m_value from knwKBRVAL as m JOIN knwKB as f on m.id_knwKB=f.id WHERE m.m_value = %s""", (value, )) return res def get_kb_mapping_value(kb_name, key): """ Returns a value of the given key from the given kb. If mapping not found, returns None #'default' @param kb_name the name of a knowledge base @param key the key to look for #@param default a default value to return if mapping is not found """ k_id = get_kb_id(kb_name) res = run_sql("""SELECT m_value FROM knwKBRVAL WHERE m_key LIKE %s AND id_knwKB = %s LIMIT 1""", (key, k_id)) if len(res) > 0: return res[0][0] else: return None # default def update_kb_mapping(kb_name, key, new_key, new_value): """Updates the mapping given by key with new key and value""" k_id = get_kb_id(kb_name) run_sql("""UPDATE knwKBRVAL SET m_key = %s , m_value = %s WHERE m_key = %s AND id_knwKB = %s""", (new_key, new_value, key, k_id)) return True #the following functions should be used by a higher level API def get_kba_values(kb_name, searchname="", searchtype="s"): """Returns the "authority file" type of list of values for a given knowledge base. @param kb_name the name of the knowledge base @param searchname search by this.. 
@param searchtype s=substring, e=exact, sw=startswith """ k_id = get_kb_id(kb_name) if searchtype == 's' and searchname: searchname = '%'+searchname+'%' if searchtype == 'sw' and searchname: #startswith searchname = searchname+'%' if not searchname: searchname = '%' res = run_sql("""SELECT m_value FROM knwKBRVAL WHERE m_value LIKE %s AND id_knwKB = %s""", (searchname, k_id)) return res def get_kbr_keys(kb_name, searchkey="", searchvalue="", searchtype='s'): """Returns keys from a knowledge base @param kb_name the name of the knowledge base @param searchkey search using this key @param searchvalue search using this value @param searchtype s=substring, e=exact, sw=startswith """ k_id = get_kb_id(kb_name) if searchtype == 's' and searchkey: searchkey = '%'+searchkey+'%' if searchtype == 's' and searchvalue: searchvalue = '%'+searchvalue+'%' if searchtype == 'sw' and searchvalue: #startswith searchvalue = searchvalue+'%' if not searchvalue: searchvalue = '%' if not searchkey: searchkey = '%' return run_sql("""SELECT m_key FROM knwKBRVAL WHERE m_value LIKE %s AND m_key LIKE %s AND id_knwKB = %s""", (searchvalue, searchkey, k_id)) def get_kbr_values(kb_name, searchkey="%", searchvalue="", searchtype='s', use_memoise=False): """Returns values from a knowledge base Note the intentional asymmetry between searchkey and searchvalue: If searchkey is unspecified or empty for substring, it matches anything, but if it is empty for exact, it matches nothing. If searchvalue is unspecified or empty, it matches anything in all cases. @param kb_name the name of the knowledge base @param searchkey search using this key @param searchvalue search using this value @param searchtype s=substring, e=exact, sw=startswith @param use_memoise: can we memoise while doing lookups? 
@type use_memoise: bool @return a list of values """ if use_memoise: k_id = get_kb_id_memoised(kb_name) else: k_id = get_kb_id(kb_name) if searchtype == 's': searchkey = '%'+searchkey+'%' if searchtype == 's' and searchvalue: searchvalue = '%'+searchvalue+'%' if searchtype == 'sw' and searchvalue: #startswith searchvalue = searchvalue+'%' if not searchvalue: searchvalue = '%' return run_sql("""SELECT m_value FROM knwKBRVAL WHERE m_value LIKE %s AND m_key LIKE %s AND id_knwKB = %s""", (searchvalue, searchkey, k_id)) get_kbr_values_memoised = Memoise(get_kbr_values) def get_kbr_items(kb_name, searchkey="", searchvalue="", searchtype='s'): """Returns dicts of 'key' and 'value' from a knowledge base @param kb_name the name of the knowledge base @param searchkey search using this key @param searchvalue search using this value @param searchtype s=substring, e=exact, sw=startswith @return a list of dictionaries [{'key'=>x, 'value'=>y},..] """ k_id = get_kb_id(kb_name) if searchtype == 's' and searchkey: searchkey = '%'+searchkey+'%' if searchtype == 's' and searchvalue: searchvalue = '%'+searchvalue+'%' if searchtype == 'sw' and searchvalue: #startswith searchvalue = searchvalue+'%' if not searchvalue: searchvalue = '%' if not searchkey: searchkey = '%' res = [] rows = run_sql("""SELECT m_key, m_value FROM knwKBRVAL WHERE m_value LIKE %s AND m_key LIKE %s AND id_knwKB = %s""", (searchvalue, searchkey, k_id)) for row in rows: mdict = {} m_key = row[0] m_value = row[1] mdict['key'] = m_key mdict['value'] = m_value res.append(mdict) return res
gpl-2.0
rgerkin/neuroConstruct
lib/jython/Lib/modjy/modjy_params.py
109
2453
###
#
# Copyright Alan Kennedy.
#
# You may contact the copyright holder at this uri:
#
# http://www.xhaus.com/contact/modjy
#
# The licence under which this code is released is the Apache License v2.0.
#
# The terms and conditions of this license are listed in a file contained
# in the distribution that also contained this file, under the name
# LICENSE.txt.
#
# You may also read a copy of the license at the following web address.
#
# http://modjy.xhaus.com/LICENSE.txt
#
###

try:
    # Python 2 / Jython 2.x location of UserDict.
    from UserDict import UserDict
except ImportError:
    # Python 3 location; keeps the module importable on both lines.
    from collections import UserDict

# Parameter type descriptors: (human-readable type name, conversion callable).
# A conversion callable of None means "store the raw string unchanged".
BOOLEAN = ('boolean', int)
INTEGER = ('integer', int)
FLOAT = ('float', float)
STRING = ('string', None)

# Known servlet parameters mapped to (type descriptor, default value).
modjy_servlet_params = {
    'multithread': (BOOLEAN, 1),
    'cache_callables': (BOOLEAN, 1),
    'reload_on_mod': (BOOLEAN, 0),
    'app_import_name': (STRING, None),
    'app_directory': (STRING, None),
    'app_filename': (STRING, 'application.py'),
    'app_callable_name': (STRING, 'handler'),
    'callable_query_name': (STRING, None),
    'exc_handler': (STRING, 'standard'),
    'log_level': (STRING, 'info'),
    'packages': (STRING, None),
    'classdirs': (STRING, None),
    'extdirs': (STRING, None),
    'initial_env': (STRING, None),
}


class BadParameter(Exception):
    """Raised when a parameter value cannot be converted to its declared type.

    NOTE(review): the original module raised ``BadParameter`` without defining
    or importing it (a guaranteed NameError on bad input); defined locally so
    the intended error actually reaches the caller.
    """


class modjy_param_mgr(UserDict):
    """A dict of parameters that coerces values to their declared types.

    Known parameter names (keys of ``param_types``) are pre-populated with
    their defaults and have assigned values converted via the type's
    conversion callable; unknown names are stored as-is.
    """

    def __init__(self, param_types):
        """Initialise the manager, seeding every known parameter's default.

        @param param_types: mapping of name -> (type descriptor, default).
        """
        UserDict.__init__(self)
        self.param_types = param_types
        for pname, (_typ, default) in self.param_types.items():
            # Route through __setitem__ so defaults get the same conversion
            # treatment as explicitly assigned values.
            self.__setitem__(pname, default)

    def __getitem__(self, name):
        # Fall back to the declared default when no value has been stored.
        return self._get_defaulted_value(name)

    def __setitem__(self, name, value):
        # Convert (when the parameter is known and typed) before storing.
        self.data[name] = self._convert_value(name, value)

    def _convert_value(self, name, value):
        """Convert `value` per the declared type of parameter `name`.

        Unknown parameters and STRING-typed parameters pass through unchanged.
        Raises BadParameter if conversion fails.
        """
        if name in self.param_types:
            typ, _default = self.param_types[name]
            typ_str, typ_func = typ
            if typ_func:
                try:
                    return typ_func(value)
                except ValueError:
                    raise BadParameter(
                        "Illegal value for %s parameter '%s': %s"
                        % (typ_str, name, value)
                    )
        return value

    def _get_defaulted_value(self, name):
        """Return the stored value, else the declared default, else KeyError."""
        if name in self.data:
            return self.data[name]
        if name in self.param_types:
            _typ, default = self.param_types[name]
            return default
        raise KeyError(name)
gpl-2.0
Jgarcia-IAS/SITE
addons/l10n_ar/__init__.py
2120
1456
# -*- encoding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (c) 2011 Cubic ERP - Teradata SAC. (http://cubicerp.com). # # WARNING: This program as such is intended to be used by professional # programmers who take the whole responsability of assessing all potential # consequences resulting from its eventual inadequacies and bugs # End users who are looking for a ready-to-use solution with commercial # garantees and support are strongly adviced to contract a Free Software # Service Company # # This program is Free Software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # ############################################################################## # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
sebgoa/client-python
kubernetes/client/models/v1_horizontal_pod_autoscaler_status.py
2
8064
# coding: utf-8

"""
    Kubernetes

    No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)

    OpenAPI spec version: v1.7.4

    Generated by: https://github.com/swagger-api/swagger-codegen.git
"""

from pprint import pformat

# NOTE(review): the original imported `six.iteritems` (third-party) and an
# unused `re`; `dict.items()` is equivalent for the iteration done here on
# both Python 2 and 3, so the extra dependency was dropped.


class V1HorizontalPodAutoscalerStatus(object):
    """Status of a HorizontalPodAutoscaler, per the v1.7.4 OpenAPI spec.

    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """

    def __init__(self, current_cpu_utilization_percentage=None,
                 current_replicas=None, desired_replicas=None,
                 last_scale_time=None, observed_generation=None):
        """
        V1HorizontalPodAutoscalerStatus - a model defined in Swagger

        :param dict swaggerTypes: The key is attribute name
                                  and the value is attribute type.
        :param dict attributeMap: The key is attribute name
                                  and the value is json key in definition.
        """
        # Attribute name -> declared swagger type (used by to_dict()).
        self.swagger_types = {
            'current_cpu_utilization_percentage': 'int',
            'current_replicas': 'int',
            'desired_replicas': 'int',
            'last_scale_time': 'datetime',
            'observed_generation': 'int'
        }

        # Attribute name -> JSON field name in the API definition.
        self.attribute_map = {
            'current_cpu_utilization_percentage': 'currentCPUUtilizationPercentage',
            'current_replicas': 'currentReplicas',
            'desired_replicas': 'desiredReplicas',
            'last_scale_time': 'lastScaleTime',
            'observed_generation': 'observedGeneration'
        }

        # NOTE(review): the generated constructor assigns the private fields
        # directly, bypassing the setters' None-validation (so default
        # construction with all-None succeeds); kept as-is for parity with
        # the code generator's contract.
        self._current_cpu_utilization_percentage = current_cpu_utilization_percentage
        self._current_replicas = current_replicas
        self._desired_replicas = desired_replicas
        self._last_scale_time = last_scale_time
        self._observed_generation = observed_generation

    @property
    def current_cpu_utilization_percentage(self):
        """
        Gets the current_cpu_utilization_percentage of this V1HorizontalPodAutoscalerStatus.
        current average CPU utilization over all pods, represented as a percentage of requested CPU, e.g. 70 means that an average pod is using now 70% of its requested CPU.

        :return: The current_cpu_utilization_percentage of this V1HorizontalPodAutoscalerStatus.
        :rtype: int
        """
        return self._current_cpu_utilization_percentage

    @current_cpu_utilization_percentage.setter
    def current_cpu_utilization_percentage(self, current_cpu_utilization_percentage):
        """
        Sets the current_cpu_utilization_percentage of this V1HorizontalPodAutoscalerStatus.
        current average CPU utilization over all pods, represented as a percentage of requested CPU, e.g. 70 means that an average pod is using now 70% of its requested CPU.

        :param current_cpu_utilization_percentage: The current_cpu_utilization_percentage of this V1HorizontalPodAutoscalerStatus.
        :type: int
        """
        self._current_cpu_utilization_percentage = current_cpu_utilization_percentage

    @property
    def current_replicas(self):
        """
        Gets the current_replicas of this V1HorizontalPodAutoscalerStatus.
        current number of replicas of pods managed by this autoscaler.

        :return: The current_replicas of this V1HorizontalPodAutoscalerStatus.
        :rtype: int
        """
        return self._current_replicas

    @current_replicas.setter
    def current_replicas(self, current_replicas):
        """
        Sets the current_replicas of this V1HorizontalPodAutoscalerStatus.
        current number of replicas of pods managed by this autoscaler.

        :param current_replicas: The current_replicas of this V1HorizontalPodAutoscalerStatus.
        :type: int
        """
        # Required field in the spec: reject explicit None assignment.
        if current_replicas is None:
            raise ValueError("Invalid value for `current_replicas`, must not be `None`")

        self._current_replicas = current_replicas

    @property
    def desired_replicas(self):
        """
        Gets the desired_replicas of this V1HorizontalPodAutoscalerStatus.
        desired number of replicas of pods managed by this autoscaler.

        :return: The desired_replicas of this V1HorizontalPodAutoscalerStatus.
        :rtype: int
        """
        return self._desired_replicas

    @desired_replicas.setter
    def desired_replicas(self, desired_replicas):
        """
        Sets the desired_replicas of this V1HorizontalPodAutoscalerStatus.
        desired number of replicas of pods managed by this autoscaler.

        :param desired_replicas: The desired_replicas of this V1HorizontalPodAutoscalerStatus.
        :type: int
        """
        # Required field in the spec: reject explicit None assignment.
        if desired_replicas is None:
            raise ValueError("Invalid value for `desired_replicas`, must not be `None`")

        self._desired_replicas = desired_replicas

    @property
    def last_scale_time(self):
        """
        Gets the last_scale_time of this V1HorizontalPodAutoscalerStatus.
        last time the HorizontalPodAutoscaler scaled the number of pods; used by the autoscaler to control how often the number of pods is changed.

        :return: The last_scale_time of this V1HorizontalPodAutoscalerStatus.
        :rtype: datetime
        """
        return self._last_scale_time

    @last_scale_time.setter
    def last_scale_time(self, last_scale_time):
        """
        Sets the last_scale_time of this V1HorizontalPodAutoscalerStatus.
        last time the HorizontalPodAutoscaler scaled the number of pods; used by the autoscaler to control how often the number of pods is changed.

        :param last_scale_time: The last_scale_time of this V1HorizontalPodAutoscalerStatus.
        :type: datetime
        """
        self._last_scale_time = last_scale_time

    @property
    def observed_generation(self):
        """
        Gets the observed_generation of this V1HorizontalPodAutoscalerStatus.
        most recent generation observed by this autoscaler.

        :return: The observed_generation of this V1HorizontalPodAutoscalerStatus.
        :rtype: int
        """
        return self._observed_generation

    @observed_generation.setter
    def observed_generation(self, observed_generation):
        """
        Sets the observed_generation of this V1HorizontalPodAutoscalerStatus.
        most recent generation observed by this autoscaler.

        :param observed_generation: The observed_generation of this V1HorizontalPodAutoscalerStatus.
        :type: int
        """
        self._observed_generation = observed_generation

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}

        # Recursively serialize nested models / lists / dicts of models.
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    x.to_dict() if hasattr(x, "to_dict") else x
                    for x in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    k: v.to_dict() if hasattr(v, "to_dict") else v
                    for k, v in value.items()
                }
            else:
                result[attr] = value

        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        if not isinstance(other, V1HorizontalPodAutoscalerStatus):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
apache-2.0
tejo-esperanto/pasportaservo
maps/urls.py
3
1078
from django.conf.urls import url
from django.utils.text import format_lazy
from django.utils.translation import pgettext_lazy

from .views import (
    CountryDataView, EndpointsView, MapStyleView, MapTypeConfigureView,
    PublicDataView, WorldMapView,
)

# URL patterns for the maps app.  Several path fragments are localized with
# pgettext_lazy and spliced into the pattern via format_lazy, so the literal
# URL text differs per language while the view wiring stays the same.
urlpatterns = [
    # GIS endpoint discovery.
    url(r'^endpoints$', EndpointsView.as_view(), name='gis_endpoints'),
    # World-wide public dataset, e.g. "locations.geojson" (localized segment).
    url(format_lazy(r'^{places}\.geojson$', places=pgettext_lazy("URL", 'locations')),
        PublicDataView.as_view(), name='world_map_public_data'),
    # Per-country dataset: two-letter uppercase country code, then an optional
    # "/<book>:0|1" filter segment.  The doubled braces {{2}} escape the regex
    # quantifier from format_lazy's placeholder syntax.
    url(format_lazy(
            r'^(?P<country_code>[A-Z]{{2}})(?:/{book}\:(?P<in_book>(0|1)))?/{places}\.geojson$',
            book=pgettext_lazy("URL", 'book'), places=pgettext_lazy("URL", 'locations')),
        CountryDataView.as_view(), name='country_map_data'),
    # GL style document for the named map style.
    url(r'^(?P<style>\w+)-gl-style\.json$', MapStyleView.as_view(), name='map_style'),
    # The world map page itself.
    url(r'^$', WorldMapView.as_view(), name='world_map'),
    # Map type configuration; only type values 0 and 3 are accepted.
    url(format_lazy(r'^{type}\:(?P<map_type>(0|3))/$', type=pgettext_lazy("URL", 'type')),
        MapTypeConfigureView.as_view(), name='map_type_setup'),
]
agpl-3.0
CO600GOL/Game_of_life
GameEngine/game/data_structures/grid.py
1
1632
""" This module contains the logic representing the grid on which a game is played. A grid, in this sense, is simply a collection cells set into rows and columns. The cells can, for the purposes of the project, only be square. """ from game.data_structures.cell import Cell def create_empty_grid(): """ This function creates an empty ten-by-ten grid for use in initialisation of a grid object. @return The collection of cells to use in a grid. """ cells = [] for x in range(0, 10): cells.append([]) for _y in range(0, 10): cells[x].append(Cell()) # Cells is a 2-dimensional array return cells class Grid(object): """ This class represents a grid board on which a game can be played. The grid contains a number of cells that have one state at any possible point. """ def __init__(self, cell_pattern=create_empty_grid()): """ Ctor - Initialises the grid, with a two-dimensional array of cells. @param cell_pattern If the a cell pattern is input as a parameter, it is that cell pattern that is set. If not all the cells are set to dead. """ self.set_cells(cell_pattern) def get_cells(self): """ This pattern retrieves the cells contained within this grid. @return The grid cells. """ return self._cells def set_cells(self, cells): """ This method sets the cells inside the grid to the given configuration. @param cells The cell configuration to give to the grid. """ self._cells = cells
mit
ianstalk/Flexget
flexget/components/notify/notifiers/cronitor.py
3
2829
import socket

from loguru import logger
from requests.exceptions import RequestException

from flexget import plugin
from flexget.event import event
from flexget.plugin import PluginWarning
from flexget.utils.requests import Session as RequestSession

# Shared session so every ping to cronitor gets the same retry policy.
requests = RequestSession(max_retries=3)

plugin_name = "cronitor"

logger = logger.bind(name=plugin_name)


class Cronitor:
    """
    Example::

      cronitor: ABC123

    Or::

      cronitor:
        monitor_code: ABC123
        on_start: yes
        on_abort: no
        message: Ping
        host: foo.bar
        auth_key: secret
    """

    # Ping endpoint; `status` is one of 'run', 'fail' or 'complete'.
    base_url = "https://cronitor.link/{monitor_code}/{status}"

    schema = {
        "oneOf": [
            {
                "type": "object",
                "properties": {
                    "monitor_code": {"type": "string"},
                    "on_start": {"type": "boolean"},
                    "on_abort": {"type": "boolean"},
                    "message": {"type": "string"},
                    "host": {"type": "string"},
                    "auth_key": {"type": "string"},
                },
                "required": ["monitor_code"],
                "additionalProperties": False,
            },
            # Shorthand form: just the monitor code as a bare string.
            {"type": "string"},
        ]
    }

    @staticmethod
    def prepare_config(config):
        # Normalise the shorthand string form into the dict form and fill in
        # defaults (ping on start/abort, report the local hostname).
        if isinstance(config, str):
            config = {"monitor_code": config}
        config.setdefault("on_start", True)
        config.setdefault("on_abort", True)
        config.setdefault("host", socket.gethostname())
        return config

    def _send_request(self, status, config, task_name):
        """Ping cronitor with `status`; raises PluginWarning on HTTP failure."""
        url = self.base_url.format(monitor_code=config["monitor_code"], status=status)
        # Default message is "<task> task <status>" unless overridden.
        message = config.get(
            "message", "{task} task {status}".format(task=task_name, status=status)
        )
        data = {"msg": message, "host": config["host"]}
        if config.get("auth_key"):
            data["auth_key"] = config["auth_key"]
        try:
            rsp = requests.get(url, params=data)
            rsp.raise_for_status()
        except RequestException as e:
            raise PluginWarning("Could not report to cronitor: {}".format(e))

    def on_task_start(self, task, config):
        # Report 'run' at task start (unless disabled via on_start: no).
        config = self.prepare_config(config)
        if not config["on_start"]:
            return
        self._send_request("run", config, task.name)

    def on_task_abort(self, task, config):
        # Report 'fail' when the task aborts (unless disabled via on_abort: no).
        config = self.prepare_config(config)
        if not config["on_abort"]:
            return
        self._send_request("fail", config, task.name)

    def on_task_exit(self, task, config):
        # Report 'complete' on normal task exit; always sent.
        config = self.prepare_config(config)
        self._send_request("complete", config, task.name)


@event("plugin.register")
def register_plugin():
    plugin.register(Cronitor, plugin_name, api_ver=2)
mit
WafaaT/spark-tk
python/sparktk/frame/ops/matrix_covariance_matrix.py
14
2672
# vim: set encoding=utf-8

# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#       http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

def matrix_covariance_matrix(self, matrix_column_name):
    """
    Compute the Covariance Matrix of matrices stored in a frame

    Parameters
    ----------

    :param matrix_column_name: Name of the column to compute the covariance matrix on
    :return: (Frame) returns the frame with a new column storing the covariance matrix for the corresponding matrix

    Calculate the covariance matrix for each matrix in column 'matrix_column_name' of a frame using the following:

    Element (i,j) of the covariance matrix for a given matrix X is computed as: ((Xi - Mi)(Xj - Mj))
    where Mi is the mean

    Examples
    --------
        >>> from sparktk import dtypes
        >>> data = [[1, [[1,2,3,5],[2,3,5,6],[4,6,7,3],[8,9,2,4]]]]
        >>> schema = [('id', int),('pixeldata', dtypes.matrix)]
        >>> my_frame = tc.frame.create(data, schema)

        >>> my_frame.inspect()
        [#]  id  pixeldata
        ============================
        [0]   1  [[ 1.  2.  3.  5.]
        [ 2.  3.  5.  6.]
        [ 4.  6.  7.  3.]
        [ 8.  9.  2.  4.]]

        Compute the covariance matrix for the matrices in 'pixeldata' column of the frame

        >>> my_frame.matrix_covariance_matrix('pixeldata')

        A new column gets added to the existing frame storing the covariance matrix

        >>> my_frame.inspect()
        [#]  id  pixeldata
        ============================
        [0]   1  [[ 1.  2.  3.  5.]
        [ 2.  3.  5.  6.]
        [ 4.  6.  7.  3.]
        [ 8.  9.  2.  4.]]
        <BLANKLINE>
        [#]  CovarianceMatrix_pixeldata
        ============================================================
        [0]  [[  2.91666667   3.          -1.          -3.75      ]
        [  3.           3.33333333  -0.33333333  -5.        ]
        [ -1.          -0.33333333   3.33333333  -1.        ]
        [ -3.75        -5.          -1.          10.91666667]]

    """
    # Delegates to the Scala backend, which appends a
    # 'CovarianceMatrix_<column>' column to this frame.
    # NOTE(review): despite the ':return:' doc above, nothing is returned here
    # (the call's result is discarded) — the new column appears on the
    # existing frame, as the doctest shows.  Confirm against the Scala API.
    self._scala.matrixCovarianceMatrix(matrix_column_name)
apache-2.0
robmcmullen/peppy
peppy/major_modes/groovy.py
1
1612
# peppy Copyright (c) 2006-2009 Rob McMullen # Licenced under the GPLv2; see http://peppy.flipturn.org for more info """Groovy programming language editing support. Major mode for editing Groovy files. Supporting actions and minor modes should go here only if they are uniquely applicable to this major mode and can't be used in other major modes. If actions can be used with multiple major modes, they should be put in a separate plugin in the peppy/plugins directory. """ import os import wx import wx.stc from peppy.lib.foldexplorer import * from peppy.lib.autoindent import * from peppy.yapsy.plugins import * from peppy.major import * from peppy.editra.style_specs import unique_keywords from peppy.fundamental import FundamentalMode class GroovyMode(FundamentalMode): """Stub major mode for editing Groovy files. This major mode has been automatically generated and is a boilerplate/ placeholder major mode. Enhancements to this mode are appreciated! """ keyword = 'Groovy' editra_synonym = 'Groovy' stc_lexer_id = wx.stc.STC_LEX_CPP start_line_comment = u'//' end_line_comment = '' icon = 'icons/page_white.png' default_classprefs = ( StrParam('extensions', 'groovy', fullwidth=True), StrParam('keyword_set_0', unique_keywords[77], hidden=False, fullwidth=True), StrParam('keyword_set_1', unique_keywords[78], hidden=False, fullwidth=True), ) class GroovyModePlugin(IPeppyPlugin): """Plugin to register modes and user interface for Groovy """ def getMajorModes(self): yield GroovyMode
gpl-2.0
ademmers/ansible
test/units/module_utils/basic/test_log.py
120
7027
# -*- coding: utf-8 -*-
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

# Tests for AnsibleModule.log(): smoke tests against the live syslog/journald
# daemons, plus mocked tests that pin the exact bytes/str handed to each sink.
from __future__ import absolute_import, division, print_function
__metaclass__ = type

import syslog
from itertools import product

import pytest

import ansible.module_utils.basic
from ansible.module_utils.six import PY3


class TestAnsibleModuleLogSmokeTest:
    # Text, the same text utf-8 encoded, and a deliberately invalid-utf8
    # byte string, to exercise every input flavour log() accepts.
    DATA = [u'Text string', u'Toshio くらとみ non-ascii test']
    DATA = DATA + [d.encode('utf-8') for d in DATA]
    DATA += [b'non-utf8 :\xff: test']

    # pylint bug: https://github.com/PyCQA/pylint/issues/511
    @pytest.mark.parametrize('msg, stdin', ((m, {}) for m in DATA), indirect=['stdin'])  # pylint: disable=undefined-variable
    def test_smoketest_syslog(self, am, mocker, msg):
        # These talk to the live daemons on the system.  Need to do this to
        # show that what we send doesn't cause an issue once it gets to the
        # daemon.  These are just smoketests to test that we don't fail.
        mocker.patch('ansible.module_utils.basic.has_journal', False)

        am.log(u'Text string')
        am.log(u'Toshio くらとみ non-ascii test')
        am.log(b'Byte string')
        am.log(u'Toshio くらとみ non-ascii test'.encode('utf-8'))
        am.log(b'non-utf8 :\xff: test')

    @pytest.mark.skipif(not ansible.module_utils.basic.has_journal, reason='python systemd bindings not installed')
    # pylint bug: https://github.com/PyCQA/pylint/issues/511
    @pytest.mark.parametrize('msg, stdin', ((m, {}) for m in DATA), indirect=['stdin'])  # pylint: disable=undefined-variable
    def test_smoketest_journal(self, am, mocker, msg):
        # These talk to the live daemons on the system.  Need to do this to
        # show that what we send doesn't cause an issue once it gets to the
        # daemon.  These are just smoketests to test that we don't fail.
        mocker.patch('ansible.module_utils.basic.has_journal', True)

        am.log(u'Text string')
        am.log(u'Toshio くらとみ non-ascii test')
        am.log(b'Byte string')
        am.log(u'Toshio くらとみ non-ascii test'.encode('utf-8'))
        am.log(b'non-utf8 :\xff: test')


class TestAnsibleModuleLogSyslog:
    """Test the AnsibleModule Log Method"""

    # (input, expected-argument-to-syslog.syslog) pairs.  On py2 syslog wants
    # encoded bytes; on py3 it wants text, with invalid utf-8 replaced.
    PY2_OUTPUT_DATA = [
        (u'Text string', b'Text string'),
        (u'Toshio くらとみ non-ascii test', u'Toshio くらとみ non-ascii test'.encode('utf-8')),
        (b'Byte string', b'Byte string'),
        (u'Toshio くらとみ non-ascii test'.encode('utf-8'), u'Toshio くらとみ non-ascii test'.encode('utf-8')),
        (b'non-utf8 :\xff: test', b'non-utf8 :\xff: test'.decode('utf-8', 'replace').encode('utf-8')),
    ]

    PY3_OUTPUT_DATA = [
        (u'Text string', u'Text string'),
        (u'Toshio くらとみ non-ascii test', u'Toshio くらとみ non-ascii test'),
        (b'Byte string', u'Byte string'),
        (u'Toshio くらとみ non-ascii test'.encode('utf-8'), u'Toshio くらとみ non-ascii test'),
        (b'non-utf8 :\xff: test', b'non-utf8 :\xff: test'.decode('utf-8', 'replace')),
    ]

    OUTPUT_DATA = PY3_OUTPUT_DATA if PY3 else PY2_OUTPUT_DATA

    @pytest.mark.parametrize('no_log, stdin', (product((True, False), [{}])), indirect=['stdin'])
    def test_no_log(self, am, mocker, no_log):
        """Test that when no_log is set, logging does not occur"""
        mock_syslog = mocker.patch('syslog.syslog', autospec=True)
        mocker.patch('ansible.module_utils.basic.has_journal', False)
        am.no_log = no_log
        am.log('unittest no_log')
        if no_log:
            assert not mock_syslog.called
        else:
            mock_syslog.assert_called_once_with(syslog.LOG_INFO, 'unittest no_log')

    # pylint bug: https://github.com/PyCQA/pylint/issues/511
    @pytest.mark.parametrize('msg, param, stdin',
                             ((m, p, {}) for m, p in OUTPUT_DATA),  # pylint: disable=undefined-variable
                             indirect=['stdin'])
    def test_output_matches(self, am, mocker, msg, param):
        """Check that log messages are sent correctly"""
        mocker.patch('ansible.module_utils.basic.has_journal', False)
        mock_syslog = mocker.patch('syslog.syslog', autospec=True)

        am.log(msg)
        mock_syslog.assert_called_once_with(syslog.LOG_INFO, param)


@pytest.mark.skipif(not ansible.module_utils.basic.has_journal, reason='python systemd bindings not installed')
class TestAnsibleModuleLogJournal:
    """Test the AnsibleModule Log Method"""

    # journald always receives text; invalid utf-8 is replaced.
    OUTPUT_DATA = [
        (u'Text string', u'Text string'),
        (u'Toshio くらとみ non-ascii test', u'Toshio くらとみ non-ascii test'),
        (b'Byte string', u'Byte string'),
        (u'Toshio くらとみ non-ascii test'.encode('utf-8'), u'Toshio くらとみ non-ascii test'),
        (b'non-utf8 :\xff: test', b'non-utf8 :\xff: test'.decode('utf-8', 'replace')),
    ]

    @pytest.mark.parametrize('no_log, stdin', (product((True, False), [{}])), indirect=['stdin'])
    def test_no_log(self, am, mocker, no_log):
        journal_send = mocker.patch('systemd.journal.send')
        am.no_log = no_log
        am.log('unittest no_log')
        if no_log:
            assert not journal_send.called
        else:
            assert journal_send.called == 1
            # Message
            # call_args is a 2-tuple of (arg_list, kwarg_dict)
            assert journal_send.call_args[1]['MESSAGE'].endswith('unittest no_log'), 'Message was not sent to log'
            # log adds this journal field
            assert 'MODULE' in journal_send.call_args[1]
            assert 'basic.py' in journal_send.call_args[1]['MODULE']

    # pylint bug: https://github.com/PyCQA/pylint/issues/511
    @pytest.mark.parametrize('msg, param, stdin',
                             ((m, p, {}) for m, p in OUTPUT_DATA),  # pylint: disable=undefined-variable
                             indirect=['stdin'])
    def test_output_matches(self, am, mocker, msg, param):
        journal_send = mocker.patch('systemd.journal.send')
        am.log(msg)
        assert journal_send.call_count == 1, 'journal.send not called exactly once'
        assert journal_send.call_args[1]['MESSAGE'].endswith(param)

    @pytest.mark.parametrize('stdin', ({},), indirect=['stdin'])
    def test_log_args(self, am, mocker):
        # Extra log_args kwargs become additional journald fields.
        journal_send = mocker.patch('systemd.journal.send')
        am.log('unittest log_args', log_args=dict(TEST='log unittest'))
        assert journal_send.called == 1
        assert journal_send.call_args[1]['MESSAGE'].endswith('unittest log_args'), 'Message was not sent to log'

        # log adds this journal field
        assert 'MODULE' in journal_send.call_args[1]
        assert 'basic.py' in journal_send.call_args[1]['MODULE']

        # We added this journal field
        assert 'TEST' in journal_send.call_args[1]
        assert 'log unittest' in journal_send.call_args[1]['TEST']
gpl-3.0
ashemedai/ansible
lib/ansible/modules/notification/pushbullet.py
70
6195
#!/usr/bin/python
# -*- coding: utf-8 -*-

# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'community'}

DOCUMENTATION = '''
---
author: "Willy Barro (@willybarro)"
requirements: [ pushbullet.py ]
module: pushbullet
short_description: Sends notifications to Pushbullet
description:
   - This module sends push notifications via Pushbullet to channels or devices.
version_added: "2.0"
options:
    api_key:
        description:
            - Push bullet API token
        required: true
    channel:
        description:
            - The channel TAG you wish to broadcast a push notification,
              as seen on the "My Channels" > "Edit your channel" at
              Pushbullet page.
        required: false
        default: null
    device:
        description:
            - The device NAME you wish to send a push notification,
              as seen on the Pushbullet main page.
        required: false
        default: null
    push_type:
        description:
          - Thing you wish to push.
        required: false
        default: note
        choices: [ "note", "link" ]
    title:
        description:
          - Title of the notification.
        required: true
    body:
        description:
          - Body of the notification, e.g. Details of the fault you're alerting.
        required: false
    url:
        description:
          - URL to send, used when I(push_type) is C(link).
        required: false
        default: null

notes:
    - Requires pushbullet.py Python package on the remote host.
      You can install it via pip with ($ pip install pushbullet.py).
      See U(https://github.com/randomchars/pushbullet.py)
'''

EXAMPLES = '''
# Sends a push notification to a device
- pushbullet:
    api_key: "ABC123abc123ABC123abc123ABC123ab"
    device: "Chrome"
    title: "You may see this on Google Chrome"

# Sends a link to a device
- pushbullet:
    api_key: ABC123abc123ABC123abc123ABC123ab
    device: Chrome
    push_type: link
    title: Ansible Documentation
    body: http://docs.ansible.com/

# Sends a push notification to a channel
- pushbullet:
    api_key: ABC123abc123ABC123abc123ABC123ab
    channel: my-awesome-channel
    title: Broadcasting a message to the #my-awesome-channel folks

# Sends a push notification with title and body to a channel
- pushbullet:
    api_key: ABC123abc123ABC123abc123ABC123ab
    channel: my-awesome-channel
    title: ALERT! Signup service is down
    body: Error rate on signup service is over 90% for more than 2 minutes
'''

# Import guard: the module must still load (and report a friendly error)
# when pushbullet.py is not installed on the target host.
try:
    from pushbullet import PushBullet
    from pushbullet.errors import InvalidKeyError, PushError
except ImportError:
    pushbullet_found = False
else:
    pushbullet_found = True

# ===========================================
# Main
#

def main():
    module = AnsibleModule(
        argument_spec = dict(
            api_key    = dict(type='str', required=True, no_log=True),
            channel    = dict(type='str', default=None),
            device     = dict(type='str', default=None),
            push_type  = dict(type='str', default="note", choices=['note', 'link']),
            title      = dict(type='str', required=True),
            body       = dict(type='str', default=None),
            url        = dict(type='str', default=None),
        ),
        mutually_exclusive = (
            ['channel', 'device'],
        ),
        supports_check_mode=True
    )

    api_key   = module.params['api_key']
    channel   = module.params['channel']
    device    = module.params['device']
    push_type = module.params['push_type']
    title     = module.params['title']
    body      = module.params['body']
    url       = module.params['url']

    if not pushbullet_found:
        module.fail_json(msg="Python 'pushbullet.py' module is required. "
                             "Install via: $ pip install pushbullet.py")

    # Init pushbullet
    try:
        pb = PushBullet(api_key)
        target = None
    except InvalidKeyError:
        module.fail_json(msg="Invalid api_key")

    # Checks for channel/device
    if device is None and channel is None:
        module.fail_json(msg="You need to provide a channel or a device.")

    # Search for given device
    if device is not None:
        devices_by_nickname = dict((d.nickname, d) for d in pb.devices)
        if device in devices_by_nickname:
            target = devices_by_nickname[device]
        else:
            module.fail_json(msg="Device '%s' not found. Available devices: '%s'"
                                 % (device, "', '".join(devices_by_nickname.keys())))

    # Search for given channel
    if channel is not None:
        channels_by_tag = dict((c.channel_tag, c) for c in pb.channels)
        if channel in channels_by_tag:
            target = channels_by_tag[channel]
        else:
            module.fail_json(msg="Channel '%s' not found. Available channels: '%s'"
                                 % (channel, "', '".join(channels_by_tag.keys())))

    # If in check mode, exit saying that we succeeded
    if module.check_mode:
        module.exit_json(changed=False, msg="OK")

    # Send push notification
    try:
        if push_type == "link":
            target.push_link(title, url, body)
        else:
            target.push_note(title, body)
        # NOTE(review): changed=False is kept for backward compatibility even
        # though a notification was sent — confirm before flipping to True.
        module.exit_json(changed=False, msg="OK")
    except PushError as e:
        module.fail_json(msg="An error occurred, Pushbullet's response: %s" % str(e))
    # (Removed unreachable fail_json here: exit_json/fail_json both raise
    # SystemExit, so control never reached it.)

# import module snippets
from ansible.module_utils.basic import AnsibleModule

if __name__ == '__main__':
    main()
gpl-3.0
SHMEDIALIMITED/tallest-tower
node_modules/grunt/node_modules/gzip-js/node_modules/deflate-js/test/runner.py
177
1688
#!/usr/bin/env python import argparse import deflate import inflate from colorama import Fore testDir = 'test-files' outDir = 'test-outs' allPassed = True parser = argparse.ArgumentParser(description='Process command-line arguments') parser.add_argument('--test', metavar='path/to/file', type=str, default='both', nargs='?', help='Which test to run: deflate, inflate, or both') parser.add_argument('--file', '-f', metavar='path/to/file', type=str, nargs='?', help='Path to file to use for test') parser.add_argument('--level', '-l', metavar='#', type=int, nargs='?', help='Compression level') parser.add_argument('--no-delete', const=True, default=False, nargs='?', help='Don\'t delete files produced for test') args = parser.parse_args() delete = not getattr(args, 'no_delete') level = getattr(args, 'level') inFile = getattr(args, 'file') test = getattr(args, 'test') if test == 'deflate' or test == 'both': print Fore.CYAN + 'Running deflate tests' + Fore.RESET passed = True if inFile != None: passed = deflate.runTest(inFile, level, delete, outDir) else: passed = deflate.runAll(level, delete, testDir, outDir) # if we fail one test, we fail the entire test allPassed = allPassed and passed if test == 'inflate' or test == 'both': print Fore.CYAN + 'Running inflate tests' + Fore.RESET passed = True if inFile != None: passed = inflate.runTest(inFile, level, delete, outDir) else: passed = inflate.runAll(level, delete, testDir, outDir) # if we fail one test, we fail the entire test allPassed = allPassed and passed if allPassed: print Fore.GREEN + 'All tests passed!' + Fore.RESET else: print Fore.RED + 'Automated test failed' + Fore.RESET
mit
CodEnFisH/palantir
hadoop/src/contrib/hod/testing/testRingmasterRPCs.py
182
5759
#Licensed to the Apache Software Foundation (ASF) under one
#or more contributor license agreements.  See the NOTICE file
#distributed with this work for additional information
#regarding copyright ownership.  The ASF licenses this file
#to you under the Apache License, Version 2.0 (the
#"License"); you may not use this file except in compliance
#with the License.  You may obtain a copy of the License at

#     http://www.apache.org/licenses/LICENSE-2.0

#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.

# Unit tests for the HOD ringmaster RPC layer (_LogMasterSources).
import unittest, os, sys, re, threading, time
import logging

# Make the HOD source tree importable when run as a script.
myDirectory = os.path.realpath(sys.argv[0])
rootDirectory = re.sub("/testing/.*", "", myDirectory)

sys.path.append(rootDirectory)

from testing.lib import BaseTestSuite

# Test classes excluded from the suite (the MINITEST classes are examples).
excludes = ['test_MINITEST1', 'test_MINITEST2']

from hodlib.GridServices import *
from hodlib.Common.desc import ServiceDesc
from hodlib.RingMaster.ringMaster import _LogMasterSources

# Minimal in-memory HOD configuration shared by all test cases.
configuration = {
    'hod': {},
    'resource_manager': {
        'id': 'torque',
        'batch-home': '/home/y/'
    },
    'ringmaster': {
        'max-connect': 2,
        'max-master-failures': 5
    },
    'hodring': {
    },
    'gridservice-mapred': {
        'id': 'mapred'
    },
    'gridservice-hdfs': {
        'id': 'hdfs'
    },
    'servicedesc': {},
    'nodepooldesc': {},
}

# All test-case classes should have the naming convention test_.*
class test_MINITEST1(unittest.TestCase):
    def setUp(self):
        pass

    # All testMethods have to have their names start with 'test'
    def testSuccess(self):
        pass

    def testFailure(self):
        pass

    def tearDown(self):
        pass


class test_Multiple_Workers(unittest.TestCase):
    def setUp(self):
        self.config = configuration
        self.config['ringmaster']['workers_per_ring'] = 2

        # Chained assignment: the descriptors are stored in the shared config
        # *and* kept as locals for constructing the services below.
        hdfsDesc = self.config['servicedesc']['hdfs'] = ServiceDesc(self.config['gridservice-hdfs'])
        mrDesc = self.config['servicedesc']['mapred'] = ServiceDesc(self.config['gridservice-mapred'])

        self.hdfs = Hdfs(hdfsDesc, [], 0, 19, workers_per_ring = \
            self.config['ringmaster']['workers_per_ring'])
        self.mr = MapReduce(mrDesc, [], 1, 19, workers_per_ring = \
            self.config['ringmaster']['workers_per_ring'])

        self.log = logging.getLogger()
        pass

    # All testMethods have to have their names start with 'test'
    def testWorkersCount(self):
        self.serviceDict = {}
        self.serviceDict[self.hdfs.getName()] = self.hdfs
        self.serviceDict[self.mr.getName()] = self.mr
        self.rpcSet = _LogMasterSources(self.serviceDict, self.config, None, self.log, None)

        # First host receives the HDFS master (namenode) commands.
        cmdList = self.rpcSet.getCommand('host1')
        self.assertEquals(len(cmdList), 2)
        self.assertEquals(cmdList[0].dict['argv'][0], 'namenode')
        self.assertEquals(cmdList[1].dict['argv'][0], 'namenode')
        addParams = ['fs.default.name=host1:51234', 'dfs.http.address=host1:5125']
        self.rpcSet.addMasterParams('host1', addParams)
        # print "NN is launched"

        # Second host receives the MapReduce master (jobtracker).
        cmdList = self.rpcSet.getCommand('host2')
        self.assertEquals(len(cmdList), 1)
        self.assertEquals(cmdList[0].dict['argv'][0], 'jobtracker')
        addParams = ['mapred.job.tracker=host2:51236',
                     'mapred.job.tracker.http.address=host2:51237']
        self.rpcSet.addMasterParams('host2', addParams)
        # print "JT is launched"

        cmdList = self.rpcSet.getCommand('host3')
        # Verify the workers count per ring : TTs + DNs
        self.assertEquals(len(cmdList), self.config['ringmaster']['workers_per_ring'] * 2)
        pass

    def testFailure(self):
        pass

    def tearDown(self):
        pass


class test_GetCommand(unittest.TestCase):
    def setUp(self):
        self.config = configuration

        hdfsDesc = self.config['servicedesc']['hdfs'] = ServiceDesc(self.config['gridservice-hdfs'])
        mrDesc = self.config['servicedesc']['mapred'] = ServiceDesc(self.config['gridservice-mapred'])

        # API : serviceObj = service(desc, workDirs, reqNodes, version)
        self.hdfs = Hdfs(hdfsDesc, [], 0, 17)
        self.hdfsExternal = HdfsExternal(hdfsDesc, [], 17)
        self.mr = MapReduce(mrDesc, [], 1, 17)
        self.mrExternal = MapReduceExternal(mrDesc, [], 17)

        self.log = logging.getLogger()
        pass

    # All testMethods have to have their names start with 'test'
    def testBothInternal(self):
        self.serviceDict = {}
        self.serviceDict[self.hdfs.getName()] = self.hdfs
        self.serviceDict[self.mr.getName()] = self.mr
        self.rpcSet = _LogMasterSources(self.serviceDict, self.config, None, self.log, None)

        cmdList = self.rpcSet.getCommand('localhost')
        self.assertEquals(cmdList.__len__(), 2)
        self.assertEquals(cmdList[0].dict['argv'][0], 'namenode')
        self.assertEquals(cmdList[1].dict['argv'][0], 'namenode')
        pass

    def tearDown(self):
        pass


class RingmasterRPCsTestSuite(BaseTestSuite):
    def __init__(self):
        # suite setup
        BaseTestSuite.__init__(self, __name__, excludes)
        pass

    def cleanUp(self):
        # suite tearDown
        pass


def RunRingmasterRPCsTests():
    # modulename_suite
    suite = RingmasterRPCsTestSuite()
    testResult = suite.runTests()
    suite.cleanUp()
    return testResult

if __name__ == "__main__":
    RunRingmasterRPCsTests()
apache-2.0
scottferg/web-console
django/views/defaults.py
12
1706
from django import http
from django.views.decorators.csrf import requires_csrf_token
from django.template import Context, RequestContext, loader


# These handlers can run when CsrfViewMiddleware.process_view has not,
# therefore @requires_csrf_token is needed in case the template uses
# {% csrf_token %}.
@requires_csrf_token
def page_not_found(request, template_name='404.html'):
    """
    Default 404 handler.

    Templates: `404.html`
    Context:
        request_path
            The path of the requested URL (e.g., '/app/pages/bad_page/')
    """
    # A 404.html template must exist in the project.
    template = loader.get_template(template_name)
    context = RequestContext(request, {'request_path': request.path})
    return http.HttpResponseNotFound(template.render(context))


@requires_csrf_token
def server_error(request, template_name='500.html'):
    """
    500 error handler.

    Templates: `500.html`
    Context: None
    """
    # A 500.html template must exist in the project; rendered with an
    # empty context so a broken context processor can't break the 500 page.
    template = loader.get_template(template_name)
    return http.HttpResponseServerError(template.render(Context({})))


def shortcut(request, content_type_id, object_id):
    # TODO: Remove this in Django 2.0.
    # This is a legacy view that depends on the contenttypes framework.
    # The core logic was moved to django.contrib.contenttypes.views after
    # Django 1.0, but this remains here for backwards compatibility.
    # Note that the import is *within* this function, rather than being at
    # module level, because we don't want to assume people have contenttypes
    # installed.
    from django.contrib.contenttypes.views import shortcut as real_shortcut
    return real_shortcut(request, content_type_id, object_id)
bsd-3-clause
arokem/nipype
nipype/pipeline/plugins/ipythonx.py
15
2973
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Parallel workflow execution via IPython controller

NOTE: this module targets the legacy (pre-0.11) IPython.kernel API and is
written in Python 2 syntax (``except Exception, e``).
"""

import sys

# Import errors are deferred to plugin construction time so that simply
# importing this module never fails when IPython is absent.
IPython_not_loaded = False
try:
    from IPython import __version__ as IPyversion
    from IPython.kernel.contexts import ConnectionRefusedError
except:
    IPython_not_loaded = True


from .base import (DistributedPluginBase, logger, report_crash)


class IPythonXPlugin(DistributedPluginBase):
    """Execute workflow with ipython
    """

    def __init__(self, plugin_args=None):
        if IPython_not_loaded:
            raise ImportError('IPython parallel could not be imported')
        super(IPythonXPlugin, self).__init__(plugin_args=plugin_args)
        self.ipyclient = None
        self.taskclient = None

    def run(self, graph, config, updatehash=False):
        """Executes a pre-defined pipeline is distributed approaches
        based on IPython's parallel processing interface
        """
        # retrieve clients again
        try:
            name = 'IPython.kernel.client'
            __import__(name)
            self.ipyclient = sys.modules[name]
        except ImportError:
            raise ImportError("Ipython kernel not found. Parallel execution " \
                              "will be unavailable")
        try:
            self.taskclient = self.ipyclient.TaskClient()
        except Exception, e:
            if isinstance(e, ConnectionRefusedError):
                raise Exception("No IPython clients found.")
            if isinstance(e, ValueError):
                raise Exception("Ipython kernel not installed")
        return super(IPythonXPlugin, self).run(graph, config, updatehash=updatehash)

    def _get_result(self, taskid):
        # Non-blocking poll of the controller for a task's result.
        return self.taskclient.get_task_result(taskid, block=False)

    def _submit_job(self, node, updatehash=False):
        # The node is pushed to the engine and executed by this code string;
        # `result`/`traceback` are pulled back for _get_result/_report_crash.
        cmdstr = """import sys
from traceback import format_exception
traceback=None
result=None
try:
    result = task.run(updatehash=updatehash)
except:
    etype, eval, etr = sys.exc_info()
    traceback = format_exception(etype,eval,etr)
    result = task.result
"""
        task = self.ipyclient.StringTask(cmdstr,
                                         push = dict(task=node,
                                                     updatehash=updatehash),
                                         pull = ['result','traceback'])
        return self.taskclient.run(task, block = False)

    def _report_crash(self, node, result=None):
        if result and result['traceback']:
            node._result = result['result']
            node._traceback = result['traceback']
            return report_crash(node,
                                traceback=result['traceback'])
        else:
            return report_crash(node)

    def _clear_task(self, taskid):
        # clear() only exists from IPython 0.10.1 onwards.
        if IPyversion >= '0.10.1':
            logger.debug("Clearing id: %d"%taskid)
            self.taskclient.clear(taskid)
bsd-3-clause
terbolous/SickRage
lib/hachoir_core/log.py
86
4235
import os, sys, time
import hachoir_core.config as config
from hachoir_core.i18n import _

class Log:
    """Small multi-sink logger (Python 2 era, predates stdlib `logging` use).

    Messages have one of three levels and can be fanned out to an in-memory
    buffer, stderr, a UTF-8 log file and an optional callback.
    """
    LOG_INFO   = 0
    LOG_WARN   = 1
    LOG_ERROR  = 2

    level_name = {
        LOG_WARN: "[warn]",
        LOG_ERROR: "[err!]",
        LOG_INFO: "[info]"
    }

    def __init__(self):
        self.__buffer = {}          # level -> list of buffered message texts
        self.__file = None          # codecs file handle once setFilename() is used
        self.use_print = True       # echo messages to stderr
        self.use_buffer = False     # keep messages in self.__buffer
        self.on_new_message = None  # Prototype: def func(level, prefix, text, context)

    def shutdown(self):
        # Final marker line in the log file, if file logging was enabled.
        if self.__file:
            self._writeIntoFile(_("Stop Hachoir"))

    def setFilename(self, filename, append=True):
        """
        Use a file to store all messages. The UTF-8 encoding will be used.
        Write an informative message if the file can't be created.

        @param filename: C{L{string}}
        """
        # Look if file already exists or not
        filename = os.path.expanduser(filename)
        filename = os.path.realpath(filename)
        # NOTE: the `append` parameter is overwritten here — whether we append
        # depends only on whether the file already exists, not on the caller.
        append = os.access(filename, os.F_OK)

        # Create log file (or open it in append mode, if it already exists)
        try:
            import codecs
            if append:
                self.__file = codecs.open(filename, "a", "utf-8")
            else:
                self.__file = codecs.open(filename, "w", "utf-8")
            self._writeIntoFile(_("Starting Hachoir"))
        except IOError, err:
            # errno 2 == ENOENT: report it as a plain info message.
            if err.errno == 2:
                self.__file = None
                self.info(_("[Log] setFilename(%s) fails: no such file") % filename)
            else:
                raise

    def _writeIntoFile(self, message):
        # Each file line is "YYYY-mm-dd HH:MM:SS - message".
        timestamp = time.strftime("%Y-%m-%d %H:%M:%S")
        self.__file.write(u"%s - %s\n" % (timestamp, message))
        self.__file.flush()

    def newMessage(self, level, text, ctxt=None):
        """
        Write a new message : append it in the buffer,
        display it to the screen (if needed), and write
        it in the log file (if needed).

        @param level: Message level.
        @type level: C{int}
        @param text: Message content.
        @type text: C{str}
        @param ctxt: The caller instance.
        """
        # Filtering: errors always pass; warnings are dropped in quiet mode;
        # info only shows in verbose mode.
        if level < self.LOG_ERROR and config.quiet or \
           level <= self.LOG_INFO and not config.verbose:
            return
        if config.debug:
            from hachoir_core.error import getBacktrace
            backtrace = getBacktrace(None)
            if backtrace:
                text += "\n\n" + backtrace
        # Keep the unprefixed text for the callback; the displayed text may
        # gain a "[context] " prefix from the caller's _logger() hook.
        _text = text
        if hasattr(ctxt, "_logger"):
            _ctxt = ctxt._logger()
            if _ctxt is not None:
                text = "[%s] %s" % (_ctxt, text)

        # Add message to log buffer
        if self.use_buffer:
            if not self.__buffer.has_key(level):
                self.__buffer[level] = [text]
            else:
                self.__buffer[level].append(text)

        # Add prefix
        prefix = self.level_name.get(level, "[info]")

        # Display on stdout (if used)
        if self.use_print:
            sys.stdout.flush()
            sys.stderr.write("%s %s\n" % (prefix, text))
            sys.stderr.flush()

        # Write into outfile (if used)
        if self.__file:
            self._writeIntoFile("%s %s" % (prefix, text))

        # Use callback (if used)
        if self.on_new_message:
            self.on_new_message (level, prefix, _text, ctxt)

    def info(self, text):
        """
        New informative message.
        @type text: C{str}
        """
        self.newMessage(Log.LOG_INFO, text)

    def warning(self, text):
        """
        New warning message.
        @type text: C{str}
        """
        self.newMessage(Log.LOG_WARN, text)

    def error(self, text):
        """
        New error message.
        @type text: C{str}
        """
        self.newMessage(Log.LOG_ERROR, text)

# Module-level singleton used by the Logger mixin below.
log = Log()

class Logger(object):
    """Mixin giving a class info/warning/error methods that tag messages
    with the instance's class name (via _logger())."""
    def _logger(self):
        return "<%s>" % self.__class__.__name__
    def info(self, text):
        log.newMessage(Log.LOG_INFO, text, self)
    def warning(self, text):
        log.newMessage(Log.LOG_WARN, text, self)
    def error(self, text):
        log.newMessage(Log.LOG_ERROR, text, self)
gpl-3.0
tomster/py-transmission
setup.py
4
1401
from setuptools import setup, find_packages # see http://peak.telecommunity.com/DevCenter/setuptools and # http://docs.python.org/dist/meta-data.html#meta-data for setup signature dependencies = ["BitTorrent-bencode>=5.0.8"] setup( name = "TransmissionClient", version = "0.2rc1", packages = find_packages(), # metadata for upload to PyPI author = "Tom Lazar", author_email = "tom@tomster.org", description = "Python bindings for the Transmission BitTorrent Client", license = "MIT License", keywords = "bittorrent transmission", url = "http://code.google.com/p/py-transmission/", zip_safe = True, setup_requires = dependencies, install_requires = dependencies, test_suite = "test.testTransmissionClient", long_description = """Transmission has recently introduced a RPC architecture in which it launches an independent daemon listening on a local socket and exposes a rich API for monitoring and controlling Transmission. This makes it much easier and 'cleaner' to implement clients in other languages, which is what this package aims to do. """, classifiers=["Development Status :: 4 - Beta", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", "Programming Language :: Python", "Topic :: Communications :: File Sharing", ], )
mit
acshi/osf.io
admin/meetings/serializers.py
4
2015
from website.conferences.model import DEFAULT_FIELD_NAMES def serialize_meeting(meeting): is_meeting = True if hasattr(meeting, 'is_meeting') and meeting.is_meeting is not None: is_meeting = meeting.is_meeting return { 'endpoint': meeting.endpoint, 'name': meeting.name, 'info_url': meeting.info_url, 'homepage_link_text': meeting.field_names.get('homepage_link_text', DEFAULT_FIELD_NAMES.get('homepage_link_text', '')), 'logo_url': meeting.logo_url, 'active': meeting.active, 'admins': meeting.admins.all().values_list('username', flat=True), 'public_projects': meeting.public_projects, 'poster': meeting.poster, 'talk': meeting.talk, 'num_submissions': meeting.num_submissions, 'location': meeting.location, 'start_date': meeting.start_date, 'end_date': meeting.end_date, 'submission1': meeting.field_names.get('submission1', DEFAULT_FIELD_NAMES.get('submission1', '')), 'submission2': meeting.field_names.get('submission2', DEFAULT_FIELD_NAMES.get('submission2', '')), 'submission1_plural': meeting.field_names.get('submission1_plural', DEFAULT_FIELD_NAMES.get('submission1_plural', '')), 'submission2_plural': meeting.field_names.get('submission2_plural', DEFAULT_FIELD_NAMES.get('submission2_plural', '')), 'meeting_title_type': meeting.field_names.get('meeting_title_type', DEFAULT_FIELD_NAMES.get('meeting_title_type', '')), 'add_submission': meeting.field_names.get('add_submission', DEFAULT_FIELD_NAMES.get('add_submission', '')), 'mail_subject': meeting.field_names.get('mail_subject', DEFAULT_FIELD_NAMES.get('mail_subject', '')), 'mail_message_body': meeting.field_names.get('mail_message_body', DEFAULT_FIELD_NAMES.get('mail_message_body', '')), 'mail_attachment': meeting.field_names.get('mail_attachment', DEFAULT_FIELD_NAMES.get('mail_attachment', '')), 'is_meeting': is_meeting, }
apache-2.0
kantai/passe-framework-prototype
django/views/generic/date_based.py
246
14025
import datetime import time from django.template import loader, RequestContext from django.core.exceptions import ObjectDoesNotExist from django.core.xheaders import populate_xheaders from django.db.models.fields import DateTimeField from django.http import Http404, HttpResponse import warnings warnings.warn( 'Function-based generic views have been deprecated; use class-based views instead.', PendingDeprecationWarning ) def archive_index(request, queryset, date_field, num_latest=15, template_name=None, template_loader=loader, extra_context=None, allow_empty=True, context_processors=None, mimetype=None, allow_future=False, template_object_name='latest'): """ Generic top-level archive of date-based objects. Templates: ``<app_label>/<model_name>_archive.html`` Context: date_list List of years latest Latest N (defaults to 15) objects by date """ if extra_context is None: extra_context = {} model = queryset.model if not allow_future: queryset = queryset.filter(**{'%s__lte' % date_field: datetime.datetime.now()}) date_list = queryset.dates(date_field, 'year')[::-1] if not date_list and not allow_empty: raise Http404("No %s available" % model._meta.verbose_name) if date_list and num_latest: latest = queryset.order_by('-'+date_field)[:num_latest] else: latest = None if not template_name: template_name = "%s/%s_archive.html" % (model._meta.app_label, model._meta.object_name.lower()) t = template_loader.get_template(template_name) c = RequestContext(request, { 'date_list' : date_list, template_object_name : latest, }, context_processors) for key, value in extra_context.items(): if callable(value): c[key] = value() else: c[key] = value return HttpResponse(t.render(c), mimetype=mimetype) def archive_year(request, year, queryset, date_field, template_name=None, template_loader=loader, extra_context=None, allow_empty=False, context_processors=None, template_object_name='object', mimetype=None, make_object_list=False, allow_future=False): """ Generic yearly archive view. 
Templates: ``<app_label>/<model_name>_archive_year.html`` Context: date_list List of months in this year with objects year This year object_list List of objects published in the given month (Only available if make_object_list argument is True) """ if extra_context is None: extra_context = {} model = queryset.model now = datetime.datetime.now() lookup_kwargs = {'%s__year' % date_field: year} # Only bother to check current date if the year isn't in the past and future objects aren't requested. if int(year) >= now.year and not allow_future: lookup_kwargs['%s__lte' % date_field] = now date_list = queryset.filter(**lookup_kwargs).dates(date_field, 'month') if not date_list and not allow_empty: raise Http404 if make_object_list: object_list = queryset.filter(**lookup_kwargs) else: object_list = [] if not template_name: template_name = "%s/%s_archive_year.html" % (model._meta.app_label, model._meta.object_name.lower()) t = template_loader.get_template(template_name) c = RequestContext(request, { 'date_list': date_list, 'year': year, '%s_list' % template_object_name: object_list, }, context_processors) for key, value in extra_context.items(): if callable(value): c[key] = value() else: c[key] = value return HttpResponse(t.render(c), mimetype=mimetype) def archive_month(request, year, month, queryset, date_field, month_format='%b', template_name=None, template_loader=loader, extra_context=None, allow_empty=False, context_processors=None, template_object_name='object', mimetype=None, allow_future=False): """ Generic monthly archive view. 
Templates: ``<app_label>/<model_name>_archive_month.html`` Context: date_list: List of days in this month with objects month: (date) this month next_month: (date) the first day of the next month, or None if the next month is in the future previous_month: (date) the first day of the previous month object_list: list of objects published in the given month """ if extra_context is None: extra_context = {} try: tt = time.strptime("%s-%s" % (year, month), '%s-%s' % ('%Y', month_format)) date = datetime.date(*tt[:3]) except ValueError: raise Http404 model = queryset.model now = datetime.datetime.now() # Calculate first and last day of month, for use in a date-range lookup. first_day = date.replace(day=1) if first_day.month == 12: last_day = first_day.replace(year=first_day.year + 1, month=1) else: last_day = first_day.replace(month=first_day.month + 1) lookup_kwargs = { '%s__gte' % date_field: first_day, '%s__lt' % date_field: last_day, } # Only bother to check current date if the month isn't in the past and future objects are requested. if last_day >= now.date() and not allow_future: lookup_kwargs['%s__lte' % date_field] = now object_list = queryset.filter(**lookup_kwargs) date_list = object_list.dates(date_field, 'day') if not object_list and not allow_empty: raise Http404 # Calculate the next month, if applicable. 
if allow_future: next_month = last_day elif last_day <= datetime.date.today(): next_month = last_day else: next_month = None # Calculate the previous month if first_day.month == 1: previous_month = first_day.replace(year=first_day.year-1,month=12) else: previous_month = first_day.replace(month=first_day.month-1) if not template_name: template_name = "%s/%s_archive_month.html" % (model._meta.app_label, model._meta.object_name.lower()) t = template_loader.get_template(template_name) c = RequestContext(request, { 'date_list': date_list, '%s_list' % template_object_name: object_list, 'month': date, 'next_month': next_month, 'previous_month': previous_month, }, context_processors) for key, value in extra_context.items(): if callable(value): c[key] = value() else: c[key] = value return HttpResponse(t.render(c), mimetype=mimetype) def archive_week(request, year, week, queryset, date_field, template_name=None, template_loader=loader, extra_context=None, allow_empty=True, context_processors=None, template_object_name='object', mimetype=None, allow_future=False): """ Generic weekly archive view. Templates: ``<app_label>/<model_name>_archive_week.html`` Context: week: (date) this week object_list: list of objects published in the given week """ if extra_context is None: extra_context = {} try: tt = time.strptime(year+'-0-'+week, '%Y-%w-%U') date = datetime.date(*tt[:3]) except ValueError: raise Http404 model = queryset.model now = datetime.datetime.now() # Calculate first and last day of week, for use in a date-range lookup. first_day = date last_day = date + datetime.timedelta(days=7) lookup_kwargs = { '%s__gte' % date_field: first_day, '%s__lt' % date_field: last_day, } # Only bother to check current date if the week isn't in the past and future objects aren't requested. 
if last_day >= now.date() and not allow_future: lookup_kwargs['%s__lte' % date_field] = now object_list = queryset.filter(**lookup_kwargs) if not object_list and not allow_empty: raise Http404 if not template_name: template_name = "%s/%s_archive_week.html" % (model._meta.app_label, model._meta.object_name.lower()) t = template_loader.get_template(template_name) c = RequestContext(request, { '%s_list' % template_object_name: object_list, 'week': date, }) for key, value in extra_context.items(): if callable(value): c[key] = value() else: c[key] = value return HttpResponse(t.render(c), mimetype=mimetype) def archive_day(request, year, month, day, queryset, date_field, month_format='%b', day_format='%d', template_name=None, template_loader=loader, extra_context=None, allow_empty=False, context_processors=None, template_object_name='object', mimetype=None, allow_future=False): """ Generic daily archive view. Templates: ``<app_label>/<model_name>_archive_day.html`` Context: object_list: list of objects published that day day: (datetime) the day previous_day (datetime) the previous day next_day (datetime) the next day, or None if the current day is today """ if extra_context is None: extra_context = {} try: tt = time.strptime('%s-%s-%s' % (year, month, day), '%s-%s-%s' % ('%Y', month_format, day_format)) date = datetime.date(*tt[:3]) except ValueError: raise Http404 model = queryset.model now = datetime.datetime.now() if isinstance(model._meta.get_field(date_field), DateTimeField): lookup_kwargs = {'%s__range' % date_field: (datetime.datetime.combine(date, datetime.time.min), datetime.datetime.combine(date, datetime.time.max))} else: lookup_kwargs = {date_field: date} # Only bother to check current date if the date isn't in the past and future objects aren't requested. 
if date >= now.date() and not allow_future: lookup_kwargs['%s__lte' % date_field] = now object_list = queryset.filter(**lookup_kwargs) if not allow_empty and not object_list: raise Http404 # Calculate the next day, if applicable. if allow_future: next_day = date + datetime.timedelta(days=1) elif date < datetime.date.today(): next_day = date + datetime.timedelta(days=1) else: next_day = None if not template_name: template_name = "%s/%s_archive_day.html" % (model._meta.app_label, model._meta.object_name.lower()) t = template_loader.get_template(template_name) c = RequestContext(request, { '%s_list' % template_object_name: object_list, 'day': date, 'previous_day': date - datetime.timedelta(days=1), 'next_day': next_day, }, context_processors) for key, value in extra_context.items(): if callable(value): c[key] = value() else: c[key] = value return HttpResponse(t.render(c), mimetype=mimetype) def archive_today(request, **kwargs): """ Generic daily archive view for today. Same as archive_day view. """ today = datetime.date.today() kwargs.update({ 'year': str(today.year), 'month': today.strftime('%b').lower(), 'day': str(today.day), }) return archive_day(request, **kwargs) def object_detail(request, year, month, day, queryset, date_field, month_format='%b', day_format='%d', object_id=None, slug=None, slug_field='slug', template_name=None, template_name_field=None, template_loader=loader, extra_context=None, context_processors=None, template_object_name='object', mimetype=None, allow_future=False): """ Generic detail view from year/month/day/slug or year/month/day/id structure. 
Templates: ``<app_label>/<model_name>_detail.html`` Context: object: the object to be detailed """ if extra_context is None: extra_context = {} try: tt = time.strptime('%s-%s-%s' % (year, month, day), '%s-%s-%s' % ('%Y', month_format, day_format)) date = datetime.date(*tt[:3]) except ValueError: raise Http404 model = queryset.model now = datetime.datetime.now() if isinstance(model._meta.get_field(date_field), DateTimeField): lookup_kwargs = {'%s__range' % date_field: (datetime.datetime.combine(date, datetime.time.min), datetime.datetime.combine(date, datetime.time.max))} else: lookup_kwargs = {date_field: date} # Only bother to check current date if the date isn't in the past and future objects aren't requested. if date >= now.date() and not allow_future: lookup_kwargs['%s__lte' % date_field] = now if object_id: lookup_kwargs['%s__exact' % model._meta.pk.name] = object_id elif slug and slug_field: lookup_kwargs['%s__exact' % slug_field] = slug else: raise AttributeError("Generic detail view must be called with either an object_id or a slug/slugfield") try: obj = queryset.get(**lookup_kwargs) except ObjectDoesNotExist: raise Http404("No %s found for" % model._meta.verbose_name) if not template_name: template_name = "%s/%s_detail.html" % (model._meta.app_label, model._meta.object_name.lower()) if template_name_field: template_name_list = [getattr(obj, template_name_field), template_name] t = template_loader.select_template(template_name_list) else: t = template_loader.get_template(template_name) c = RequestContext(request, { template_object_name: obj, }, context_processors) for key, value in extra_context.items(): if callable(value): c[key] = value() else: c[key] = value response = HttpResponse(t.render(c), mimetype=mimetype) populate_xheaders(request, response, model, getattr(obj, obj._meta.pk.name)) return response
bsd-3-clause
joonro/PyTables
tables/description.py
1
35728
# -*- coding: utf-8 -*- ######################################################################## # # License: BSD # Created: September 21, 2002 # Author: Francesc Alted # # $Id$ # ######################################################################## """Classes for describing columns for ``Table`` objects.""" # Imports # ======= from __future__ import print_function import sys import copy import warnings import numpy from tables import atom from tables.path import check_name_validity # Public variables # ================ __docformat__ = 'reStructuredText' """The format of documentation strings in this module.""" # Private functions # ================= def same_position(oldmethod): """Decorate `oldmethod` to also compare the `_v_pos` attribute.""" def newmethod(self, other): try: other._v_pos except AttributeError: return False # not a column definition return self._v_pos == other._v_pos and oldmethod(self, other) newmethod.__name__ = oldmethod.__name__ newmethod.__doc__ = oldmethod.__doc__ return newmethod # Column classes # ============== class Col(atom.Atom): """Defines a non-nested column. Col instances are used as a means to declare the different properties of a non-nested column in a table or nested column. Col classes are descendants of their equivalent Atom classes (see :ref:`AtomClassDescr`), but their instances have an additional _v_pos attribute that is used to decide the position of the column inside its parent table or nested column (see the IsDescription class in :ref:`IsDescriptionClassDescr` for more information on column positions). In the same fashion as Atom, you should use a particular Col descendant class whenever you know the exact type you will need when writing your code. Otherwise, you may use one of the Col.from_*() factory methods. Each factory method inherited from the Atom class is available with the same signature, plus an additional pos parameter (placed in last position) which defaults to None and that may take an integer value. 
This parameter might be used to specify the position of the column in the table. Besides, there are the next additional factory methods, available only for Col objects. The following parameters are available for most Col-derived constructors. Parameters ---------- itemsize : int For types with a non-fixed size, this sets the size in bytes of individual items in the column. shape : tuple Sets the shape of the column. An integer shape of N is equivalent to the tuple (N,). dflt Sets the default value for the column. pos : int Sets the position of column in table. If unspecified, the position will be randomly selected. """ # Avoid mangling atom class data. __metaclass__ = type _class_from_prefix = {} # filled as column classes are created """Maps column prefixes to column classes.""" # Class methods # ~~~~~~~~~~~~~ @classmethod def prefix(class_): """Return the column class prefix.""" cname = class_.__name__ return cname[:cname.rfind('Col')] @classmethod def from_atom(class_, atom, pos=None): """Create a Col definition from a PyTables atom. An optional position may be specified as the pos argument. """ prefix = atom.prefix() kwargs = atom._get_init_args() colclass = class_._class_from_prefix[prefix] return colclass(pos=pos, **kwargs) @classmethod def from_sctype(class_, sctype, shape=(), dflt=None, pos=None): """Create a `Col` definition from a NumPy scalar type `sctype`. Optional shape, default value and position may be specified as the `shape`, `dflt` and `pos` arguments, respectively. Information in the `sctype` not represented in a `Col` is ignored. """ newatom = atom.Atom.from_sctype(sctype, shape, dflt) return class_.from_atom(newatom, pos=pos) @classmethod def from_dtype(class_, dtype, dflt=None, pos=None): """Create a `Col` definition from a NumPy `dtype`. Optional default value and position may be specified as the `dflt` and `pos` arguments, respectively. The `dtype` must have a byte order which is irrelevant or compatible with that of the system. 
Information in the `dtype` not represented in a `Col` is ignored. """ newatom = atom.Atom.from_dtype(dtype, dflt) return class_.from_atom(newatom, pos=pos) @classmethod def from_type(class_, type, shape=(), dflt=None, pos=None): """Create a `Col` definition from a PyTables `type`. Optional shape, default value and position may be specified as the `shape`, `dflt` and `pos` arguments, respectively. """ newatom = atom.Atom.from_type(type, shape, dflt) return class_.from_atom(newatom, pos=pos) @classmethod def from_kind(class_, kind, itemsize=None, shape=(), dflt=None, pos=None): """Create a `Col` definition from a PyTables `kind`. Optional item size, shape, default value and position may be specified as the `itemsize`, `shape`, `dflt` and `pos` arguments, respectively. Bear in mind that not all columns support a default item size. """ newatom = atom.Atom.from_kind(kind, itemsize, shape, dflt) return class_.from_atom(newatom, pos=pos) @classmethod def _subclass_from_prefix(class_, prefix): """Get a column subclass for the given `prefix`.""" cname = '%sCol' % prefix class_from_prefix = class_._class_from_prefix if cname in class_from_prefix: return class_from_prefix[cname] atombase = getattr(atom, '%sAtom' % prefix) class NewCol(class_, atombase): """Defines a non-nested column of a particular type. The constructor accepts the same arguments as the equivalent `Atom` class, plus an additional ``pos`` argument for position information, which is assigned to the `_v_pos` attribute. """ def __init__(self, *args, **kwargs): pos = kwargs.pop('pos', None) class_from_prefix = self._class_from_prefix atombase.__init__(self, *args, **kwargs) # The constructor of an abstract atom may have changed # the class of `self` to something different of `NewCol` # and `atombase` (that's why the prefix map is saved). 
if self.__class__ is not NewCol: colclass = class_from_prefix[self.prefix()] self.__class__ = colclass self._v_pos = pos __eq__ = same_position(atombase.__eq__) _is_equal_to_atom = same_position(atombase._is_equal_to_atom) # XXX: API incompatible change for PyTables 3 line # Overriding __eq__ blocks inheritance of __hash__ in 3.x # def __hash__(self): # return hash((self._v_pos, self.atombase)) if prefix == 'Enum': _is_equal_to_enumatom = same_position( atombase._is_equal_to_enumatom) NewCol.__name__ = cname class_from_prefix[prefix] = NewCol return NewCol # Special methods # ~~~~~~~~~~~~~~~ def __repr__(self): # Reuse the atom representation. atomrepr = super(Col, self).__repr__() lpar = atomrepr.index('(') rpar = atomrepr.rindex(')') atomargs = atomrepr[lpar + 1:rpar] classname = self.__class__.__name__ return '%s(%s, pos=%s)' % (classname, atomargs, self._v_pos) # Private methods # ~~~~~~~~~~~~~~~ def _get_init_args(self): """Get a dictionary of instance constructor arguments.""" kwargs = dict((arg, getattr(self, arg)) for arg in ('shape', 'dflt')) kwargs['pos'] = getattr(self, '_v_pos', None) return kwargs def _generate_col_classes(): """Generate all column classes.""" # Abstract classes are not in the class map. cprefixes = ['Int', 'UInt', 'Float', 'Time'] for (kind, kdata) in atom.atom_map.iteritems(): if hasattr(kdata, 'kind'): # atom class: non-fixed item size atomclass = kdata cprefixes.append(atomclass.prefix()) else: # dictionary: fixed item size for atomclass in kdata.itervalues(): cprefixes.append(atomclass.prefix()) # Bottom-level complex classes are not in the type map, of course. # We still want the user to get the compatibility warning, though. cprefixes.extend(['Complex32', 'Complex64', 'Complex128']) if hasattr(atom, 'Complex192Atom'): cprefixes.append('Complex192') if hasattr(atom, 'Complex256Atom'): cprefixes.append('Complex256') for cprefix in cprefixes: newclass = Col._subclass_from_prefix(cprefix) yield newclass # Create all column classes. 
#for _newclass in _generate_col_classes(): # exec('%s = _newclass' % _newclass.__name__) #del _newclass StringCol = Col._subclass_from_prefix('String') BoolCol = Col._subclass_from_prefix('Bool') EnumCol = Col._subclass_from_prefix('Enum') IntCol = Col._subclass_from_prefix('Int') Int8Col = Col._subclass_from_prefix('Int8') Int16Col = Col._subclass_from_prefix('Int16') Int32Col = Col._subclass_from_prefix('Int32') Int64Col = Col._subclass_from_prefix('Int64') UIntCol = Col._subclass_from_prefix('UInt') UInt8Col = Col._subclass_from_prefix('UInt8') UInt16Col = Col._subclass_from_prefix('UInt16') UInt32Col = Col._subclass_from_prefix('UInt32') UInt64Col = Col._subclass_from_prefix('UInt64') FloatCol = Col._subclass_from_prefix('Float') if hasattr(atom, 'Float16Atom'): Float16Col = Col._subclass_from_prefix('Float16') Float32Col = Col._subclass_from_prefix('Float32') Float64Col = Col._subclass_from_prefix('Float64') if hasattr(atom, 'Float96Atom'): Float96Col = Col._subclass_from_prefix('Float96') if hasattr(atom, 'Float128Atom'): Float128Col = Col._subclass_from_prefix('Float128') ComplexCol = Col._subclass_from_prefix('Complex') Complex32Col = Col._subclass_from_prefix('Complex32') Complex64Col = Col._subclass_from_prefix('Complex64') Complex128Col = Col._subclass_from_prefix('Complex128') if hasattr(atom, 'Complex192Atom'): Complex192Col = Col._subclass_from_prefix('Complex192') if hasattr(atom, 'Complex256Atom'): Complex256Col = Col._subclass_from_prefix('Complex256') TimeCol = Col._subclass_from_prefix('Time') Time32Col = Col._subclass_from_prefix('Time32') Time64Col = Col._subclass_from_prefix('Time64') # Table description classes # ========================= class Description(object): """This class represents descriptions of the structure of tables. An instance of this class is automatically bound to Table (see :ref:`TableClassDescr`) objects when they are created. 
It provides a browseable representation of the structure of the table, made of non-nested (Col - see :ref:`ColClassDescr`) and nested (Description) columns. Column definitions under a description can be accessed as attributes of it (*natural naming*). For instance, if table.description is a Description instance with a column named col1 under it, the later can be accessed as table.description.col1. If col1 is nested and contains a col2 column, this can be accessed as table.description.col1.col2. Because of natural naming, the names of members start with special prefixes, like in the Group class (see :ref:`GroupClassDescr`). .. rubric:: Description attributes .. attribute:: _v_colobjects A dictionary mapping the names of the columns hanging directly from the associated table or nested column to their respective descriptions (Col - see :ref:`ColClassDescr` or Description - see :ref:`DescriptionClassDescr` instances). .. versionchanged:: 3.0 The *_v_colObjects* attobute has been renamed into *_v_colobjects*. .. attribute:: _v_dflts A dictionary mapping the names of non-nested columns hanging directly from the associated table or nested column to their respective default values. .. attribute:: _v_dtype The NumPy type which reflects the structure of this table or nested column. You can use this as the dtype argument of NumPy array factories. .. attribute:: _v_dtypes A dictionary mapping the names of non-nested columns hanging directly from the associated table or nested column to their respective NumPy types. .. attribute:: _v_is_nested Whether the associated table or nested column contains further nested columns or not. .. attribute:: _v_itemsize The size in bytes of an item in this table or nested column. .. attribute:: _v_name The name of this description group. The name of the root group is '/'. .. attribute:: _v_names A list of the names of the columns hanging directly from the associated table or nested column. 
The order of the names matches the order of their respective columns in the containing table. .. attribute:: _v_nested_descr A nested list of pairs of (name, format) tuples for all the columns under this table or nested column. You can use this as the dtype and descr arguments of NumPy array factories. .. versionchanged:: 3.0 The *_v_nestedDescr* attribute has been renamed into *_v_nested_descr*. .. attribute:: _v_nested_formats A nested list of the NumPy string formats (and shapes) of all the columns under this table or nested column. You can use this as the formats argument of NumPy array factories. .. versionchanged:: 3.0 The *_v_nestedFormats* attribute has been renamed into *_v_nested_formats*. .. attribute:: _v_nestedlvl The level of the associated table or nested column in the nested datatype. .. attribute:: _v_nested_names A nested list of the names of all the columns under this table or nested column. You can use this as the names argument of NumPy array factories. .. versionchanged:: 3.0 The *_v_nestedNames* attribute has been renamed into *_v_nested_names*. .. attribute:: _v_pathname Pathname of the table or nested column. .. attribute:: _v_pathnames A list of the pathnames of all the columns under this table or nested column (in preorder). If it does not contain nested columns, this is exactly the same as the :attr:`Description._v_names` attribute. .. attribute:: _v_types A dictionary mapping the names of non-nested columns hanging directly from the associated table or nested column to their respective PyTables types. 
""" def __init__(self, classdict, nestedlvl=-1, validate=True): if not classdict: raise ValueError("cannot create an empty data type") # Do a shallow copy of classdict just in case this is going to # be shared by other instances newdict = self.__dict__ newdict["_v_name"] = "/" # The name for root descriptor newdict["_v_names"] = [] newdict["_v_dtypes"] = {} newdict["_v_types"] = {} newdict["_v_dflts"] = {} newdict["_v_colobjects"] = {} newdict["_v_is_nested"] = False nestedFormats = [] nestedDType = [] if not hasattr(newdict, "_v_nestedlvl"): newdict["_v_nestedlvl"] = nestedlvl + 1 cols_with_pos = [] # colum (position, name) pairs cols_no_pos = [] # just column names # Check for special variables and convert column descriptions for (name, descr) in classdict.iteritems(): if name.startswith('_v_'): if name in newdict: # print("Warning!") # special methods &c: copy to newdict, warn about conflicts warnings.warn("Can't set attr %r in description class %r" % (name, self)) else: # print("Special variable!-->", name, classdict[name]) newdict[name] = descr continue # This variable is not needed anymore columns = None if (type(descr) == type(IsDescription) and issubclass(descr, IsDescription)): # print("Nested object (type I)-->", name) columns = descr().columns elif (type(descr.__class__) == type(IsDescription) and issubclass(descr.__class__, IsDescription)): # print("Nested object (type II)-->", name) columns = descr.columns elif isinstance(descr, dict): # print("Nested object (type III)-->", name) columns = descr else: # print("Nested object (type IV)-->", name) descr = copy.copy(descr) # The copies above and below ensure that the structures # provided by the user will remain unchanged even if we # tamper with the values of ``_v_pos`` here. 
if columns is not None: descr = Description(copy.copy(columns), self._v_nestedlvl) classdict[name] = descr pos = getattr(descr, '_v_pos', None) if pos is None: cols_no_pos.append(name) else: cols_with_pos.append((pos, name)) # Sort field names: # # 1. Fields with explicit positions, according to their # positions (and their names if coincident). # 2. Fields with no position, in alfabetical order. cols_with_pos.sort() cols_no_pos.sort() keys = [name for (pos, name) in cols_with_pos] + cols_no_pos pos = 0 # Get properties for compound types for k in keys: if validate: # Check for key name validity check_name_validity(k) # Class variables object = classdict[k] newdict[k] = object # To allow natural naming if not (isinstance(object, Col) or isinstance(object, Description)): raise TypeError('Passing an incorrect value to a table column.' ' Expected a Col (or subclass) instance and ' 'got: "%s". Please make use of the Col(), or ' 'descendant, constructor to properly ' 'initialize columns.' % object) object._v_pos = pos # Set the position of this object object._v_parent = self # The parent description pos += 1 newdict['_v_colobjects'][k] = object newdict['_v_names'].append(k) object.__dict__['_v_name'] = k if not isinstance(k, str): # numpy only accepts "str" for field names if sys.version_info[0] < 3: # Python 2.x: unicode --> str kk = k.encode() # use the default encoding else: # Python 3.x: bytes --> str (unicode) kk = k.decode() else: kk = k if isinstance(object, Col): dtype = object.dtype newdict['_v_dtypes'][k] = dtype newdict['_v_types'][k] = object.type newdict['_v_dflts'][k] = object.dflt nestedFormats.append(object.recarrtype) baserecarrtype = dtype.base.str[1:] nestedDType.append((kk, baserecarrtype, dtype.shape)) else: # A description nestedFormats.append(object._v_nested_formats) nestedDType.append((kk, object._v_dtype)) # Assign the format list to _v_nested_formats newdict['_v_nested_formats'] = nestedFormats newdict['_v_dtype'] = numpy.dtype(nestedDType) # 
_v_itemsize is derived from the _v_dtype that already computes this newdict['_v_itemsize'] = newdict['_v_dtype'].itemsize if self._v_nestedlvl == 0: # Get recursively nested _v_nested_names and _v_nested_descr attrs self._g_set_nested_names_descr() # Get pathnames for nested groups self._g_set_path_names() # Check the _v_byteorder has been used an issue an Error if hasattr(self, "_v_byteorder"): raise ValueError( "Using a ``_v_byteorder`` in the description is obsolete. " "Use the byteorder parameter in the constructor instead.") def _g_set_nested_names_descr(self): """Computes the nested names and descriptions for nested datatypes.""" names = self._v_names fmts = self._v_nested_formats self._v_nested_names = names[:] # Important to do a copy! self._v_nested_descr = list(zip(names, fmts)) for i, name in enumerate(names): new_object = self._v_colobjects[name] if isinstance(new_object, Description): new_object._g_set_nested_names_descr() # replace the column nested name by a correct tuple self._v_nested_names[i] = (name, new_object._v_nested_names) self._v_nested_descr[i] = (name, new_object._v_nested_descr) # set the _v_is_nested flag self._v_is_nested = True def _g_set_path_names(self): """Compute the pathnames for arbitrary nested descriptions. This method sets the ``_v_pathname`` and ``_v_pathnames`` attributes of all the elements (both descriptions and columns) in this nested description. """ def get_cols_in_order(description): return [description._v_colobjects[colname] for colname in description._v_names] def join_paths(path1, path2): if not path1: return path2 return '%s/%s' % (path1, path2) # The top of the stack always has a nested description # and a list of its child columns # (be they nested ``Description`` or non-nested ``Col`` objects). # In the end, the list contains only a list of column paths # under this one. 
# # For instance, given this top of the stack:: # # (<Description X>, [<Column A>, <Column B>]) # # After computing the rest of the stack, the top is:: # # (<Description X>, ['a', 'a/m', 'a/n', ... , 'b', ...]) stack = [] # We start by pushing the top-level description # and its child columns. self._v_pathname = '' stack.append((self, get_cols_in_order(self))) while stack: desc, cols = stack.pop() head = cols[0] # What's the first child in the list? if isinstance(head, Description): # A nested description. We remove it from the list and # push it with its child columns. This will be the next # handled description. head._v_pathname = join_paths(desc._v_pathname, head._v_name) stack.append((desc, cols[1:])) # alter the top stack.append((head, get_cols_in_order(head))) # new top elif isinstance(head, Col): # A non-nested column. We simply remove it from the # list and append its name to it. head._v_pathname = join_paths(desc._v_pathname, head._v_name) cols.append(head._v_name) # alter the top stack.append((desc, cols[1:])) # alter the top else: # Since paths and names are appended *to the end* of # children lists, a string signals that no more children # remain to be processed, so we are done with the # description at the top of the stack. assert isinstance(head, basestring) # Assign the computed set of descendent column paths. desc._v_pathnames = cols if len(stack) > 0: # Compute the paths with respect to the parent node # (including the path of the current description) # and append them to its list. descName = desc._v_name colPaths = [join_paths(descName, path) for path in cols] colPaths.insert(0, descName) parentCols = stack[-1][1] parentCols.extend(colPaths) # (Nothing is pushed, we are done with this description.) def _f_walk(self, type='All'): """Iterate over nested columns. If type is 'All' (the default), all column description objects (Col and Description instances) are yielded in top-to-bottom order (preorder). 
If type is 'Col' or 'Description', only column descriptions of that type are yielded. """ if type not in ["All", "Col", "Description"]: raise ValueError("""\ type can only take the parameters 'All', 'Col' or 'Description'.""") stack = [self] while stack: object = stack.pop(0) # pop at the front so as to ensure the order if type in ["All", "Description"]: yield object # yield description for name in object._v_names: new_object = object._v_colobjects[name] if isinstance(new_object, Description): stack.append(new_object) else: if type in ["All", "Col"]: yield new_object # yield column def __repr__(self): """Gives a detailed Description column representation.""" rep = ['%s\"%s\": %r' % (" " * self._v_nestedlvl, k, self._v_colobjects[k]) for k in self._v_names] return '{\n %s}' % (',\n '.join(rep)) def __str__(self): """Gives a brief Description representation.""" return 'Description(%s)' % self._v_nested_descr class MetaIsDescription(type): """Helper metaclass to return the class variables as a dictionary.""" def __new__(cls, classname, bases, classdict): """Return a new class with a "columns" attribute filled.""" newdict = {"columns": {}, } if '__doc__' in classdict: newdict['__doc__'] = classdict['__doc__'] for b in bases: if "columns" in b.__dict__: newdict["columns"].update(b.__dict__["columns"]) for k in classdict: # if not (k.startswith('__') or k.startswith('_v_')): # We let pass _v_ variables to configure class behaviour if not (k.startswith('__')): newdict["columns"][k] = classdict[k] # Return a new class with the "columns" attribute filled return type.__new__(cls, classname, bases, newdict) class IsDescription(object): """Description of the structure of a table or nested column. This class is designed to be used as an easy, yet meaningful way to describe the structure of new Table (see :ref:`TableClassDescr`) datasets or nested columns through the definition of *derived classes*. 
In order to define such a class, you must declare it as descendant of IsDescription, with as many attributes as columns you want in your table. The name of each attribute will become the name of a column, and its value will hold a description of it. Ordinary columns can be described using instances of the Col class (see :ref:`ColClassDescr`). Nested columns can be described by using classes derived from IsDescription, instances of it, or name-description dictionaries. Derived classes can be declared in place (in which case the column takes the name of the class) or referenced by name. Nested columns can have a _v_pos special attribute which sets the *relative* position of the column among sibling columns *also having explicit positions*. The pos constructor argument of Col instances is used for the same purpose. Columns with no explicit position will be placed afterwards in alphanumeric order. Once you have created a description object, you can pass it to the Table constructor, where all the information it contains will be used to define the table structure. .. rubric:: IsDescription attributes .. attribute:: _v_pos Sets the position of a possible nested column description among its sibling columns. This attribute can be specified *when declaring* an IsDescription subclass to complement its *metadata*. .. attribute:: columns Maps the name of each column in the description to its own descriptive object. This attribute is *automatically created* when an IsDescription subclass is declared. Please note that declared columns can no longer be accessed as normal class variables after its creation. 
""" __metaclass__ = MetaIsDescription def descr_from_dtype(dtype_): """Get a description instance and byteorder from a (nested) NumPy dtype.""" fields = {} fbyteorder = '|' for name in dtype_.names: dtype, pos = dtype_.fields[name][:2] kind = dtype.base.kind byteorder = dtype.base.byteorder if byteorder in '><=': if fbyteorder not in ['|', byteorder]: raise NotImplementedError( "structured arrays with mixed byteorders " "are not supported yet, sorry") fbyteorder = byteorder # Non-nested column if kind in 'biufSUc': col = Col.from_dtype(dtype, pos=pos) # Nested column elif kind == 'V' and dtype.shape in [(), (1,)]: if dtype.shape != (): warnings.warn( "nested descriptions will be converted to scalar") col, _ = descr_from_dtype(dtype.base) col._v_pos = pos else: raise NotImplementedError( "structured arrays with columns with type description ``%s`` " "are not supported yet, sorry" % dtype) fields[name] = col return Description(fields), fbyteorder def dtype_from_descr(descr, byteorder=None): """Get a (nested) NumPy dtype from a description instance and byteorder. The descr parameter can be a Description or IsDescription instance, sub-class of IsDescription or a dictionary. 
""" if isinstance(descr, dict): descr = Description(descr) elif (type(descr) == type(IsDescription) and issubclass(descr, IsDescription)): descr = Description(descr().columns) elif isinstance(descr, IsDescription): descr = Description(descr.columns) elif not isinstance(descr, Description): raise ValueError('invalid description: %r' % descr) dtype_ = descr._v_dtype if byteorder and byteorder != '|': dtype_ = dtype_.newbyteorder(byteorder) return dtype_ if __name__ == "__main__": """Test code.""" class Info(IsDescription): _v_pos = 2 Name = UInt32Col() Value = Float64Col() class Test(IsDescription): """A description that has several columns.""" x = Col.from_type("int32", 2, 0, pos=0) y = Col.from_kind('float', dflt=1, shape=(2, 3)) z = UInt8Col(dflt=1) color = StringCol(2, dflt=" ") # color = UInt32Col(2) Info = Info() class info(IsDescription): _v_pos = 1 name = UInt32Col() value = Float64Col(pos=0) y2 = Col.from_kind('float', dflt=1, shape=(2, 3), pos=1) z2 = UInt8Col(dflt=1) class info2(IsDescription): y3 = Col.from_kind('float', dflt=1, shape=(2, 3)) z3 = UInt8Col(dflt=1) name = UInt32Col() value = Float64Col() class info3(IsDescription): name = UInt32Col() value = Float64Col() y4 = Col.from_kind('float', dflt=1, shape=(2, 3)) z4 = UInt8Col(dflt=1) # class Info(IsDescription): # _v_pos = 2 # Name = StringCol(itemsize=2) # Value = ComplexCol(itemsize=16) # class Test(IsDescription): # """A description that has several columns""" # x = Col.from_type("int32", 2, 0, pos=0) # y = Col.from_kind('float', dflt=1, shape=(2,3)) # z = UInt8Col(dflt=1) # color = StringCol(2, dflt=" ") # Info = Info() # class info(IsDescription): # _v_pos = 1 # name = StringCol(itemsize=2) # value = ComplexCol(itemsize=16, pos=0) # y2 = Col.from_kind('float', dflt=1, shape=(2,3), pos=1) # z2 = UInt8Col(dflt=1) # class info2(IsDescription): # y3 = Col.from_kind('float', dflt=1, shape=(2,3)) # z3 = UInt8Col(dflt=1) # name = StringCol(itemsize=2) # value = ComplexCol(itemsize=16) # class 
info3(IsDescription): # name = StringCol(itemsize=2) # value = ComplexCol(itemsize=16) # y4 = Col.from_kind('float', dflt=1, shape=(2,3)) # z4 = UInt8Col(dflt=1) # example cases of class Test klass = Test() # klass = Info() desc = Description(klass.columns) print("Description representation (short) ==>", desc) print("Description representation (long) ==>", repr(desc)) print("Column names ==>", desc._v_names) print("Column x ==>", desc.x) print("Column Info ==>", desc.Info) print("Column Info.value ==>", desc.Info.Value) print("Nested column names ==>", desc._v_nested_names) print("Defaults ==>", desc._v_dflts) print("Nested Formats ==>", desc._v_nested_formats) print("Nested Descriptions ==>", desc._v_nested_descr) print("Nested Descriptions (info) ==>", desc.info._v_nested_descr) print("Total size ==>", desc._v_dtype.itemsize) # check _f_walk for object in desc._f_walk(): if isinstance(object, Description): print("******begin object*************", end=' ') print("name -->", object._v_name) # print("name -->", object._v_dtype.name) # print("object childs-->", object._v_names) # print("object nested childs-->", object._v_nested_names) print("totalsize-->", object._v_dtype.itemsize) else: # pass print("leaf -->", object._v_name, object.dtype) class testDescParent(IsDescription): c = Int32Col() class testDesc(testDescParent): pass assert 'c' in testDesc.columns ## Local Variables: ## mode: python ## py-indent-offset: 4 ## tab-width: 4 ## fill-column: 72 ## End:
bsd-3-clause
Cartrdge/thumbor-plugins
thumbor_plugins/optimizers/pngquant.py
2
1448
#!/usr/bin/python
# -*- coding: utf-8 -*-

# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki

# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license

import os
import subprocess

from thumbor.optimizers import BaseOptimizer
from thumbor.utils import logger


class Optimizer(BaseOptimizer):
    """PNG optimizer that shells out to the ``pngquant`` binary.

    Configuration is read from the thumbor context:
    ``PNGQUANT_PATH`` (required, path to the executable),
    ``PNGQUANT_QUALITY`` (default ``'65-80'``) and
    ``PNGQUANT_SPEED`` (default ``'3'``).
    """

    def __init__(self, context):
        super(Optimizer, self).__init__(context)

        self.runnable = True
        self.pngquant_path = self.context.config.PNGQUANT_PATH
        self.pngquant_quality = self.context.config.PNGQUANT_QUALITY or '65-80'
        self.pngquant_speed = self.context.config.PNGQUANT_SPEED or '3'

        # Disable the optimizer up front (instead of failing per-request)
        # when the configured binary is missing or not executable.
        if not (os.path.isfile(self.pngquant_path) and
                os.access(self.pngquant_path, os.X_OK)):
            logger.error("ERROR pngquant path '{0}' is not accessible".format(
                self.pngquant_path))
            self.runnable = False

    def should_run(self, image_extension, buffer):
        # Only handle PNGs, and only when the binary was found at startup.
        return 'png' in image_extension and self.runnable

    def optimize(self, buffer, input_file, output_file):
        # Build an argv list instead of interpolating the (caller-supplied)
        # file paths into a shell pipeline: this removes the shell-injection
        # risk of the previous `cat %s | pngquant ... > %s` command and drops
        # the needless `cat` subprocess.  pngquant reads the image from stdin
        # ('-') and writes the quantized PNG to stdout.
        command = [
            self.pngquant_path,
            '--speed', str(self.pngquant_speed),
            '--quality=%s' % self.pngquant_quality,
            '-',
        ]
        logger.debug("[PNGQUANT] running: " + ' '.join(command))
        with open(input_file, 'rb') as source, open(output_file, 'wb') as dest:
            subprocess.call(command, stdin=source, stdout=dest)
mit
azaghal/ansible
lib/ansible/plugins/lookup/dict.py
86
2219
# (c) 2014, Kent R. Spillner <kspillner@acm.org>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

DOCUMENTATION = """
    lookup: dict
    version_added: "1.5"
    short_description: returns key/value pair items from dictionaries
    description:
        - Takes dictionaries as input and returns a list with each item in the list being a dictionary with 'key' and 'value' as
          keys to the previous dictionary's structure.
    options:
        _terms:
            description:
                - A list of dictionaries
            required: True
"""

EXAMPLES = """
vars:
  users:
    alice:
      name: Alice Appleworth
      telephone: 123-456-7890
    bob:
      name: Bob Bananarama
      telephone: 987-654-3210
tasks:
  # with predefined vars
  - name: Print phone records
    debug:
      msg: "User {{ item.key }} is {{ item.value.name }} ({{ item.value.telephone }})"
    loop: "{{ lookup('dict', users) }}"
  # with inline dictionary
  - name: show dictionary
    debug:
      msg: "{{item.key}}: {{item.value}}"
    with_dict: {a: 1, b: 2, c: 3}
  # Items from loop can be used in when: statements
  - name: set_fact when alice in key
    set_fact:
      alice_exists: true
    loop: "{{ lookup('dict', users) }}"
    when: "'alice' in item.key"
"""

RETURN = """
  _list:
    description:
        - list of composed dictonaries with key and value
    type: list
"""

from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
from ansible.module_utils.common._collections_compat import Mapping


class LookupModule(LookupBase):
    """Turn each input mapping into a flat list of {key, value} items."""

    def run(self, terms, variables=None, **kwargs):
        # A bare (non-list) term can still arrive through the legacy
        # with_dict path; normalize it to a one-element list.
        # FIXME: can remove once with_ special case is removed
        term_list = terms if isinstance(terms, list) else [terms]

        pairs = []
        for mapping in term_list:
            # Accept any Mapping (notably hostvars), not just plain dicts.
            if not isinstance(mapping, Mapping):
                raise AnsibleError("with_dict expects a dict")
            pairs += self._flatten_hash_to_list(mapping)
        return pairs
gpl-3.0
ropik/chromium
tools/safely-roll-webkit.py
9
2418
#!/usr/bin/env python # Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Generate a CL to roll webkit to the specified revision number and post it to Rietveld so that the CL will land automatically if it passes the commit-queue's checks. """ import logging import optparse import os import re import sys import find_depot_tools import scm import subprocess2 def die_with_error(msg): print >> sys.stderr, msg sys.exit(1) def process_deps(path, new_rev): """Update webkit_revision to |new_issue|. A bit hacky, could it be made better? """ content = open(path).read() old_line = r'(\s+)"webkit_revision": "(\d+)",' new_line = r'\1"webkit_revision": "%d",' % new_rev new_content = re.sub(old_line, new_line, content, 1) if new_content == content: die_with_error('Failed to update the DEPS file') open(path, 'w').write(new_content) def main(): tool_dir = os.path.dirname(os.path.abspath(__file__)) parser = optparse.OptionParser(usage='<new webkit rev>') parser.add_option('-v', '--verbose', action='count', default=0) options, args = parser.parse_args() logging.basicConfig( level= [logging.WARNING, logging.INFO, logging.DEBUG][ min(2, options.verbose)]) if len(args) != 1: parser.error('Need only one arg: new webkit revision to roll to.') root_dir = os.path.dirname(tool_dir) os.chdir(root_dir) new_rev = int(args[0]) msg = 'Roll webkit revision to %s' % new_rev print msg # Silence the editor. 
os.environ['EDITOR'] = 'true' old_branch = scm.GIT.GetBranch(root_dir) if old_branch == 'webkit_roll': parser.error( 'Please delete the branch webkit_roll and move to a different branch') subprocess2.check_output( ['git', 'checkout', '-b', 'webkit_roll', 'origin/master']) try: process_deps(os.path.join(root_dir, 'DEPS'), new_rev) commit_msg = msg + '\n\nTBR=\n' subprocess2.check_output(['git', 'commit', '-m', commit_msg, 'DEPS']) subprocess2.check_call(['git', 'diff', 'origin/master']) subprocess2.check_call(['git', 'cl', 'upload', '--use-commit-queue']) finally: subprocess2.check_output(['git', 'checkout', old_branch]) subprocess2.check_output(['git', 'branch', '-D', 'webkit_roll']) return 0 if __name__ == '__main__': sys.exit(main())
bsd-3-clause
nzavagli/UnrealPy
UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/SQLAlchemy-1.0.6/test/orm/test_instrumentation.py
31
18708
from sqlalchemy.testing import assert_raises, assert_raises_message import sqlalchemy as sa from sqlalchemy import MetaData, Integer, ForeignKey, util, event from sqlalchemy.orm import mapper, relationship, create_session, \ attributes, class_mapper, clear_mappers, instrumentation, events from sqlalchemy.testing.schema import Table from sqlalchemy.testing.schema import Column from sqlalchemy.testing import eq_, ne_ from sqlalchemy.testing import fixtures from sqlalchemy import testing class InitTest(fixtures.ORMTest): def fixture(self): return Table('t', MetaData(), Column('id', Integer, primary_key=True), Column('type', Integer), Column('x', Integer), Column('y', Integer)) def register(self, cls, canary): original_init = cls.__init__ instrumentation.register_class(cls) ne_(cls.__init__, original_init) manager = instrumentation.manager_of_class(cls) def init(state, args, kwargs): canary.append((cls, 'init', state.class_)) event.listen(manager, 'init', init, raw=True) def test_ai(self): inits = [] class A(object): def __init__(self): inits.append((A, '__init__')) obj = A() eq_(inits, [(A, '__init__')]) def test_A(self): inits = [] class A(object): pass self.register(A, inits) obj = A() eq_(inits, [(A, 'init', A)]) def test_Ai(self): inits = [] class A(object): def __init__(self): inits.append((A, '__init__')) self.register(A, inits) obj = A() eq_(inits, [(A, 'init', A), (A, '__init__')]) def test_ai_B(self): inits = [] class A(object): def __init__(self): inits.append((A, '__init__')) class B(A): pass self.register(B, inits) obj = A() eq_(inits, [(A, '__init__')]) del inits[:] obj = B() eq_(inits, [(B, 'init', B), (A, '__init__')]) def test_ai_Bi(self): inits = [] class A(object): def __init__(self): inits.append((A, '__init__')) class B(A): def __init__(self): inits.append((B, '__init__')) super(B, self).__init__() self.register(B, inits) obj = A() eq_(inits, [(A, '__init__')]) del inits[:] obj = B() eq_(inits, [(B, 'init', B), (B, '__init__'), (A, '__init__')]) 
def test_Ai_bi(self): inits = [] class A(object): def __init__(self): inits.append((A, '__init__')) self.register(A, inits) class B(A): def __init__(self): inits.append((B, '__init__')) super(B, self).__init__() obj = A() eq_(inits, [(A, 'init', A), (A, '__init__')]) del inits[:] obj = B() eq_(inits, [(B, '__init__'), (A, 'init', B), (A, '__init__')]) def test_Ai_Bi(self): inits = [] class A(object): def __init__(self): inits.append((A, '__init__')) self.register(A, inits) class B(A): def __init__(self): inits.append((B, '__init__')) super(B, self).__init__() self.register(B, inits) obj = A() eq_(inits, [(A, 'init', A), (A, '__init__')]) del inits[:] obj = B() eq_(inits, [(B, 'init', B), (B, '__init__'), (A, '__init__')]) def test_Ai_B(self): inits = [] class A(object): def __init__(self): inits.append((A, '__init__')) self.register(A, inits) class B(A): pass self.register(B, inits) obj = A() eq_(inits, [(A, 'init', A), (A, '__init__')]) del inits[:] obj = B() eq_(inits, [(B, 'init', B), (A, '__init__')]) def test_Ai_Bi_Ci(self): inits = [] class A(object): def __init__(self): inits.append((A, '__init__')) self.register(A, inits) class B(A): def __init__(self): inits.append((B, '__init__')) super(B, self).__init__() self.register(B, inits) class C(B): def __init__(self): inits.append((C, '__init__')) super(C, self).__init__() self.register(C, inits) obj = A() eq_(inits, [(A, 'init', A), (A, '__init__')]) del inits[:] obj = B() eq_(inits, [(B, 'init', B), (B, '__init__'), (A, '__init__')]) del inits[:] obj = C() eq_(inits, [(C, 'init', C), (C, '__init__'), (B, '__init__'), (A, '__init__')]) def test_Ai_bi_Ci(self): inits = [] class A(object): def __init__(self): inits.append((A, '__init__')) self.register(A, inits) class B(A): def __init__(self): inits.append((B, '__init__')) super(B, self).__init__() class C(B): def __init__(self): inits.append((C, '__init__')) super(C, self).__init__() self.register(C, inits) obj = A() eq_(inits, [(A, 'init', A), (A, '__init__')]) 
del inits[:] obj = B() eq_(inits, [(B, '__init__'), (A, 'init', B), (A, '__init__')]) del inits[:] obj = C() eq_(inits, [(C, 'init', C), (C, '__init__'), (B, '__init__'), (A, '__init__')]) def test_Ai_b_Ci(self): inits = [] class A(object): def __init__(self): inits.append((A, '__init__')) self.register(A, inits) class B(A): pass class C(B): def __init__(self): inits.append((C, '__init__')) super(C, self).__init__() self.register(C, inits) obj = A() eq_(inits, [(A, 'init', A), (A, '__init__')]) del inits[:] obj = B() eq_(inits, [(A, 'init', B), (A, '__init__')]) del inits[:] obj = C() eq_(inits, [(C, 'init', C), (C, '__init__'), (A, '__init__')]) def test_Ai_B_Ci(self): inits = [] class A(object): def __init__(self): inits.append((A, '__init__')) self.register(A, inits) class B(A): pass self.register(B, inits) class C(B): def __init__(self): inits.append((C, '__init__')) super(C, self).__init__() self.register(C, inits) obj = A() eq_(inits, [(A, 'init', A), (A, '__init__')]) del inits[:] obj = B() eq_(inits, [(B, 'init', B), (A, '__init__')]) del inits[:] obj = C() eq_(inits, [(C, 'init', C), (C, '__init__'), (A, '__init__')]) def test_Ai_B_C(self): inits = [] class A(object): def __init__(self): inits.append((A, '__init__')) self.register(A, inits) class B(A): pass self.register(B, inits) class C(B): pass self.register(C, inits) obj = A() eq_(inits, [(A, 'init', A), (A, '__init__')]) del inits[:] obj = B() eq_(inits, [(B, 'init', B), (A, '__init__')]) del inits[:] obj = C() eq_(inits, [(C, 'init', C), (A, '__init__')]) def test_A_Bi_C(self): inits = [] class A(object): pass self.register(A, inits) class B(A): def __init__(self): inits.append((B, '__init__')) self.register(B, inits) class C(B): pass self.register(C, inits) obj = A() eq_(inits, [(A, 'init', A)]) del inits[:] obj = B() eq_(inits, [(B, 'init', B), (B, '__init__')]) del inits[:] obj = C() eq_(inits, [(C, 'init', C), (B, '__init__')]) def test_A_B_Ci(self): inits = [] class A(object): pass 
self.register(A, inits) class B(A): pass self.register(B, inits) class C(B): def __init__(self): inits.append((C, '__init__')) self.register(C, inits) obj = A() eq_(inits, [(A, 'init', A)]) del inits[:] obj = B() eq_(inits, [(B, 'init', B)]) del inits[:] obj = C() eq_(inits, [(C, 'init', C), (C, '__init__')]) def test_A_B_C(self): inits = [] class A(object): pass self.register(A, inits) class B(A): pass self.register(B, inits) class C(B): pass self.register(C, inits) obj = A() eq_(inits, [(A, 'init', A)]) del inits[:] obj = B() eq_(inits, [(B, 'init', B)]) del inits[:] obj = C() eq_(inits, [(C, 'init', C)]) def test_defaulted_init(self): class X(object): def __init__(self_, a, b=123, c='abc'): self_.a = a self_.b = b self_.c = c instrumentation.register_class(X) o = X('foo') eq_(o.a, 'foo') eq_(o.b, 123) eq_(o.c, 'abc') class Y(object): unique = object() class OutOfScopeForEval(object): def __repr__(self_): # misleading repr return '123' outofscope = OutOfScopeForEval() def __init__(self_, u=unique, o=outofscope): self_.u = u self_.o = o instrumentation.register_class(Y) o = Y() assert o.u is Y.unique assert o.o is Y.outofscope class MapperInitTest(fixtures.ORMTest): def fixture(self): return Table('t', MetaData(), Column('id', Integer, primary_key=True), Column('type', Integer), Column('x', Integer), Column('y', Integer)) def test_partially_mapped_inheritance(self): class A(object): pass class B(A): pass class C(B): def __init__(self, x): pass m = mapper(A, self.fixture()) # B is not mapped in the current implementation assert_raises(sa.orm.exc.UnmappedClassError, class_mapper, B) # C is not mapped in the current implementation assert_raises(sa.orm.exc.UnmappedClassError, class_mapper, C) def test_del_warning(self): class A(object): def __del__(self): pass assert_raises_message( sa.exc.SAWarning, r"__del__\(\) method on class " "<class '.*\.A'> will cause " "unreachable cycles and memory leaks, as SQLAlchemy " "instrumentation often creates reference cycles. 
" "Please remove this method.", mapper, A, self.fixture() ) class OnLoadTest(fixtures.ORMTest): """Check that Events.load is not hit in regular attributes operations.""" def test_basic(self): import pickle global A class A(object): pass def canary(instance): assert False try: instrumentation.register_class(A) manager = instrumentation.manager_of_class(A) event.listen(manager, 'load', canary) a = A() p_a = pickle.dumps(a) re_a = pickle.loads(p_a) finally: del A class NativeInstrumentationTest(fixtures.ORMTest): def test_register_reserved_attribute(self): class T(object): pass instrumentation.register_class(T) manager = instrumentation.manager_of_class(T) sa = instrumentation.ClassManager.STATE_ATTR ma = instrumentation.ClassManager.MANAGER_ATTR fails = lambda method, attr: assert_raises( KeyError, getattr(manager, method), attr, property()) fails('install_member', sa) fails('install_member', ma) fails('install_descriptor', sa) fails('install_descriptor', ma) def test_mapped_stateattr(self): t = Table('t', MetaData(), Column('id', Integer, primary_key=True), Column(instrumentation.ClassManager.STATE_ATTR, Integer)) class T(object): pass assert_raises(KeyError, mapper, T, t) def test_mapped_managerattr(self): t = Table('t', MetaData(), Column('id', Integer, primary_key=True), Column(instrumentation.ClassManager.MANAGER_ATTR, Integer)) class T(object): pass assert_raises(KeyError, mapper, T, t) class Py3KFunctionInstTest(fixtures.ORMTest): __requires__ = ("python3", ) def _instrument(self, cls): manager = instrumentation.register_class(cls) canary = [] def check(target, args, kwargs): canary.append((args, kwargs)) event.listen(manager, "init", check) return cls, canary def test_kw_only_args(self): cls, canary = self._kw_only_fixture() a = cls("a", b="b", c="c") eq_(canary, [(('a', ), {'b': 'b', 'c': 'c'})]) def test_kw_plus_posn_args(self): cls, canary = self._kw_plus_posn_fixture() a = cls("a", 1, 2, 3, b="b", c="c") eq_(canary, [(('a', 1, 2, 3), {'b': 'b', 'c': 
'c'})]) def test_kw_only_args_plus_opt(self): cls, canary = self._kw_opt_fixture() a = cls("a", b="b") eq_(canary, [(('a', ), {'b': 'b', 'c': 'c'})]) canary[:] = [] a = cls("a", b="b", c="d") eq_(canary, [(('a', ), {'b': 'b', 'c': 'd'})]) def test_kw_only_sig(self): cls, canary = self._kw_only_fixture() assert_raises( TypeError, cls, "a", "b", "c" ) def test_kw_plus_opt_sig(self): cls, canary = self._kw_only_fixture() assert_raises( TypeError, cls, "a", "b", "c" ) assert_raises( TypeError, cls, "a", "b", c="c" ) if util.py3k: _locals = {} exec(""" def _kw_only_fixture(self): class A(object): def __init__(self, a, *, b, c): self.a = a self.b = b self.c = c return self._instrument(A) def _kw_plus_posn_fixture(self): class A(object): def __init__(self, a, *args, b, c): self.a = a self.b = b self.c = c return self._instrument(A) def _kw_opt_fixture(self): class A(object): def __init__(self, a, *, b, c="c"): self.a = a self.b = b self.c = c return self._instrument(A) """, _locals) for k in _locals: setattr(Py3KFunctionInstTest, k, _locals[k]) class MiscTest(fixtures.ORMTest): """Seems basic, but not directly covered elsewhere!""" def test_compileonattr(self): t = Table('t', MetaData(), Column('id', Integer, primary_key=True), Column('x', Integer)) class A(object): pass mapper(A, t) a = A() assert a.id is None def test_compileonattr_rel(self): m = MetaData() t1 = Table('t1', m, Column('id', Integer, primary_key=True), Column('x', Integer)) t2 = Table('t2', m, Column('id', Integer, primary_key=True), Column('t1_id', Integer, ForeignKey('t1.id'))) class A(object): pass class B(object): pass mapper(A, t1, properties=dict(bs=relationship(B))) mapper(B, t2) a = A() assert not a.bs def test_uninstrument(self): class A(object): pass manager = instrumentation.register_class(A) attributes.register_attribute(A, 'x', uselist=False, useobject=False) assert instrumentation.manager_of_class(A) is manager instrumentation.unregister_class(A) assert instrumentation.manager_of_class(A) is 
None assert not hasattr(A, 'x') # I prefer 'is' here but on pypy # it seems only == works assert A.__init__ == object.__init__ def test_compileonattr_rel_backref_a(self): m = MetaData() t1 = Table('t1', m, Column('id', Integer, primary_key=True), Column('x', Integer)) t2 = Table('t2', m, Column('id', Integer, primary_key=True), Column('t1_id', Integer, ForeignKey('t1.id'))) class Base(object): def __init__(self, *args, **kwargs): pass for base in object, Base: class A(base): pass class B(base): pass mapper(A, t1, properties=dict(bs=relationship(B, backref='a'))) mapper(B, t2) b = B() assert b.a is None a = A() b.a = a session = create_session() session.add(b) assert a in session, "base is %s" % base def test_compileonattr_rel_backref_b(self): m = MetaData() t1 = Table('t1', m, Column('id', Integer, primary_key=True), Column('x', Integer)) t2 = Table('t2', m, Column('id', Integer, primary_key=True), Column('t1_id', Integer, ForeignKey('t1.id'))) class Base(object): def __init__(self): pass class Base_AKW(object): def __init__(self, *args, **kwargs): pass for base in object, Base, Base_AKW: class A(base): pass class B(base): pass mapper(A, t1) mapper(B, t2, properties=dict(a=relationship(A, backref='bs'))) a = A() b = B() b.a = a session = create_session() session.add(a) assert b in session, 'base: %s' % base
mit
sometallgit/AutoUploader
Python27/Lib/user.py
313
1627
"""Hook to allow user-specified customization code to run. As a policy, Python doesn't run user-specified code on startup of Python programs (interactive sessions execute the script specified in the PYTHONSTARTUP environment variable if it exists). However, some programs or sites may find it convenient to allow users to have a standard customization file, which gets run when a program requests it. This module implements such a mechanism. A program that wishes to use the mechanism must execute the statement import user The user module looks for a file .pythonrc.py in the user's home directory and if it can be opened, execfile()s it in its own global namespace. Errors during this phase are not caught; that's up to the program that imports the user module, if it wishes. The user's .pythonrc.py could conceivably test for sys.version if it wishes to do different things depending on the Python version. """ from warnings import warnpy3k warnpy3k("the user module has been removed in Python 3.0", stacklevel=2) del warnpy3k import os home = os.curdir # Default if 'HOME' in os.environ: home = os.environ['HOME'] elif os.name == 'posix': home = os.path.expanduser("~/") elif os.name == 'nt': # Contributed by Jeff Bauer if 'HOMEPATH' in os.environ: if 'HOMEDRIVE' in os.environ: home = os.environ['HOMEDRIVE'] + os.environ['HOMEPATH'] else: home = os.environ['HOMEPATH'] pythonrc = os.path.join(home, ".pythonrc.py") try: f = open(pythonrc) except IOError: pass else: f.close() execfile(pythonrc)
mit
lnielsen/invenio
invenio/modules/ranker/registry.py
4
1415
# -*- coding: utf-8 -*- # ## This file is part of Invenio. ## Copyright (C) 2013, 2014 CERN. ## ## Invenio is free software; you can redistribute it and/or ## modify it under the terms of the GNU General Public License as ## published by the Free Software Foundation; either version 2 of the ## License, or (at your option) any later version. ## ## Invenio is distributed in the hope that it will be useful, but ## WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ## General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with Invenio; if not, write to the Free Software Foundation, Inc., ## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. import os from flask.ext.registry import PkgResourcesDirDiscoveryRegistry, \ ModuleAutoDiscoveryRegistry, RegistryProxy from invenio.utils.datastructures import LazyDict rankext = RegistryProxy('rankext', ModuleAutoDiscoveryRegistry, 'rankext') configuration_proxy = RegistryProxy('rankext.configuration', PkgResourcesDirDiscoveryRegistry, 'configuration', registry_namespace=rankext) configuration = LazyDict(lambda: dict((os.path.basename(f), f) for f in configuration_proxy))
gpl-2.0
40223136/w17test2
static/Brython3.1.3-20150514-095342/Lib/abc.py
765
8057
# Copyright 2007 Google, Inc. All Rights Reserved. # Licensed to PSF under a Contributor Agreement. """Abstract Base Classes (ABCs) according to PEP 3119.""" from _weakrefset import WeakSet def abstractmethod(funcobj): """A decorator indicating abstract methods. Requires that the metaclass is ABCMeta or derived from it. A class that has a metaclass derived from ABCMeta cannot be instantiated unless all of its abstract methods are overridden. The abstract methods can be called using any of the normal 'super' call mechanisms. Usage: class C(metaclass=ABCMeta): @abstractmethod def my_abstract_method(self, ...): ... """ funcobj.__isabstractmethod__ = True return funcobj class abstractclassmethod(classmethod): """ A decorator indicating abstract classmethods. Similar to abstractmethod. Usage: class C(metaclass=ABCMeta): @abstractclassmethod def my_abstract_classmethod(cls, ...): ... 'abstractclassmethod' is deprecated. Use 'classmethod' with 'abstractmethod' instead. """ __isabstractmethod__ = True def __init__(self, callable): callable.__isabstractmethod__ = True super().__init__(callable) class abstractstaticmethod(staticmethod): """ A decorator indicating abstract staticmethods. Similar to abstractmethod. Usage: class C(metaclass=ABCMeta): @abstractstaticmethod def my_abstract_staticmethod(...): ... 'abstractstaticmethod' is deprecated. Use 'staticmethod' with 'abstractmethod' instead. """ __isabstractmethod__ = True def __init__(self, callable): callable.__isabstractmethod__ = True super().__init__(callable) class abstractproperty(property): """ A decorator indicating abstract properties. Requires that the metaclass is ABCMeta or derived from it. A class that has a metaclass derived from ABCMeta cannot be instantiated unless all of its abstract properties are overridden. The abstract properties can be called using any of the normal 'super' call mechanisms. Usage: class C(metaclass=ABCMeta): @abstractproperty def my_abstract_property(self): ... 
This defines a read-only property; you can also define a read-write abstract property using the 'long' form of property declaration: class C(metaclass=ABCMeta): def getx(self): ... def setx(self, value): ... x = abstractproperty(getx, setx) 'abstractproperty' is deprecated. Use 'property' with 'abstractmethod' instead. """ __isabstractmethod__ = True class ABCMeta(type): """Metaclass for defining Abstract Base Classes (ABCs). Use this metaclass to create an ABC. An ABC can be subclassed directly, and then acts as a mix-in class. You can also register unrelated concrete classes (even built-in classes) and unrelated ABCs as 'virtual subclasses' -- these and their descendants will be considered subclasses of the registering ABC by the built-in issubclass() function, but the registering ABC won't show up in their MRO (Method Resolution Order) nor will method implementations defined by the registering ABC be callable (not even via super()). """ # A global counter that is incremented each time a class is # registered as a virtual subclass of anything. It forces the # negative cache to be cleared before its next use. _abc_invalidation_counter = 0 def __new__(mcls, name, bases, namespace): cls = super().__new__(mcls, name, bases, namespace) # Compute set of abstract method names abstracts = {name for name, value in namespace.items() if getattr(value, "__isabstractmethod__", False)} for base in bases: for name in getattr(base, "__abstractmethods__", set()): value = getattr(cls, name, None) if getattr(value, "__isabstractmethod__", False): abstracts.add(name) cls.__abstractmethods__ = frozenset(abstracts) # Set up inheritance registry cls._abc_registry = WeakSet() cls._abc_cache = WeakSet() cls._abc_negative_cache = WeakSet() cls._abc_negative_cache_version = ABCMeta._abc_invalidation_counter return cls def register(cls, subclass): """Register a virtual subclass of an ABC. Returns the subclass, to allow usage as a class decorator. 
""" if not isinstance(subclass, type): raise TypeError("Can only register classes") if issubclass(subclass, cls): return subclass # Already a subclass # Subtle: test for cycles *after* testing for "already a subclass"; # this means we allow X.register(X) and interpret it as a no-op. if issubclass(cls, subclass): # This would create a cycle, which is bad for the algorithm below raise RuntimeError("Refusing to create an inheritance cycle") cls._abc_registry.add(subclass) ABCMeta._abc_invalidation_counter += 1 # Invalidate negative cache return subclass def _dump_registry(cls, file=None): """Debug helper to print the ABC registry.""" print("Class: %s.%s" % (cls.__module__, cls.__name__), file=file) print("Inv.counter: %s" % ABCMeta._abc_invalidation_counter, file=file) for name in sorted(cls.__dict__.keys()): if name.startswith("_abc_"): value = getattr(cls, name) print("%s: %r" % (name, value), file=file) def __instancecheck__(cls, instance): """Override for isinstance(instance, cls).""" # Inline the cache checking subclass = instance.__class__ if subclass in cls._abc_cache: return True subtype = type(instance) if subtype is subclass: if (cls._abc_negative_cache_version == ABCMeta._abc_invalidation_counter and subclass in cls._abc_negative_cache): return False # Fall back to the subclass check. 
return cls.__subclasscheck__(subclass) return any(cls.__subclasscheck__(c) for c in {subclass, subtype}) def __subclasscheck__(cls, subclass): """Override for issubclass(subclass, cls).""" # Check cache if subclass in cls._abc_cache: return True # Check negative cache; may have to invalidate if cls._abc_negative_cache_version < ABCMeta._abc_invalidation_counter: # Invalidate the negative cache cls._abc_negative_cache = WeakSet() cls._abc_negative_cache_version = ABCMeta._abc_invalidation_counter elif subclass in cls._abc_negative_cache: return False # Check the subclass hook ok = cls.__subclasshook__(subclass) if ok is not NotImplemented: assert isinstance(ok, bool) if ok: cls._abc_cache.add(subclass) else: cls._abc_negative_cache.add(subclass) return ok # Check if it's a direct subclass if cls in getattr(subclass, '__mro__', ()): cls._abc_cache.add(subclass) return True # Check if it's a subclass of a registered class (recursive) for rcls in cls._abc_registry: if issubclass(subclass, rcls): cls._abc_cache.add(subclass) return True # Check if it's a subclass of a subclass (recursive) for scls in cls.__subclasses__(): if issubclass(subclass, scls): cls._abc_cache.add(subclass) return True # No dice; update negative cache cls._abc_negative_cache.add(subclass) return False
gpl-3.0
tymofij/adofex
transifex/addons/cla/views.py
1
3292
from django.contrib import messages from django.contrib.auth.models import User from django.contrib.auth.decorators import login_required from django.core.urlresolvers import reverse from django.db.models import Q from django.http import HttpResponse, HttpResponseRedirect from django.template import RequestContext from django.shortcuts import get_object_or_404, render_to_response from django.utils.translation import ugettext_lazy as _ from django.views.generic.list_detail import object_list from transifex.projects.models import Project from transifex.projects.permissions import pr_project_add_change from transifex.projects.permissions.project import ProjectPermission from transifex.txcommon.decorators import one_perm_required_or_403 from transifex.txcommon.views import permission_denied from cla.forms import ClaForm from cla.models import Cla from cla.handlers import handle_pre_team @login_required def view(request, project_slug): project = get_object_or_404(Project, slug=project_slug) cla = get_object_or_404(Cla, project=project) return render_to_response( "view_cla.html", {'project': project, 'cla': cla}, context_instance= RequestContext(request) ) @login_required def cla_project_sign(request, project_slug): project = get_object_or_404(Project, slug=project_slug) cla = get_object_or_404(Cla, project=project) check = ProjectPermission(request.user) if not check.submit_translations(project, any_team=True): return permission_denied(request) try: signed_cla = request.user.cla_set.filter(project=project)[0] except IndexError: signed_cla = None if request.method == 'POST' and not signed_cla: form = ClaForm(request.POST) if form.is_valid(): kwargs = {'cla_sign':True, 'project':project, 'user':request.user} handle_pre_team(None, **kwargs) messages.success(request, _("You have signed the CLA.")) return HttpResponseRedirect(reverse('cla_project_sign', args=[project_slug]),) else: form = ClaForm() return render_to_response( "project_cla.html", {'project': project, 'cla': 
cla, 'signed_cla': signed_cla, 'form': form}, context_instance= RequestContext(request) ) @login_required @one_perm_required_or_403(pr_project_add_change, (Project, 'slug__exact', 'project_slug')) def users(request, project_slug): project = get_object_or_404(Project, slug=project_slug) cla = get_object_or_404(Cla, project=project) signed_user_ids = cla.users.values_list('id', flat=True).query unsigned_user_list = User.objects.filter( Q(team_coordinators__project=project) | Q(team_members__project=project) | Q(teamrequest__project=project) | Q(teamaccessrequest__team__project=project) ).exclude(id__in=signed_user_ids).distinct() return render_to_response( "user_list.html", {'project': project, 'cla': cla, 'signed_user_list': cla.users.all().order_by('username'), 'unsigned_user_list': unsigned_user_list.order_by('username')}, context_instance= RequestContext(request) )
gpl-3.0
beav/pulp
server/pulp/server/managers/auth/role/cud.py
1
13478
# -*- coding: utf-8 -*- # # Copyright © 2012 Red Hat, Inc. # # This software is licensed to you under the GNU General Public # License as published by the Free Software Foundation; either version # 2 of the License (GPLv2) or (at your option) any later version. # There is NO WARRANTY for this software, express or implied, # including the implied warranties of MERCHANTABILITY, # NON-INFRINGEMENT, or FITNESS FOR A PARTICULAR PURPOSE. You should # have received a copy of GPLv2 along with this software; if not, see # http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt. """ Contains the manager class and exceptions for operations surrounding the creation, update, and deletion on a Pulp Role. """ import re from gettext import gettext as _ from celery import task from pulp.server.async.tasks import Task from pulp.server.auth.authorization import CREATE, READ, UPDATE, DELETE, EXECUTE, \ _operations_not_granted_by_roles from pulp.server.db.model.auth import Role, User from pulp.server.exceptions import (DuplicateResource, InvalidValue, MissingResource, PulpDataException) from pulp.server.managers import factory from pulp.server.util import Delta SUPER_USER_ROLE = 'super-users' _ROLE_NAME_REGEX = re.compile(r'^[\-_A-Za-z0-9]+$') # letters, numbers, underscore, hyphen class RoleManager(object): """ Performs role related functions relating to CRUD operations. """ @staticmethod def create_role(role_id, display_name=None, description=None): """ Creates a new Pulp role. 
:param role_id: unique identifier for the role :type role_id: str :param display_name: user-readable name of the role :type display_name: str :param description: free form text used to describe the role :type description: str :raise DuplicateResource: if there is already a role with the requested name :raise InvalidValue: if any of the fields are unacceptable """ existing_role = Role.get_collection().find_one({'id': role_id}) if existing_role is not None: raise DuplicateResource(role_id) if role_id is None or _ROLE_NAME_REGEX.match(role_id) is None: raise InvalidValue(['role_id']) # Use the ID for the display name if one was not specified display_name = display_name or role_id # Creation create_me = Role(id=role_id, display_name=display_name, description=description) Role.get_collection().save(create_me, safe=True) # Retrieve the role to return the SON object created = Role.get_collection().find_one({'id': role_id}) return created @staticmethod def update_role(role_id, delta): """ Updates a role object. :param role_id: The role identifier. :type role_id: str :param delta: A dict containing update keywords. :type delta: dict :return: The updated object :rtype: dict :raise MissingResource: if the given role does not exist :raise PulpDataException: if update keyword is not supported """ delta.pop('id', None) role = Role.get_collection().find_one({'id': role_id}) if role is None: raise MissingResource(role_id) for key, value in delta.items(): # simple changes if key in ('display_name', 'description',): role[key] = value continue # unsupported raise PulpDataException(_("Update Keyword [%s] is not supported" % key)) Role.get_collection().save(role, safe=True) # Retrieve the user to return the SON object updated = Role.get_collection().find_one({'id': role_id}) return updated @staticmethod def delete_role(role_id): """ Deletes the given role. 
This has the side-effect of revoking any permissions granted to the role from the users in the role, unless those permissions are also granted through another role the user is a memeber of. :param role_id: identifies the role being deleted :type role_id: str :raise InvalidValue: if any of the fields are unacceptable :raise MissingResource: if the given role does not exist """ # Raise exception if role id is invalid if role_id is None or not isinstance(role_id, basestring): raise InvalidValue(['role_id']) # Check whether role exists role = Role.get_collection().find_one({'id': role_id}) if role is None: raise MissingResource(role_id) # Make sure role is not a superuser role if role_id == SUPER_USER_ROLE: raise PulpDataException(_('Role %s cannot be changed') % role_id) # Remove respective roles from users users = factory.user_query_manager().find_users_belonging_to_role(role_id) for item in role['permissions']: for user in users: other_roles = factory.role_query_manager().get_other_roles(role, user['roles']) user_ops = _operations_not_granted_by_roles(item['resource'], item['permission'], other_roles) factory.permission_manager().revoke(item['resource'], user['login'], user_ops) for user in users: user['roles'].remove(role_id) factory.user_manager().update_user(user['login'], Delta(user, 'roles')) Role.get_collection().remove({'id': role_id}, safe=True) @staticmethod def add_permissions_to_role(role_id, resource, operations): """ Add permissions to a role. 
:param role_id: role identifier :type role_id: str :param resource: resource path to grant permissions to :type resource: str :param operations: list or tuple :type operations: list of allowed operations being granted :raise MissingResource: if the given role does not exist """ if role_id == SUPER_USER_ROLE: raise PulpDataException(_('super-users role cannot be changed')) role = Role.get_collection().find_one({'id': role_id}) if role is None: raise MissingResource(role_id) if not role['permissions']: role['permissions'] = [] resource_permission = {} current_ops = [] for item in role['permissions']: if item['resource'] == resource: resource_permission = item current_ops = resource_permission['permission'] if not resource_permission: resource_permission = dict(resource=resource, permission=current_ops) role['permissions'].append(resource_permission) for o in operations: if o in current_ops: continue current_ops.append(o) users = factory.user_query_manager().find_users_belonging_to_role(role_id) for user in users: factory.permission_manager().grant(resource, user['login'], operations) Role.get_collection().save(role, safe=True) @staticmethod def remove_permissions_from_role(role_id, resource, operations): """ Remove permissions from a role. 
:param role_id: role identifier :type role_id: str :param resource: resource path to revoke permissions from :type resource: str :param operations: list or tuple :type operations: list of allowed operations being revoked :raise MissingResource: if the given role does not exist """ if role_id == SUPER_USER_ROLE: raise PulpDataException(_('super-users role cannot be changed')) role = Role.get_collection().find_one({'id': role_id}) if role is None: raise MissingResource(role_id) resource_permission = {} current_ops = [] for item in role['permissions']: if item['resource'] == resource: resource_permission = item current_ops = resource_permission['permission'] if not current_ops: return for o in operations: if o not in current_ops: continue current_ops.remove(o) users = factory.user_query_manager().find_users_belonging_to_role(role_id) for user in users: other_roles = factory.role_query_manager().get_other_roles(role, user['roles']) user_ops = _operations_not_granted_by_roles(resource, operations, other_roles) factory.permission_manager().revoke(resource, user['login'], user_ops) # in no more allowed operations, remove the resource if not current_ops: role['permissions'].remove(resource_permission) Role.get_collection().save(role, safe=True) @staticmethod def add_user_to_role(role_id, login): """ Add a user to a role. This has the side-effect of granting all the permissions granted to the role to the user. 
:param role_id: role identifier :type role_id: str :param login: login of user :type login: str :raise MissingResource: if the given role or user does not exist """ role = Role.get_collection().find_one({'id': role_id}) if role is None: raise MissingResource(role_id) user = User.get_collection().find_one({'login': login}) if user is None: raise InvalidValue(['login']) if role_id in user['roles']: return user['roles'].append(role_id) User.get_collection().save(user, safe=True) for item in role['permissions']: factory.permission_manager().grant(item['resource'], login, item.get('permission', [])) @staticmethod def remove_user_from_role(role_id, login): """ Remove a user from a role. This has the side-effect of revoking all the permissions granted to the role from the user, unless the permissions are also granted by another role. :param role_id: role identifier :type role_id: str :param login: name of user :type login: str :raise MissingResource: if the given role or user does not exist """ role = Role.get_collection().find_one({'id': role_id}) if role is None: raise MissingResource(role_id) user = User.get_collection().find_one({'login': login}) if user is None: raise MissingResource(login) if role_id == SUPER_USER_ROLE and factory.user_query_manager().is_last_super_user(login): raise PulpDataException( _('%(role)s cannot be empty, and %(login)s is the last member') % {'role': SUPER_USER_ROLE, 'login': login}) if role_id not in user['roles']: return user['roles'].remove(role_id) User.get_collection().save(user, safe=True) for item in role['permissions']: other_roles = factory.role_query_manager().get_other_roles(role, user['roles']) user_ops = _operations_not_granted_by_roles(item['resource'], item['permission'], other_roles) factory.permission_manager().revoke(item['resource'], login, user_ops) def ensure_super_user_role(self): """ Ensure that the super user role exists. 
""" role = self.get_role(SUPER_USER_ROLE) if role is None: role = self.create_role(SUPER_USER_ROLE, 'Super Users', 'Role indicates users with admin privileges') role['permissions'] = [{'resource': '/', 'permissions': [CREATE, READ, UPDATE, DELETE, EXECUTE]}] Role.get_collection().save(role, safe=True) @staticmethod def get_role(role): """ Get a Role by id. :param role: A role id to search for :type role: str :return: a Role object that have the given role id. :rtype: Role or None """ return Role.get_collection().find_one({'id': role}) add_permissions_to_role = task(RoleManager.add_permissions_to_role, base=Task, ignore_result=True) add_user_to_role = task(RoleManager.add_user_to_role, base=Task, ignore_result=True) create_role = task(RoleManager.create_role, base=Task) delete_role = task(RoleManager.delete_role, base=Task, ignore_result=True) remove_permissions_from_role = task(RoleManager.remove_permissions_from_role, base=Task, ignore_result=True) remove_user_from_role = task(RoleManager.remove_user_from_role, base=Task, ignore_result=True) update_role = task(RoleManager.update_role, base=Task)
gpl-2.0
varunkothamachu/seldon-server
external/predictor/python/docker/vw_train/scripts/vw_train.py
5
1982
import sys, getopt, argparse from seldon.vw import * import json if __name__ == '__main__': parser = argparse.ArgumentParser(prog='vw') parser.add_argument('--client', help='client', required=True) parser.add_argument('--zkHosts', help='zookeeper') parser.add_argument('--inputPath', help='input base folder to find features data') parser.add_argument('--outputPath', help='output folder to store model') parser.add_argument('--day', help='days to get features data for' , type=int) parser.add_argument('--activate', help='activate model in zookeeper', action='store_true') parser.add_argument('--awsKey', help='aws key - needed if input or output is on AWS and no IAM') parser.add_argument('--awsSecret', help='aws secret - needed if input or output on AWS and no IAM') parser.add_argument('--vwArgs', help='vw training args') parser.add_argument('--namespaces', help='JSON providing per feature namespace mapping - default is no namespaces') parser.add_argument('--include', help='include these features', nargs='*') parser.add_argument('--exclude', help='exclude these features' , nargs='*') parser.add_argument('--target', help='target feature (should contain integer ids in range 1..Num Classes)') parser.add_argument('--target_readable', help='the feature containing the human readable version of target') parser.add_argument('--train_filename', help='convert data to vw training format and save to file rather than directly train using wabbit_wappa') parser.add_argument('--dataType', help='json or csv', default="json") args = parser.parse_args() opts = vars(args) conf = {} for k in opts: if opts[k]: if k == "features" or k == "namespaces": conf[k] = json.loads(opts[k]) else: conf[k] = opts[k] print conf train_filename = opts.get("train_filename",None) vw = VWSeldon(**conf) vw.train(train_filename)
apache-2.0
RackSec/ansible
lib/ansible/modules/utilities/logic/wait_for.py
11
21920
#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2012, Jeroen Hoekx <jeroen@hoekx.be> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. ANSIBLE_METADATA = {'metadata_version': '1.0', 'status': ['stableinterface'], 'supported_by': 'core'} DOCUMENTATION = ''' --- module: wait_for short_description: Waits for a condition before continuing. description: - You can wait for a set amount of time C(timeout), this is the default if nothing is specified. - Waiting for a port to become available is useful for when services are not immediately available after their init scripts return which is true of certain Java application servers. It is also useful when starting guests with the M(virt) module and needing to pause until they are ready. - This module can also be used to wait for a regex match a string to be present in a file. - In 1.6 and later, this module can also be used to wait for a file to be available or absent on the filesystem. - In 1.8 and later, this module can also be used to wait for active connections to be closed before continuing, useful if a node is being rotated out of a load balancer pool. 
version_added: "0.7" options: host: description: - A resolvable hostname or IP address to wait for required: false default: "127.0.0.1" timeout: description: - maximum number of seconds to wait for required: false default: 300 connect_timeout: description: - maximum number of seconds to wait for a connection to happen before closing and retrying required: false default: 5 delay: description: - number of seconds to wait before starting to poll required: false default: 0 port: description: - port number to poll required: false default: null active_connection_states: description: - The list of tcp connection states which are counted as active connections default: ['ESTABLISHED','SYN_SENT','SYN_RECV','FIN_WAIT1','FIN_WAIT2','TIME_WAIT'] version_added: "2.3" state: description: - either C(present), C(started), or C(stopped), C(absent), or C(drained) - When checking a port C(started) will ensure the port is open, C(stopped) will check that it is closed, C(drained) will check for active connections - When checking for a file or a search string C(present) or C(started) will ensure that the file or string is present before continuing, C(absent) will check that file is absent or removed choices: [ "present", "started", "stopped", "absent", "drained" ] required: False default: "started" path: version_added: "1.4" required: false default: null description: - path to a file on the filesytem that must exist before continuing search_regex: version_added: "1.4" required: false default: null description: - Can be used to match a string in either a file or a socket connection. Defaults to a multiline regex. exclude_hosts: version_added: "1.8" required: false default: null description: - list of hosts or IPs to ignore when looking for active TCP connections for C(drained) state sleep: version_added: "2.3" required: false default: 1 description: - Number of seconds to sleep between checks, before 2.3 this was hardcoded to 1 second. 
notes: - The ability to use search_regex with a port connection was added in 1.7. requirements: [] author: - "Jeroen Hoekx (@jhoekx)" - "John Jarvis (@jarv)" - "Andrii Radyk (@AnderEnder)" ''' EXAMPLES = ''' # wait 300 seconds for port 8000 to become open on the host, don't start checking for 10 seconds - wait_for: port: 8000 delay: 10 # wait 300 seconds for port 8000 of any IP to close active connections, don't start checking for 10 seconds - wait_for: host: 0.0.0.0 port: 8000 delay: 10 state: drained # wait 300 seconds for port 8000 of any IP to close active connections, ignoring connections for specified hosts - wait_for: host: 0.0.0.0 port: 8000 state: drained exclude_hosts: 10.2.1.2,10.2.1.3 # wait until the file /tmp/foo is present before continuing - wait_for: path: /tmp/foo # wait until the string "completed" is in the file /tmp/foo before continuing - wait_for: path: /tmp/foo search_regex: completed # wait until the lock file is removed - wait_for: path: /var/lock/file.lock state: absent # wait until the process is finished and pid was destroyed - wait_for: path: /proc/3466/status state: absent # wait 300 seconds for port 22 to become open and contain "OpenSSH", don't assume the inventory_hostname is resolvable # and don't start checking for 10 seconds - local_action: wait_for port=22 host="{{ ansible_ssh_host | default(inventory_hostname) }}" search_regex=OpenSSH delay=10 ''' import binascii import datetime import math import re import select import socket import sys import time from ansible.module_utils._text import to_native HAS_PSUTIL = False try: import psutil HAS_PSUTIL = True # just because we can import it on Linux doesn't mean we will use it except ImportError: pass class TCPConnectionInfo(object): """ This is a generic TCP Connection Info strategy class that relies on the psutil module, which is not ideal for targets, but necessary for cross platform support. A subclass may wish to override some or all of these methods. 
- _get_exclude_ips() - get_active_connections() All subclasses MUST define platform and distribution (which may be None). """ platform = 'Generic' distribution = None match_all_ips = { socket.AF_INET: '0.0.0.0', socket.AF_INET6: '::', } ipv4_mapped_ipv6_address = { 'prefix': '::ffff', 'match_all': '::ffff:0.0.0.0' } def __new__(cls, *args, **kwargs): return load_platform_subclass(TCPConnectionInfo, args, kwargs) def __init__(self, module): self.module = module self.ips = _convert_host_to_ip(module.params['host']) self.port = int(self.module.params['port']) self.exclude_ips = self._get_exclude_ips() if not HAS_PSUTIL: module.fail_json(msg="psutil module required for wait_for") def _get_exclude_ips(self): exclude_hosts = self.module.params['exclude_hosts'] exclude_ips = [] if exclude_hosts is not None: for host in exclude_hosts: exclude_ips.extend(_convert_host_to_ip(host)) return exclude_ips def get_active_connections_count(self): active_connections = 0 for p in psutil.process_iter(): connections = p.get_connections(kind='inet') for conn in connections: if conn.status not in self.module.params['active_connection_states']: continue (local_ip, local_port) = conn.local_address if self.port != local_port: continue (remote_ip, remote_port) = conn.remote_address if (conn.family, remote_ip) in self.exclude_ips: continue if any(( (conn.family, local_ip) in self.ips, (conn.family, self.match_all_ips[conn.family]) in self.ips, local_ip.startswith(self.ipv4_mapped_ipv6_address['prefix']) and (conn.family, self.ipv4_mapped_ipv6_address['match_all']) in self.ips, )): active_connections += 1 return active_connections # =========================================== # Subclass: Linux class LinuxTCPConnectionInfo(TCPConnectionInfo): """ This is a TCP Connection Info evaluation strategy class that utilizes information from Linux's procfs. While less universal, does allow Linux targets to not require an additional library. 
""" platform = 'Linux' distribution = None source_file = { socket.AF_INET: '/proc/net/tcp', socket.AF_INET6: '/proc/net/tcp6' } match_all_ips = { socket.AF_INET: '00000000', socket.AF_INET6: '00000000000000000000000000000000', } ipv4_mapped_ipv6_address = { 'prefix': '0000000000000000FFFF0000', 'match_all': '0000000000000000FFFF000000000000' } local_address_field = 1 remote_address_field = 2 connection_state_field = 3 def __init__(self, module): self.module = module self.ips = _convert_host_to_hex(module.params['host']) self.port = "%0.4X" % int(module.params['port']) self.exclude_ips = self._get_exclude_ips() def _get_exclude_ips(self): exclude_hosts = self.module.params['exclude_hosts'] exclude_ips = [] if exclude_hosts is not None: for host in exclude_hosts: exclude_ips.extend(_convert_host_to_hex(host)) return exclude_ips def get_active_connections_count(self): active_connections = 0 for family in self.source_file.keys(): f = open(self.source_file[family]) for tcp_connection in f.readlines(): tcp_connection = tcp_connection.strip().split() if tcp_connection[self.local_address_field] == 'local_address': continue if (tcp_connection[self.connection_state_field] not in [get_connection_state_id(_connection_state) for _connection_state in self.module.params['active_connection_states']]): continue (local_ip, local_port) = tcp_connection[self.local_address_field].split(':') if self.port != local_port: continue (remote_ip, remote_port) = tcp_connection[self.remote_address_field].split(':') if (family, remote_ip) in self.exclude_ips: continue if any(( (family, local_ip) in self.ips, (family, self.match_all_ips[family]) in self.ips, local_ip.startswith(self.ipv4_mapped_ipv6_address['prefix']) and (family, self.ipv4_mapped_ipv6_address['match_all']) in self.ips, )): active_connections += 1 f.close() return active_connections def _convert_host_to_ip(host): """ Perform forward DNS resolution on host, IP will give the same IP Args: host: String with either hostname, IPv4, or 
IPv6 address Returns: List of tuples containing address family and IP """ addrinfo = socket.getaddrinfo(host, 80, 0, 0, socket.SOL_TCP) ips = [] for family, socktype, proto, canonname, sockaddr in addrinfo: ip = sockaddr[0] ips.append((family, ip)) if family == socket.AF_INET: ips.append((socket.AF_INET6, "::ffff:" + ip)) return ips def _convert_host_to_hex(host): """ Convert the provided host to the format in /proc/net/tcp* /proc/net/tcp uses little-endian four byte hex for ipv4 /proc/net/tcp6 uses little-endian per 4B word for ipv6 Args: host: String with either hostname, IPv4, or IPv6 address Returns: List of tuples containing address family and the little-endian converted host """ ips = [] if host is not None: for family, ip in _convert_host_to_ip(host): hexip_nf = binascii.b2a_hex(socket.inet_pton(family, ip)) hexip_hf = "" for i in range(0, len(hexip_nf), 8): ipgroup_nf = hexip_nf[i:i+8] ipgroup_hf = socket.ntohl(int(ipgroup_nf, base=16)) hexip_hf = "%s%08X" % (hexip_hf, ipgroup_hf) ips.append((family, hexip_hf)) return ips def _create_connection(host, port, connect_timeout): """ Connect to a 2-tuple (host, port) and return the socket object. 
Args: 2-tuple (host, port) and connection timeout Returns: Socket object """ if sys.version_info < (2, 6): (family, _) = (_convert_host_to_ip(host))[0] connect_socket = socket.socket(family, socket.SOCK_STREAM) connect_socket.settimeout(connect_timeout) connect_socket.connect( (host, port) ) else: connect_socket = socket.create_connection( (host, port), connect_timeout) return connect_socket def _timedelta_total_seconds(timedelta): return ( timedelta.microseconds + 0.0 + (timedelta.seconds + timedelta.days * 24 * 3600) * 10 ** 6) / 10 ** 6 def get_connection_state_id(state): connection_state_id = { 'ESTABLISHED': '01', 'SYN_SENT': '02', 'SYN_RECV': '03', 'FIN_WAIT1': '04', 'FIN_WAIT2': '05', 'TIME_WAIT': '06', } return connection_state_id[state] def main(): module = AnsibleModule( argument_spec = dict( host=dict(default='127.0.0.1'), timeout=dict(default=300, type='int'), connect_timeout=dict(default=5, type='int'), delay=dict(default=0, type='int'), port=dict(default=None, type='int'), active_connection_states=dict(default=['ESTABLISHED','SYN_SENT','SYN_RECV','FIN_WAIT1','FIN_WAIT2','TIME_WAIT'], type='list'), path=dict(default=None, type='path'), search_regex=dict(default=None), state=dict(default='started', choices=['started', 'stopped', 'present', 'absent', 'drained']), exclude_hosts=dict(default=None, type='list'), sleep=dict(default=1, type='int') ), ) params = module.params host = params['host'] timeout = params['timeout'] connect_timeout = params['connect_timeout'] delay = params['delay'] port = params['port'] state = params['state'] path = params['path'] search_regex = params['search_regex'] if search_regex is not None: compiled_search_re = re.compile(search_regex, re.MULTILINE) else: compiled_search_re = None if port and path: module.fail_json(msg="port and path parameter can not both be passed to wait_for") if path and state == 'stopped': module.fail_json(msg="state=stopped should only be used for checking a port in the wait_for module") if path and 
state == 'drained': module.fail_json(msg="state=drained should only be used for checking a port in the wait_for module") if params['exclude_hosts'] is not None and state != 'drained': module.fail_json(msg="exclude_hosts should only be with state=drained") for _connection_state in params['active_connection_states']: try: get_connection_state_id(_connection_state) except: module.fail_json(msg="unknown active_connection_state ("+_connection_state+") defined") start = datetime.datetime.utcnow() if delay: time.sleep(delay) if not port and not path and state != 'drained': time.sleep(timeout) elif state in [ 'stopped', 'absent' ]: ### first wait for the stop condition end = start + datetime.timedelta(seconds=timeout) while datetime.datetime.utcnow() < end: if path: try: f = open(path) f.close() except IOError: break elif port: try: s = _create_connection(host, port, connect_timeout) s.shutdown(socket.SHUT_RDWR) s.close() except: break # Conditions not yet met, wait and try again time.sleep(params['sleep']) else: elapsed = datetime.datetime.utcnow() - start if port: module.fail_json(msg="Timeout when waiting for %s:%s to stop." % (host, port), elapsed=elapsed.seconds) elif path: module.fail_json(msg="Timeout when waiting for %s to be absent." % (path), elapsed=elapsed.seconds) elif state in ['started', 'present']: ### wait for start condition end = start + datetime.timedelta(seconds=timeout) while datetime.datetime.utcnow() < end: if path: try: os.stat(path) except OSError: e = get_exception() # If anything except file not present, throw an error if e.errno != 2: elapsed = datetime.datetime.utcnow() - start module.fail_json(msg="Failed to stat %s, %s" % (path, e.strerror), elapsed=elapsed.seconds) # file doesn't exist yet, so continue else: # File exists. Are there additional things to check? if not compiled_search_re: # nope, succeed! break try: f = open(path) try: if re.search(compiled_search_re, f.read()): # String found, success! 
break finally: f.close() except IOError: pass elif port: alt_connect_timeout = math.ceil(_timedelta_total_seconds(end - datetime.datetime.utcnow())) try: s = _create_connection(host, port, min(connect_timeout, alt_connect_timeout)) except: # Failed to connect by connect_timeout. wait and try again pass else: # Connected -- are there additional conditions? if compiled_search_re: data = '' matched = False while datetime.datetime.utcnow() < end: max_timeout = math.ceil(_timedelta_total_seconds(end - datetime.datetime.utcnow())) (readable, w, e) = select.select([s], [], [], max_timeout) if not readable: # No new data. Probably means our timeout # expired continue response = s.recv(1024) if not response: # Server shutdown break data += to_native(response, errors='surrogate_or_strict') if re.search(compiled_search_re, data): matched = True break # Shutdown the client socket s.shutdown(socket.SHUT_RDWR) s.close() if matched: # Found our string, success! break else: # Connection established, success! 
s.shutdown(socket.SHUT_RDWR) s.close() break # Conditions not yet met, wait and try again time.sleep(params['sleep']) else: # while-else # Timeout expired elapsed = datetime.datetime.utcnow() - start if port: if search_regex: module.fail_json(msg="Timeout when waiting for search string %s in %s:%s" % (search_regex, host, port), elapsed=elapsed.seconds) else: module.fail_json(msg="Timeout when waiting for %s:%s" % (host, port), elapsed=elapsed.seconds) elif path: if search_regex: module.fail_json(msg="Timeout when waiting for search string %s in %s" % (search_regex, path), elapsed=elapsed.seconds) else: module.fail_json(msg="Timeout when waiting for file %s" % (path), elapsed=elapsed.seconds) elif state == 'drained': ### wait until all active connections are gone end = start + datetime.timedelta(seconds=timeout) tcpconns = TCPConnectionInfo(module) while datetime.datetime.utcnow() < end: try: if tcpconns.get_active_connections_count() == 0: break except IOError: pass # Conditions not yet met, wait and try again time.sleep(params['sleep']) else: elapsed = datetime.datetime.utcnow() - start module.fail_json(msg="Timeout when waiting for %s:%s to drain" % (host, port), elapsed=elapsed.seconds) elapsed = datetime.datetime.utcnow() - start module.exit_json(state=state, port=port, search_regex=search_regex, path=path, elapsed=elapsed.seconds) # import module snippets from ansible.module_utils.basic import * if __name__ == '__main__': main()
gpl-3.0
mtconley/turntable
test/lib/python2.7/site-packages/scipy/ndimage/morphology.py
16
79457
# Copyright (C) 2003-2005 Peter J. Verveer # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided # with the distribution. # # 3. The name of the author may not be used to endorse or promote # products derived from this software without specific prior # written permission. # # THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS # OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE # GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import division, print_function, absolute_import import numpy from . import _ni_support from . import _nd_image from . 
import filters __all__ = ['iterate_structure', 'generate_binary_structure', 'binary_erosion', 'binary_dilation', 'binary_opening', 'binary_closing', 'binary_hit_or_miss', 'binary_propagation', 'binary_fill_holes', 'grey_erosion', 'grey_dilation', 'grey_opening', 'grey_closing', 'morphological_gradient', 'morphological_laplace', 'white_tophat', 'black_tophat', 'distance_transform_bf', 'distance_transform_cdt', 'distance_transform_edt'] def _center_is_true(structure, origin): structure = numpy.array(structure) coor = tuple([oo + ss // 2 for ss, oo in zip(structure.shape, origin)]) return bool(structure[coor]) def iterate_structure(structure, iterations, origin=None): """ Iterate a structure by dilating it with itself. Parameters ---------- structure : array_like Structuring element (an array of bools, for example), to be dilated with itself. iterations : int number of dilations performed on the structure with itself origin : optional If origin is None, only the iterated structure is returned. If not, a tuple of the iterated structure and the modified origin is returned. Returns ------- iterate_structure : ndarray of bools A new structuring element obtained by dilating `structure` (`iterations` - 1) times with itself. 
See also -------- generate_binary_structure Examples -------- >>> struct = ndimage.generate_binary_structure(2, 1) >>> struct.astype(int) array([[0, 1, 0], [1, 1, 1], [0, 1, 0]]) >>> ndimage.iterate_structure(struct, 2).astype(int) array([[0, 0, 1, 0, 0], [0, 1, 1, 1, 0], [1, 1, 1, 1, 1], [0, 1, 1, 1, 0], [0, 0, 1, 0, 0]]) >>> ndimage.iterate_structure(struct, 3).astype(int) array([[0, 0, 0, 1, 0, 0, 0], [0, 0, 1, 1, 1, 0, 0], [0, 1, 1, 1, 1, 1, 0], [1, 1, 1, 1, 1, 1, 1], [0, 1, 1, 1, 1, 1, 0], [0, 0, 1, 1, 1, 0, 0], [0, 0, 0, 1, 0, 0, 0]]) """ structure = numpy.asarray(structure) if iterations < 2: return structure.copy() ni = iterations - 1 shape = [ii + ni * (ii - 1) for ii in structure.shape] pos = [ni * (structure.shape[ii] // 2) for ii in range(len(shape))] slc = [slice(pos[ii], pos[ii] + structure.shape[ii], None) for ii in range(len(shape))] out = numpy.zeros(shape, bool) out[slc] = structure != 0 out = binary_dilation(out, structure, iterations=ni) if origin is None: return out else: origin = _ni_support._normalize_sequence(origin, structure.ndim) origin = [iterations * o for o in origin] return out, origin def generate_binary_structure(rank, connectivity): """ Generate a binary structure for binary morphological operations. Parameters ---------- rank : int Number of dimensions of the array to which the structuring element will be applied, as returned by `np.ndim`. connectivity : int `connectivity` determines which elements of the output array belong to the structure, i.e. are considered as neighbors of the central element. Elements up to a squared distance of `connectivity` from the center are considered neighbors. `connectivity` may range from 1 (no diagonal elements are neighbors) to `rank` (all elements are neighbors). Returns ------- output : ndarray of bools Structuring element which may be used for binary morphological operations, with `rank` dimensions and all dimensions equal to 3. 
See also -------- iterate_structure, binary_dilation, binary_erosion Notes ----- `generate_binary_structure` can only create structuring elements with dimensions equal to 3, i.e. minimal dimensions. For larger structuring elements, that are useful e.g. for eroding large objects, one may either use `iterate_structure`, or create directly custom arrays with numpy functions such as `numpy.ones`. Examples -------- >>> struct = ndimage.generate_binary_structure(2, 1) >>> struct array([[False, True, False], [ True, True, True], [False, True, False]], dtype=bool) >>> a = np.zeros((5,5)) >>> a[2, 2] = 1 >>> a array([[ 0., 0., 0., 0., 0.], [ 0., 0., 0., 0., 0.], [ 0., 0., 1., 0., 0.], [ 0., 0., 0., 0., 0.], [ 0., 0., 0., 0., 0.]]) >>> b = ndimage.binary_dilation(a, structure=struct).astype(a.dtype) >>> b array([[ 0., 0., 0., 0., 0.], [ 0., 0., 1., 0., 0.], [ 0., 1., 1., 1., 0.], [ 0., 0., 1., 0., 0.], [ 0., 0., 0., 0., 0.]]) >>> ndimage.binary_dilation(b, structure=struct).astype(a.dtype) array([[ 0., 0., 1., 0., 0.], [ 0., 1., 1., 1., 0.], [ 1., 1., 1., 1., 1.], [ 0., 1., 1., 1., 0.], [ 0., 0., 1., 0., 0.]]) >>> struct = ndimage.generate_binary_structure(2, 2) >>> struct array([[ True, True, True], [ True, True, True], [ True, True, True]], dtype=bool) >>> struct = ndimage.generate_binary_structure(3, 1) >>> struct # no diagonal elements array([[[False, False, False], [False, True, False], [False, False, False]], [[False, True, False], [ True, True, True], [False, True, False]], [[False, False, False], [False, True, False], [False, False, False]]], dtype=bool) """ if connectivity < 1: connectivity = 1 if rank < 1: if connectivity < 1: return numpy.array(0, dtype=bool) else: return numpy.array(1, dtype=bool) output = numpy.fabs(numpy.indices([3] * rank) - 1) output = numpy.add.reduce(output, 0) return numpy.asarray(output <= connectivity, dtype=bool) def _binary_erosion(input, structure, iterations, mask, output, border_value, origin, invert, brute_force): input = 
numpy.asarray(input) if numpy.iscomplexobj(input): raise TypeError('Complex type not supported') if structure is None: structure = generate_binary_structure(input.ndim, 1) else: structure = numpy.asarray(structure) structure = structure.astype(bool) if structure.ndim != input.ndim: raise RuntimeError('structure and input must have same dimensionality') if not structure.flags.contiguous: structure = structure.copy() if numpy.product(structure.shape,axis=0) < 1: raise RuntimeError('structure must not be empty') if mask is not None: mask = numpy.asarray(mask) if mask.shape != input.shape: raise RuntimeError('mask and input must have equal sizes') origin = _ni_support._normalize_sequence(origin, input.ndim) cit = _center_is_true(structure, origin) if isinstance(output, numpy.ndarray): if numpy.iscomplexobj(output): raise TypeError('Complex output type not supported') else: output = bool output, return_value = _ni_support._get_output(output, input) if iterations == 1: _nd_image.binary_erosion(input, structure, mask, output, border_value, origin, invert, cit, 0) return return_value elif cit and not brute_force: changed, coordinate_list = _nd_image.binary_erosion(input, structure, mask, output, border_value, origin, invert, cit, 1) structure = structure[tuple([slice(None, None, -1)] * structure.ndim)] for ii in range(len(origin)): origin[ii] = -origin[ii] if not structure.shape[ii] & 1: origin[ii] -= 1 if mask is not None: msk = numpy.asarray(mask) msk = mask.astype(numpy.int8) if msk is mask: msk = mask.copy() mask = msk if not structure.flags.contiguous: structure = structure.copy() _nd_image.binary_erosion2(output, structure, mask, iterations - 1, origin, invert, coordinate_list) return return_value else: tmp_in = numpy.zeros(input.shape, bool) if return_value is None: tmp_out = output else: tmp_out = numpy.zeros(input.shape, bool) if not iterations & 1: tmp_in, tmp_out = tmp_out, tmp_in changed = _nd_image.binary_erosion(input, structure, mask, tmp_out, border_value, 
origin, invert, cit, 0) ii = 1 while (ii < iterations) or (iterations < 1) and changed: tmp_in, tmp_out = tmp_out, tmp_in changed = _nd_image.binary_erosion(tmp_in, structure, mask, tmp_out, border_value, origin, invert, cit, 0) ii += 1 if return_value is not None: return tmp_out def binary_erosion(input, structure=None, iterations=1, mask=None, output=None, border_value=0, origin=0, brute_force=False): """ Multi-dimensional binary erosion with a given structuring element. Binary erosion is a mathematical morphology operation used for image processing. Parameters ---------- input : array_like Binary image to be eroded. Non-zero (True) elements form the subset to be eroded. structure : array_like, optional Structuring element used for the erosion. Non-zero elements are considered True. If no structuring element is provided, an element is generated with a square connectivity equal to one. iterations : {int, float}, optional The erosion is repeated `iterations` times (one, by default). If iterations is less than 1, the erosion is repeated until the result does not change anymore. mask : array_like, optional If a mask is given, only those elements with a True value at the corresponding mask element are modified at each iteration. output : ndarray, optional Array of the same shape as input, into which the output is placed. By default, a new array is created. origin : int or tuple of ints, optional Placement of the filter, by default 0. border_value : int (cast to 0 or 1) Value at the border in the output array. Returns ------- binary_erosion : ndarray of bools Erosion of the input by the structuring element. See also -------- grey_erosion, binary_dilation, binary_closing, binary_opening, generate_binary_structure Notes ----- Erosion [1]_ is a mathematical morphology operation [2]_ that uses a structuring element for shrinking the shapes in an image. 
The binary erosion of an image by a structuring element is the locus of the points where a superimposition of the structuring element centered on the point is entirely contained in the set of non-zero elements of the image. References ---------- .. [1] http://en.wikipedia.org/wiki/Erosion_%28morphology%29 .. [2] http://en.wikipedia.org/wiki/Mathematical_morphology Examples -------- >>> a = np.zeros((7,7), dtype=np.int) >>> a[1:6, 2:5] = 1 >>> a array([[0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0]]) >>> ndimage.binary_erosion(a).astype(a.dtype) array([[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0]]) >>> #Erosion removes objects smaller than the structure >>> ndimage.binary_erosion(a, structure=np.ones((5,5))).astype(a.dtype) array([[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0]]) """ return _binary_erosion(input, structure, iterations, mask, output, border_value, origin, 0, brute_force) def binary_dilation(input, structure=None, iterations=1, mask=None, output=None, border_value=0, origin=0, brute_force=False): """ Multi-dimensional binary dilation with the given structuring element. Parameters ---------- input : array_like Binary array_like to be dilated. Non-zero (True) elements form the subset to be dilated. structure : array_like, optional Structuring element used for the dilation. Non-zero elements are considered True. If no structuring element is provided an element is generated with a square connectivity equal to one. iterations : {int, float}, optional The dilation is repeated `iterations` times (one, by default). If iterations is less than 1, the dilation is repeated until the result does not change anymore. 
mask : array_like, optional If a mask is given, only those elements with a True value at the corresponding mask element are modified at each iteration. output : ndarray, optional Array of the same shape as input, into which the output is placed. By default, a new array is created. origin : int or tuple of ints, optional Placement of the filter, by default 0. border_value : int (cast to 0 or 1) Value at the border in the output array. Returns ------- binary_dilation : ndarray of bools Dilation of the input by the structuring element. See also -------- grey_dilation, binary_erosion, binary_closing, binary_opening, generate_binary_structure Notes ----- Dilation [1]_ is a mathematical morphology operation [2]_ that uses a structuring element for expanding the shapes in an image. The binary dilation of an image by a structuring element is the locus of the points covered by the structuring element, when its center lies within the non-zero points of the image. References ---------- .. [1] http://en.wikipedia.org/wiki/Dilation_%28morphology%29 .. 
[2] http://en.wikipedia.org/wiki/Mathematical_morphology Examples -------- >>> a = np.zeros((5, 5)) >>> a[2, 2] = 1 >>> a array([[ 0., 0., 0., 0., 0.], [ 0., 0., 0., 0., 0.], [ 0., 0., 1., 0., 0.], [ 0., 0., 0., 0., 0.], [ 0., 0., 0., 0., 0.]]) >>> ndimage.binary_dilation(a) array([[False, False, False, False, False], [False, False, True, False, False], [False, True, True, True, False], [False, False, True, False, False], [False, False, False, False, False]], dtype=bool) >>> ndimage.binary_dilation(a).astype(a.dtype) array([[ 0., 0., 0., 0., 0.], [ 0., 0., 1., 0., 0.], [ 0., 1., 1., 1., 0.], [ 0., 0., 1., 0., 0.], [ 0., 0., 0., 0., 0.]]) >>> # 3x3 structuring element with connectivity 1, used by default >>> struct1 = ndimage.generate_binary_structure(2, 1) >>> struct1 array([[False, True, False], [ True, True, True], [False, True, False]], dtype=bool) >>> # 3x3 structuring element with connectivity 2 >>> struct2 = ndimage.generate_binary_structure(2, 2) >>> struct2 array([[ True, True, True], [ True, True, True], [ True, True, True]], dtype=bool) >>> ndimage.binary_dilation(a, structure=struct1).astype(a.dtype) array([[ 0., 0., 0., 0., 0.], [ 0., 0., 1., 0., 0.], [ 0., 1., 1., 1., 0.], [ 0., 0., 1., 0., 0.], [ 0., 0., 0., 0., 0.]]) >>> ndimage.binary_dilation(a, structure=struct2).astype(a.dtype) array([[ 0., 0., 0., 0., 0.], [ 0., 1., 1., 1., 0.], [ 0., 1., 1., 1., 0.], [ 0., 1., 1., 1., 0.], [ 0., 0., 0., 0., 0.]]) >>> ndimage.binary_dilation(a, structure=struct1,\\ ... 
iterations=2).astype(a.dtype) array([[ 0., 0., 1., 0., 0.], [ 0., 1., 1., 1., 0.], [ 1., 1., 1., 1., 1.], [ 0., 1., 1., 1., 0.], [ 0., 0., 1., 0., 0.]]) """ input = numpy.asarray(input) if structure is None: structure = generate_binary_structure(input.ndim, 1) origin = _ni_support._normalize_sequence(origin, input.ndim) structure = numpy.asarray(structure) structure = structure[tuple([slice(None, None, -1)] * structure.ndim)] for ii in range(len(origin)): origin[ii] = -origin[ii] if not structure.shape[ii] & 1: origin[ii] -= 1 return _binary_erosion(input, structure, iterations, mask, output, border_value, origin, 1, brute_force) def binary_opening(input, structure=None, iterations=1, output=None, origin=0): """ Multi-dimensional binary opening with the given structuring element. The *opening* of an input image by a structuring element is the *dilation* of the *erosion* of the image by the structuring element. Parameters ---------- input : array_like Binary array_like to be opened. Non-zero (True) elements form the subset to be opened. structure : array_like, optional Structuring element used for the opening. Non-zero elements are considered True. If no structuring element is provided an element is generated with a square connectivity equal to one (i.e., only nearest neighbors are connected to the center, diagonally-connected elements are not considered neighbors). iterations : {int, float}, optional The erosion step of the opening, then the dilation step are each repeated `iterations` times (one, by default). If `iterations` is less than 1, each operation is repeated until the result does not change anymore. output : ndarray, optional Array of the same shape as input, into which the output is placed. By default, a new array is created. origin : int or tuple of ints, optional Placement of the filter, by default 0. Returns ------- binary_opening : ndarray of bools Opening of the input by the structuring element. 
See also -------- grey_opening, binary_closing, binary_erosion, binary_dilation, generate_binary_structure Notes ----- *Opening* [1]_ is a mathematical morphology operation [2]_ that consists in the succession of an erosion and a dilation of the input with the same structuring element. Opening therefore removes objects smaller than the structuring element. Together with *closing* (`binary_closing`), opening can be used for noise removal. References ---------- .. [1] http://en.wikipedia.org/wiki/Opening_%28morphology%29 .. [2] http://en.wikipedia.org/wiki/Mathematical_morphology Examples -------- >>> a = np.zeros((5,5), dtype=np.int) >>> a[1:4, 1:4] = 1; a[4, 4] = 1 >>> a array([[0, 0, 0, 0, 0], [0, 1, 1, 1, 0], [0, 1, 1, 1, 0], [0, 1, 1, 1, 0], [0, 0, 0, 0, 1]]) >>> # Opening removes small objects >>> ndimage.binary_opening(a, structure=np.ones((3,3))).astype(np.int) array([[0, 0, 0, 0, 0], [0, 1, 1, 1, 0], [0, 1, 1, 1, 0], [0, 1, 1, 1, 0], [0, 0, 0, 0, 0]]) >>> # Opening can also smooth corners >>> ndimage.binary_opening(a).astype(np.int) array([[0, 0, 0, 0, 0], [0, 0, 1, 0, 0], [0, 1, 1, 1, 0], [0, 0, 1, 0, 0], [0, 0, 0, 0, 0]]) >>> # Opening is the dilation of the erosion of the input >>> ndimage.binary_erosion(a).astype(np.int) array([[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 1, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]) >>> ndimage.binary_dilation(ndimage.binary_erosion(a)).astype(np.int) array([[0, 0, 0, 0, 0], [0, 0, 1, 0, 0], [0, 1, 1, 1, 0], [0, 0, 1, 0, 0], [0, 0, 0, 0, 0]]) """ input = numpy.asarray(input) if structure is None: rank = input.ndim structure = generate_binary_structure(rank, 1) tmp = binary_erosion(input, structure, iterations, None, None, 0, origin) return binary_dilation(tmp, structure, iterations, None, output, 0, origin) def binary_closing(input, structure=None, iterations=1, output=None, origin=0): """ Multi-dimensional binary closing with the given structuring element. 
The *closing* of an input image by a structuring element is the *erosion* of the *dilation* of the image by the structuring element. Parameters ---------- input : array_like Binary array_like to be closed. Non-zero (True) elements form the subset to be closed. structure : array_like, optional Structuring element used for the closing. Non-zero elements are considered True. If no structuring element is provided an element is generated with a square connectivity equal to one (i.e., only nearest neighbors are connected to the center, diagonally-connected elements are not considered neighbors). iterations : {int, float}, optional The dilation step of the closing, then the erosion step are each repeated `iterations` times (one, by default). If iterations is less than 1, each operations is repeated until the result does not change anymore. output : ndarray, optional Array of the same shape as input, into which the output is placed. By default, a new array is created. origin : int or tuple of ints, optional Placement of the filter, by default 0. Returns ------- binary_closing : ndarray of bools Closing of the input by the structuring element. See also -------- grey_closing, binary_opening, binary_dilation, binary_erosion, generate_binary_structure Notes ----- *Closing* [1]_ is a mathematical morphology operation [2]_ that consists in the succession of a dilation and an erosion of the input with the same structuring element. Closing therefore fills holes smaller than the structuring element. Together with *opening* (`binary_opening`), closing can be used for noise removal. References ---------- .. [1] http://en.wikipedia.org/wiki/Closing_%28morphology%29 .. 
[2] http://en.wikipedia.org/wiki/Mathematical_morphology Examples -------- >>> a = np.zeros((5,5), dtype=np.int) >>> a[1:-1, 1:-1] = 1; a[2,2] = 0 >>> a array([[0, 0, 0, 0, 0], [0, 1, 1, 1, 0], [0, 1, 0, 1, 0], [0, 1, 1, 1, 0], [0, 0, 0, 0, 0]]) >>> # Closing removes small holes >>> ndimage.binary_closing(a).astype(np.int) array([[0, 0, 0, 0, 0], [0, 1, 1, 1, 0], [0, 1, 1, 1, 0], [0, 1, 1, 1, 0], [0, 0, 0, 0, 0]]) >>> # Closing is the erosion of the dilation of the input >>> ndimage.binary_dilation(a).astype(np.int) array([[0, 1, 1, 1, 0], [1, 1, 1, 1, 1], [1, 1, 1, 1, 1], [1, 1, 1, 1, 1], [0, 1, 1, 1, 0]]) >>> ndimage.binary_erosion(ndimage.binary_dilation(a)).astype(np.int) array([[0, 0, 0, 0, 0], [0, 1, 1, 1, 0], [0, 1, 1, 1, 0], [0, 1, 1, 1, 0], [0, 0, 0, 0, 0]]) >>> a = np.zeros((7,7), dtype=np.int) >>> a[1:6, 2:5] = 1; a[1:3,3] = 0 >>> a array([[0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0]]) >>> # In addition to removing holes, closing can also >>> # coarsen boundaries with fine hollows. 
>>> ndimage.binary_closing(a).astype(np.int) array([[0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0]]) >>> ndimage.binary_closing(a, structure=np.ones((2,2))).astype(np.int) array([[0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0]]) """ input = numpy.asarray(input) if structure is None: rank = input.ndim structure = generate_binary_structure(rank, 1) tmp = binary_dilation(input, structure, iterations, None, None, 0, origin) return binary_erosion(tmp, structure, iterations, None, output, 0, origin) def binary_hit_or_miss(input, structure1=None, structure2=None, output=None, origin1=0, origin2=None): """ Multi-dimensional binary hit-or-miss transform. The hit-or-miss transform finds the locations of a given pattern inside the input image. Parameters ---------- input : array_like (cast to booleans) Binary image where a pattern is to be detected. structure1 : array_like (cast to booleans), optional Part of the structuring element to be fitted to the foreground (non-zero elements) of `input`. If no value is provided, a structure of square connectivity 1 is chosen. structure2 : array_like (cast to booleans), optional Second part of the structuring element that has to miss completely the foreground. If no value is provided, the complementary of `structure1` is taken. output : ndarray, optional Array of the same shape as input, into which the output is placed. By default, a new array is created. origin1 : int or tuple of ints, optional Placement of the first part of the structuring element `structure1`, by default 0 for a centered structure. origin2 : int or tuple of ints, optional Placement of the second part of the structuring element `structure2`, by default 0 for a centered structure. 
If a value is provided for `origin1` and not for `origin2`, then `origin2` is set to `origin1`. Returns ------- binary_hit_or_miss : ndarray Hit-or-miss transform of `input` with the given structuring element (`structure1`, `structure2`). See also -------- ndimage.morphology, binary_erosion References ---------- .. [1] http://en.wikipedia.org/wiki/Hit-or-miss_transform Examples -------- >>> a = np.zeros((7,7), dtype=np.int) >>> a[1, 1] = 1; a[2:4, 2:4] = 1; a[4:6, 4:6] = 1 >>> a array([[0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], [0, 0, 1, 1, 0, 0, 0], [0, 0, 1, 1, 0, 0, 0], [0, 0, 0, 0, 1, 1, 0], [0, 0, 0, 0, 1, 1, 0], [0, 0, 0, 0, 0, 0, 0]]) >>> structure1 = np.array([[1, 0, 0], [0, 1, 1], [0, 1, 1]]) >>> structure1 array([[1, 0, 0], [0, 1, 1], [0, 1, 1]]) >>> # Find the matches of structure1 in the array a >>> ndimage.binary_hit_or_miss(a, structure1=structure1).astype(np.int) array([[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0]]) >>> # Change the origin of the filter >>> # origin1=1 is equivalent to origin1=(1,1) here >>> ndimage.binary_hit_or_miss(a, structure1=structure1,\\ ... 
origin1=1).astype(np.int) array([[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0, 0]]) """ input = numpy.asarray(input) if structure1 is None: structure1 = generate_binary_structure(input.ndim, 1) if structure2 is None: structure2 = numpy.logical_not(structure1) origin1 = _ni_support._normalize_sequence(origin1, input.ndim) if origin2 is None: origin2 = origin1 else: origin2 = _ni_support._normalize_sequence(origin2, input.ndim) tmp1 = _binary_erosion(input, structure1, 1, None, None, 0, origin1, 0, False) inplace = isinstance(output, numpy.ndarray) result = _binary_erosion(input, structure2, 1, None, output, 0, origin2, 1, False) if inplace: numpy.logical_not(output, output) numpy.logical_and(tmp1, output, output) else: numpy.logical_not(result, result) return numpy.logical_and(tmp1, result) def binary_propagation(input, structure=None, mask=None, output=None, border_value=0, origin=0): """ Multi-dimensional binary propagation with the given structuring element. Parameters ---------- input : array_like Binary image to be propagated inside `mask`. structure : array_like Structuring element used in the successive dilations. The output may depend on the structuring element, especially if `mask` has several connex components. If no structuring element is provided, an element is generated with a squared connectivity equal to one. mask : array_like Binary mask defining the region into which `input` is allowed to propagate. output : ndarray, optional Array of the same shape as input, into which the output is placed. By default, a new array is created. origin : int or tuple of ints, optional Placement of the filter, by default 0. Returns ------- binary_propagation : ndarray Binary propagation of `input` inside `mask`. 
Notes ----- This function is functionally equivalent to calling binary_dilation with the number of iterations less then one: iterative dilation until the result does not change anymore. The succession of an erosion and propagation inside the original image can be used instead of an *opening* for deleting small objects while keeping the contours of larger objects untouched. References ---------- .. [1] http://cmm.ensmp.fr/~serra/cours/pdf/en/ch6en.pdf, slide 15. .. [2] http://www.qi.tnw.tudelft.nl/Courses/FIP/noframes/fip-Morpholo.html#Heading102 Examples -------- >>> input = np.zeros((8, 8), dtype=np.int) >>> input[2, 2] = 1 >>> mask = np.zeros((8, 8), dtype=np.int) >>> mask[1:4, 1:4] = mask[4, 4] = mask[6:8, 6:8] = 1 >>> input array([[0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0]]) >>> mask array([[0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 1, 1, 0, 0, 0, 0], [0, 1, 1, 1, 0, 0, 0, 0], [0, 1, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 1], [0, 0, 0, 0, 0, 0, 1, 1]]) >>> ndimage.binary_propagation(input, mask=mask).astype(np.int) array([[0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 1, 1, 0, 0, 0, 0], [0, 1, 1, 1, 0, 0, 0, 0], [0, 1, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0]]) >>> ndimage.binary_propagation(input, mask=mask,\\ ... 
structure=np.ones((3,3))).astype(np.int) array([[0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 1, 1, 0, 0, 0, 0], [0, 1, 1, 1, 0, 0, 0, 0], [0, 1, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0]]) >>> # Comparison between opening and erosion+propagation >>> a = np.zeros((6,6), dtype=np.int) >>> a[2:5, 2:5] = 1; a[0, 0] = 1; a[5, 5] = 1 >>> a array([[1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 1, 0], [0, 0, 1, 1, 1, 0], [0, 0, 1, 1, 1, 0], [0, 0, 0, 0, 0, 1]]) >>> ndimage.binary_opening(a).astype(np.int) array([[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0], [0, 0, 1, 1, 1, 0], [0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0]]) >>> b = ndimage.binary_erosion(a) >>> b.astype(int) array([[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]) >>> ndimage.binary_propagation(b, mask=a).astype(np.int) array([[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 1, 0], [0, 0, 1, 1, 1, 0], [0, 0, 1, 1, 1, 0], [0, 0, 0, 0, 0, 0]]) """ return binary_dilation(input, structure, -1, mask, output, border_value, origin) def binary_fill_holes(input, structure=None, output=None, origin=0): """ Fill the holes in binary objects. Parameters ---------- input : array_like n-dimensional binary array with holes to be filled structure : array_like, optional Structuring element used in the computation; large-size elements make computations faster but may miss holes separated from the background by thin regions. The default element (with a square connectivity equal to one) yields the intuitive result where all holes in the input have been filled. output : ndarray, optional Array of the same shape as input, into which the output is placed. By default, a new array is created. origin : int, tuple of ints, optional Position of the structuring element. Returns ------- out : ndarray Transformation of the initial image `input` where holes have been filled. 
See also -------- binary_dilation, binary_propagation, label Notes ----- The algorithm used in this function consists in invading the complementary of the shapes in `input` from the outer boundary of the image, using binary dilations. Holes are not connected to the boundary and are therefore not invaded. The result is the complementary subset of the invaded region. References ---------- .. [1] http://en.wikipedia.org/wiki/Mathematical_morphology Examples -------- >>> a = np.zeros((5, 5), dtype=int) >>> a[1:4, 1:4] = 1 >>> a[2,2] = 0 >>> a array([[0, 0, 0, 0, 0], [0, 1, 1, 1, 0], [0, 1, 0, 1, 0], [0, 1, 1, 1, 0], [0, 0, 0, 0, 0]]) >>> ndimage.binary_fill_holes(a).astype(int) array([[0, 0, 0, 0, 0], [0, 1, 1, 1, 0], [0, 1, 1, 1, 0], [0, 1, 1, 1, 0], [0, 0, 0, 0, 0]]) >>> # Too big structuring element >>> ndimage.binary_fill_holes(a, structure=np.ones((5,5))).astype(int) array([[0, 0, 0, 0, 0], [0, 1, 1, 1, 0], [0, 1, 0, 1, 0], [0, 1, 1, 1, 0], [0, 0, 0, 0, 0]]) """ mask = numpy.logical_not(input) tmp = numpy.zeros(mask.shape, bool) inplace = isinstance(output, numpy.ndarray) if inplace: binary_dilation(tmp, structure, -1, mask, output, 1, origin) numpy.logical_not(output, output) else: output = binary_dilation(tmp, structure, -1, mask, None, 1, origin) numpy.logical_not(output, output) return output def grey_erosion(input, size=None, footprint=None, structure=None, output=None, mode="reflect", cval=0.0, origin=0): """ Calculate a greyscale erosion, using either a structuring element, or a footprint corresponding to a flat structuring element. Grayscale erosion is a mathematical morphology operation. For the simple case of a full and flat structuring element, it can be viewed as a minimum filter over a sliding window. Parameters ---------- input : array_like Array over which the grayscale erosion is to be computed. size : tuple of ints Shape of a flat and full structuring element used for the grayscale erosion. Optional if `footprint` or `structure` is provided. 
footprint : array of ints, optional Positions of non-infinite elements of a flat structuring element used for the grayscale erosion. Non-zero values give the set of neighbors of the center over which the minimum is chosen. structure : array of ints, optional Structuring element used for the grayscale erosion. `structure` may be a non-flat structuring element. output : array, optional An array used for storing the ouput of the erosion may be provided. mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional The `mode` parameter determines how the array borders are handled, where `cval` is the value when mode is equal to 'constant'. Default is 'reflect' cval : scalar, optional Value to fill past edges of input if `mode` is 'constant'. Default is 0.0. origin : scalar, optional The `origin` parameter controls the placement of the filter. Default 0 Returns ------- output : ndarray Grayscale erosion of `input`. See also -------- binary_erosion, grey_dilation, grey_opening, grey_closing generate_binary_structure, ndimage.minimum_filter Notes ----- The grayscale erosion of an image input by a structuring element s defined over a domain E is given by: (input+s)(x) = min {input(y) - s(x-y), for y in E} In particular, for structuring elements defined as s(y) = 0 for y in E, the grayscale erosion computes the minimum of the input image inside a sliding window defined by E. Grayscale erosion [1]_ is a *mathematical morphology* operation [2]_. References ---------- .. [1] http://en.wikipedia.org/wiki/Erosion_%28morphology%29 .. 
[2] http://en.wikipedia.org/wiki/Mathematical_morphology Examples -------- >>> a = np.zeros((7,7), dtype=np.int) >>> a[1:6, 1:6] = 3 >>> a[4,4] = 2; a[2,3] = 1 >>> a array([[0, 0, 0, 0, 0, 0, 0], [0, 3, 3, 3, 3, 3, 0], [0, 3, 3, 1, 3, 3, 0], [0, 3, 3, 3, 3, 3, 0], [0, 3, 3, 3, 2, 3, 0], [0, 3, 3, 3, 3, 3, 0], [0, 0, 0, 0, 0, 0, 0]]) >>> ndimage.grey_erosion(a, size=(3,3)) array([[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 0, 0], [0, 0, 3, 2, 2, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0]]) >>> footprint = ndimage.generate_binary_structure(2, 1) >>> footprint array([[False, True, False], [ True, True, True], [False, True, False]], dtype=bool) >>> # Diagonally-connected elements are not considered neighbors >>> ndimage.grey_erosion(a, size=(3,3), footprint=footprint) array([[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 1, 0, 0], [0, 0, 3, 1, 2, 0, 0], [0, 0, 3, 2, 2, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0]]) """ if size is None and footprint is None and structure is None: raise ValueError("size, footprint or structure must be specified") return filters._min_or_max_filter(input, size, footprint, structure, output, mode, cval, origin, 1) def grey_dilation(input, size=None, footprint=None, structure=None, output=None, mode="reflect", cval=0.0, origin=0): """ Calculate a greyscale dilation, using either a structuring element, or a footprint corresponding to a flat structuring element. Grayscale dilation is a mathematical morphology operation. For the simple case of a full and flat structuring element, it can be viewed as a maximum filter over a sliding window. Parameters ---------- input : array_like Array over which the grayscale dilation is to be computed. size : tuple of ints Shape of a flat and full structuring element used for the grayscale dilation. Optional if `footprint` or `structure` is provided. 
footprint : array of ints, optional Positions of non-infinite elements of a flat structuring element used for the grayscale dilation. Non-zero values give the set of neighbors of the center over which the maximum is chosen. structure : array of ints, optional Structuring element used for the grayscale dilation. `structure` may be a non-flat structuring element. output : array, optional An array used for storing the ouput of the dilation may be provided. mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional The `mode` parameter determines how the array borders are handled, where `cval` is the value when mode is equal to 'constant'. Default is 'reflect' cval : scalar, optional Value to fill past edges of input if `mode` is 'constant'. Default is 0.0. origin : scalar, optional The `origin` parameter controls the placement of the filter. Default 0 Returns ------- grey_dilation : ndarray Grayscale dilation of `input`. See also -------- binary_dilation, grey_erosion, grey_closing, grey_opening generate_binary_structure, ndimage.maximum_filter Notes ----- The grayscale dilation of an image input by a structuring element s defined over a domain E is given by: (input+s)(x) = max {input(y) + s(x-y), for y in E} In particular, for structuring elements defined as s(y) = 0 for y in E, the grayscale dilation computes the maximum of the input image inside a sliding window defined by E. Grayscale dilation [1]_ is a *mathematical morphology* operation [2]_. References ---------- .. [1] http://en.wikipedia.org/wiki/Dilation_%28morphology%29 .. 
[2] http://en.wikipedia.org/wiki/Mathematical_morphology Examples -------- >>> a = np.zeros((7,7), dtype=np.int) >>> a[2:5, 2:5] = 1 >>> a[4,4] = 2; a[2,3] = 3 >>> a array([[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 3, 1, 0, 0], [0, 0, 1, 1, 1, 0, 0], [0, 0, 1, 1, 2, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0]]) >>> ndimage.grey_dilation(a, size=(3,3)) array([[0, 0, 0, 0, 0, 0, 0], [0, 1, 3, 3, 3, 1, 0], [0, 1, 3, 3, 3, 1, 0], [0, 1, 3, 3, 3, 2, 0], [0, 1, 1, 2, 2, 2, 0], [0, 1, 1, 2, 2, 2, 0], [0, 0, 0, 0, 0, 0, 0]]) >>> ndimage.grey_dilation(a, footprint=np.ones((3,3))) array([[0, 0, 0, 0, 0, 0, 0], [0, 1, 3, 3, 3, 1, 0], [0, 1, 3, 3, 3, 1, 0], [0, 1, 3, 3, 3, 2, 0], [0, 1, 1, 2, 2, 2, 0], [0, 1, 1, 2, 2, 2, 0], [0, 0, 0, 0, 0, 0, 0]]) >>> s = ndimage.generate_binary_structure(2,1) >>> s array([[False, True, False], [ True, True, True], [False, True, False]], dtype=bool) >>> ndimage.grey_dilation(a, footprint=s) array([[0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 3, 1, 0, 0], [0, 1, 3, 3, 3, 1, 0], [0, 1, 1, 3, 2, 1, 0], [0, 1, 1, 2, 2, 2, 0], [0, 0, 1, 1, 2, 0, 0], [0, 0, 0, 0, 0, 0, 0]]) >>> ndimage.grey_dilation(a, size=(3,3), structure=np.ones((3,3))) array([[1, 1, 1, 1, 1, 1, 1], [1, 2, 4, 4, 4, 2, 1], [1, 2, 4, 4, 4, 2, 1], [1, 2, 4, 4, 4, 3, 1], [1, 2, 2, 3, 3, 3, 1], [1, 2, 2, 3, 3, 3, 1], [1, 1, 1, 1, 1, 1, 1]]) """ if size is None and footprint is None and structure is None: raise ValueError("size, footprint or structure must be specified") if structure is not None: structure = numpy.asarray(structure) structure = structure[tuple([slice(None, None, -1)] * structure.ndim)] if footprint is not None: footprint = numpy.asarray(footprint) footprint = footprint[tuple([slice(None, None, -1)] * footprint.ndim)] input = numpy.asarray(input) origin = _ni_support._normalize_sequence(origin, input.ndim) for ii in range(len(origin)): origin[ii] = -origin[ii] if footprint is not None: sz = footprint.shape[ii] elif structure is not None: sz = 
structure.shape[ii] elif numpy.isscalar(size): sz = size else: sz = size[ii] if not sz & 1: origin[ii] -= 1 return filters._min_or_max_filter(input, size, footprint, structure, output, mode, cval, origin, 0) def grey_opening(input, size=None, footprint=None, structure=None, output=None, mode="reflect", cval=0.0, origin=0): """ Multi-dimensional greyscale opening. A greyscale opening consists in the succession of a greyscale erosion, and a greyscale dilation. Parameters ---------- input : array_like Array over which the grayscale opening is to be computed. size : tuple of ints Shape of a flat and full structuring element used for the grayscale opening. Optional if `footprint` or `structure` is provided. footprint : array of ints, optional Positions of non-infinite elements of a flat structuring element used for the grayscale opening. structure : array of ints, optional Structuring element used for the grayscale opening. `structure` may be a non-flat structuring element. output : array, optional An array used for storing the ouput of the opening may be provided. mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional The `mode` parameter determines how the array borders are handled, where `cval` is the value when mode is equal to 'constant'. Default is 'reflect' cval : scalar, optional Value to fill past edges of input if `mode` is 'constant'. Default is 0.0. origin : scalar, optional The `origin` parameter controls the placement of the filter. Default 0 Returns ------- grey_opening : ndarray Result of the grayscale opening of `input` with `structure`. See also -------- binary_opening, grey_dilation, grey_erosion, grey_closing generate_binary_structure Notes ----- The action of a grayscale opening with a flat structuring element amounts to smoothen high local maxima, whereas binary opening erases small objects. References ---------- .. 
[1] http://en.wikipedia.org/wiki/Mathematical_morphology Examples -------- >>> a = np.arange(36).reshape((6,6)) >>> a[3, 3] = 50 >>> a array([[ 0, 1, 2, 3, 4, 5], [ 6, 7, 8, 9, 10, 11], [12, 13, 14, 15, 16, 17], [18, 19, 20, 50, 22, 23], [24, 25, 26, 27, 28, 29], [30, 31, 32, 33, 34, 35]]) >>> ndimage.grey_opening(a, size=(3,3)) array([[ 0, 1, 2, 3, 4, 4], [ 6, 7, 8, 9, 10, 10], [12, 13, 14, 15, 16, 16], [18, 19, 20, 22, 22, 22], [24, 25, 26, 27, 28, 28], [24, 25, 26, 27, 28, 28]]) >>> # Note that the local maximum a[3,3] has disappeared """ tmp = grey_erosion(input, size, footprint, structure, None, mode, cval, origin) return grey_dilation(tmp, size, footprint, structure, output, mode, cval, origin) def grey_closing(input, size=None, footprint=None, structure=None, output=None, mode="reflect", cval=0.0, origin=0): """ Multi-dimensional greyscale closing. A greyscale closing consists in the succession of a greyscale dilation, and a greyscale erosion. Parameters ---------- input : array_like Array over which the grayscale closing is to be computed. size : tuple of ints Shape of a flat and full structuring element used for the grayscale closing. Optional if `footprint` or `structure` is provided. footprint : array of ints, optional Positions of non-infinite elements of a flat structuring element used for the grayscale closing. structure : array of ints, optional Structuring element used for the grayscale closing. `structure` may be a non-flat structuring element. output : array, optional An array used for storing the ouput of the closing may be provided. mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional The `mode` parameter determines how the array borders are handled, where `cval` is the value when mode is equal to 'constant'. Default is 'reflect' cval : scalar, optional Value to fill past edges of input if `mode` is 'constant'. Default is 0.0. origin : scalar, optional The `origin` parameter controls the placement of the filter. 
Default 0 Returns ------- grey_closing : ndarray Result of the grayscale closing of `input` with `structure`. See also -------- binary_closing, grey_dilation, grey_erosion, grey_opening, generate_binary_structure Notes ----- The action of a grayscale closing with a flat structuring element amounts to smoothen deep local minima, whereas binary closing fills small holes. References ---------- .. [1] http://en.wikipedia.org/wiki/Mathematical_morphology Examples -------- >>> a = np.arange(36).reshape((6,6)) >>> a[3,3] = 0 >>> a array([[ 0, 1, 2, 3, 4, 5], [ 6, 7, 8, 9, 10, 11], [12, 13, 14, 15, 16, 17], [18, 19, 20, 0, 22, 23], [24, 25, 26, 27, 28, 29], [30, 31, 32, 33, 34, 35]]) >>> ndimage.grey_closing(a, size=(3,3)) array([[ 7, 7, 8, 9, 10, 11], [ 7, 7, 8, 9, 10, 11], [13, 13, 14, 15, 16, 17], [19, 19, 20, 20, 22, 23], [25, 25, 26, 27, 28, 29], [31, 31, 32, 33, 34, 35]]) >>> # Note that the local minimum a[3,3] has disappeared """ tmp = grey_dilation(input, size, footprint, structure, None, mode, cval, origin) return grey_erosion(tmp, size, footprint, structure, output, mode, cval, origin) def morphological_gradient(input, size=None, footprint=None, structure=None, output=None, mode="reflect", cval=0.0, origin=0): """ Multi-dimensional morphological gradient. The morphological gradient is calculated as the difference between a dilation and an erosion of the input with a given structuring element. Parameters ---------- input : array_like Array over which to compute the morphlogical gradient. size : tuple of ints Shape of a flat and full structuring element used for the mathematical morphology operations. Optional if `footprint` or `structure` is provided. A larger `size` yields a more blurred gradient. footprint : array of ints, optional Positions of non-infinite elements of a flat structuring element used for the morphology operations. Larger footprints give a more blurred morphological gradient. 
structure : array of ints, optional Structuring element used for the morphology operations. `structure` may be a non-flat structuring element. output : array, optional An array used for storing the ouput of the morphological gradient may be provided. mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional The `mode` parameter determines how the array borders are handled, where `cval` is the value when mode is equal to 'constant'. Default is 'reflect' cval : scalar, optional Value to fill past edges of input if `mode` is 'constant'. Default is 0.0. origin : scalar, optional The `origin` parameter controls the placement of the filter. Default 0 Returns ------- morphological_gradient : ndarray Morphological gradient of `input`. See also -------- grey_dilation, grey_erosion, ndimage.gaussian_gradient_magnitude Notes ----- For a flat structuring element, the morphological gradient computed at a given point corresponds to the maximal difference between elements of the input among the elements covered by the structuring element centered on the point. References ---------- .. [1] http://en.wikipedia.org/wiki/Mathematical_morphology Examples -------- >>> a = np.zeros((7,7), dtype=np.int) >>> a[2:5, 2:5] = 1 >>> ndimage.morphological_gradient(a, size=(3,3)) array([[0, 0, 0, 0, 0, 0, 0], [0, 1, 1, 1, 1, 1, 0], [0, 1, 1, 1, 1, 1, 0], [0, 1, 1, 0, 1, 1, 0], [0, 1, 1, 1, 1, 1, 0], [0, 1, 1, 1, 1, 1, 0], [0, 0, 0, 0, 0, 0, 0]]) >>> # The morphological gradient is computed as the difference >>> # between a dilation and an erosion >>> ndimage.grey_dilation(a, size=(3,3)) -\\ ... 
ndimage.grey_erosion(a, size=(3,3)) array([[0, 0, 0, 0, 0, 0, 0], [0, 1, 1, 1, 1, 1, 0], [0, 1, 1, 1, 1, 1, 0], [0, 1, 1, 0, 1, 1, 0], [0, 1, 1, 1, 1, 1, 0], [0, 1, 1, 1, 1, 1, 0], [0, 0, 0, 0, 0, 0, 0]]) >>> a = np.zeros((7,7), dtype=np.int) >>> a[2:5, 2:5] = 1 >>> a[4,4] = 2; a[2,3] = 3 >>> a array([[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 3, 1, 0, 0], [0, 0, 1, 1, 1, 0, 0], [0, 0, 1, 1, 2, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0]]) >>> ndimage.morphological_gradient(a, size=(3,3)) array([[0, 0, 0, 0, 0, 0, 0], [0, 1, 3, 3, 3, 1, 0], [0, 1, 3, 3, 3, 1, 0], [0, 1, 3, 2, 3, 2, 0], [0, 1, 1, 2, 2, 2, 0], [0, 1, 1, 2, 2, 2, 0], [0, 0, 0, 0, 0, 0, 0]]) """ tmp = grey_dilation(input, size, footprint, structure, None, mode, cval, origin) if isinstance(output, numpy.ndarray): grey_erosion(input, size, footprint, structure, output, mode, cval, origin) return numpy.subtract(tmp, output, output) else: return (tmp - grey_erosion(input, size, footprint, structure, None, mode, cval, origin)) def morphological_laplace(input, size=None, footprint=None, structure=None, output=None, mode="reflect", cval=0.0, origin=0): """ Multi-dimensional morphological laplace. Parameters ---------- input : array_like Input. size : int or sequence of ints, optional See `structure`. footprint : bool or ndarray, optional See `structure`. structure : structure Either `size`, `footprint`, or the `structure` must be provided. output : ndarray An output array can optionally be provided. mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional The mode parameter determines how the array borders are handled. For 'constant' mode, values beyond borders are set to be `cval`. Default is 'reflect'. cval : scalar, optional Value to fill past edges of input if mode is 'constant'. Default is 0.0 origin : origin The origin parameter controls the placement of the filter. 
Returns ------- morphological_laplace : ndarray Output """ tmp1 = grey_dilation(input, size, footprint, structure, None, mode, cval, origin) if isinstance(output, numpy.ndarray): grey_erosion(input, size, footprint, structure, output, mode, cval, origin) numpy.add(tmp1, output, output) numpy.subtract(output, input, output) return numpy.subtract(output, input, output) else: tmp2 = grey_erosion(input, size, footprint, structure, None, mode, cval, origin) numpy.add(tmp1, tmp2, tmp2) numpy.subtract(tmp2, input, tmp2) numpy.subtract(tmp2, input, tmp2) return tmp2 def white_tophat(input, size=None, footprint=None, structure=None, output=None, mode="reflect", cval=0.0, origin=0): """ Multi-dimensional white tophat filter. Parameters ---------- input : array_like Input. size : tuple of ints Shape of a flat and full structuring element used for the filter. Optional if `footprint` or `structure` is provided. footprint : array of ints, optional Positions of elements of a flat structuring element used for the white tophat filter. structure : array of ints, optional Structuring element used for the filter. `structure` may be a non-flat structuring element. output : array, optional An array used for storing the output of the filter may be provided. mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional The `mode` parameter determines how the array borders are handled, where `cval` is the value when mode is equal to 'constant'. Default is 'reflect' cval : scalar, optional Value to fill past edges of input if `mode` is 'constant'. Default is 0.0. origin : scalar, optional The `origin` parameter controls the placement of the filter. Default is 0. Returns ------- output : ndarray Result of the filter of `input` with `structure`. 
See also -------- black_tophat """ tmp = grey_erosion(input, size, footprint, structure, None, mode, cval, origin) if isinstance(output, numpy.ndarray): grey_dilation(tmp, size, footprint, structure, output, mode, cval, origin) return numpy.subtract(input, output, output) else: tmp = grey_dilation(tmp, size, footprint, structure, None, mode, cval, origin) return input - tmp def black_tophat(input, size=None, footprint=None, structure=None, output=None, mode="reflect", cval=0.0, origin=0): """ Multi-dimensional black tophat filter. Parameters ---------- input : array_like Input. size : tuple of ints Shape of a flat and full structuring element used for the filter. Optional if `footprint` or `structure` is provided. footprint : array of ints, optional Positions of non-infinite elements of a flat structuring element used for the black tophat filter. structure : array of ints, optional Structuring element used for the filter. `structure` may be a non-flat structuring element. output : array, optional An array used for storing the output of the filter may be provided. mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional The `mode` parameter determines how the array borders are handled, where `cval` is the value when mode is equal to 'constant'. Default is 'reflect' cval : scalar, optional Value to fill past edges of input if `mode` is 'constant'. Default is 0.0. origin : scalar, optional The `origin` parameter controls the placement of the filter. Default 0 Returns ------- black_tophat : ndarray Result of the filter of `input` with `structure`. 
See also -------- white_tophat, grey_opening, grey_closing """ tmp = grey_dilation(input, size, footprint, structure, None, mode, cval, origin) if isinstance(output, numpy.ndarray): grey_erosion(tmp, size, footprint, structure, output, mode, cval, origin) return numpy.subtract(output, input, output) else: tmp = grey_erosion(tmp, size, footprint, structure, None, mode, cval, origin) return tmp - input def distance_transform_bf(input, metric="euclidean", sampling=None, return_distances=True, return_indices=False, distances=None, indices=None): """ Distance transform function by a brute force algorithm. This function calculates the distance transform of the `input`, by replacing each background element (zero values), with its shortest distance to the foreground (any element non-zero). In addition to the distance transform, the feature transform can be calculated. In this case the index of the closest background element is returned along the first axis of the result. Parameters ---------- input : array_like Input metric : str, optional Three types of distance metric are supported: 'euclidean', 'taxicab' and 'chessboard'. sampling : {int, sequence of ints}, optional This parameter is only used in the case of the euclidean `metric` distance transform. The sampling along each axis can be given by the `sampling` parameter which should be a sequence of length equal to the input rank, or a single number in which the `sampling` is assumed to be equal along all axes. return_distances : bool, optional The `return_distances` flag can be used to indicate if the distance transform is returned. The default is True. return_indices : bool, optional The `return_indices` flags can be used to indicate if the feature transform is returned. The default is False. distances : float64 ndarray, optional Optional output array to hold distances (if `return_distances` is True). indices : int64 ndarray, optional Optional output array to hold indices (if `return_indices` is True). 
Returns ------- distances : ndarray Distance array if `return_distances` is True. indices : ndarray Indices array if `return_indices` is True. Notes ----- This function employs a slow brute force algorithm, see also the function distance_transform_cdt for more efficient taxicab and chessboard algorithms. """ if (not return_distances) and (not return_indices): msg = 'at least one of distances/indices must be specified' raise RuntimeError(msg) tmp1 = numpy.asarray(input) != 0 struct = generate_binary_structure(tmp1.ndim, tmp1.ndim) tmp2 = binary_dilation(tmp1, struct) tmp2 = numpy.logical_xor(tmp1, tmp2) tmp1 = tmp1.astype(numpy.int8) - tmp2.astype(numpy.int8) metric = metric.lower() if metric == 'euclidean': metric = 1 elif metric in ['taxicab', 'cityblock', 'manhattan']: metric = 2 elif metric == 'chessboard': metric = 3 else: raise RuntimeError('distance metric not supported') if sampling is not None: sampling = _ni_support._normalize_sequence(sampling, tmp1.ndim) sampling = numpy.asarray(sampling, dtype=numpy.float64) if not sampling.flags.contiguous: sampling = sampling.copy() if return_indices: ft = numpy.zeros(tmp1.shape, dtype=numpy.int32) else: ft = None if return_distances: if distances is None: if metric == 1: dt = numpy.zeros(tmp1.shape, dtype=numpy.float64) else: dt = numpy.zeros(tmp1.shape, dtype=numpy.uint32) else: if distances.shape != tmp1.shape: raise RuntimeError('distances array has wrong shape') if metric == 1: if distances.dtype.type != numpy.float64: raise RuntimeError('distances array must be float64') else: if distances.dtype.type != numpy.uint32: raise RuntimeError('distances array must be uint32') dt = distances else: dt = None _nd_image.distance_transform_bf(tmp1, metric, sampling, dt, ft) if return_indices: if isinstance(indices, numpy.ndarray): if indices.dtype.type != numpy.int32: raise RuntimeError('indices must of int32 type') if indices.shape != (tmp1.ndim,) + tmp1.shape: raise RuntimeError('indices has wrong shape') tmp2 = indices 
else: tmp2 = numpy.indices(tmp1.shape, dtype=numpy.int32) ft = numpy.ravel(ft) for ii in range(tmp2.shape[0]): rtmp = numpy.ravel(tmp2[ii, ...])[ft] rtmp.shape = tmp1.shape tmp2[ii, ...] = rtmp ft = tmp2 # construct and return the result result = [] if return_distances and not isinstance(distances, numpy.ndarray): result.append(dt) if return_indices and not isinstance(indices, numpy.ndarray): result.append(ft) if len(result) == 2: return tuple(result) elif len(result) == 1: return result[0] else: return None def distance_transform_cdt(input, metric='chessboard', return_distances=True, return_indices=False, distances=None, indices=None): """ Distance transform for chamfer type of transforms. Parameters ---------- input : array_like Input metric : {'chessboard', 'taxicab'}, optional The `metric` determines the type of chamfering that is done. If the `metric` is equal to 'taxicab' a structure is generated using generate_binary_structure with a squared distance equal to 1. If the `metric` is equal to 'chessboard', a `metric` is generated using generate_binary_structure with a squared distance equal to the dimensionality of the array. These choices correspond to the common interpretations of the 'taxicab' and the 'chessboard' distance metrics in two dimensions. The default for `metric` is 'chessboard'. return_distances, return_indices : bool, optional The `return_distances`, and `return_indices` flags can be used to indicate if the distance transform, the feature transform, or both must be returned. If the feature transform is returned (``return_indices=True``), the index of the closest background element is returned along the first axis of the result. The `return_distances` default is True, and the `return_indices` default is False. distances, indices : ndarrays of int32, optional The `distances` and `indices` arguments can be used to give optional output arrays that must be the same shape as `input`. 
""" if (not return_distances) and (not return_indices): msg = 'at least one of distances/indices must be specified' raise RuntimeError(msg) ft_inplace = isinstance(indices, numpy.ndarray) dt_inplace = isinstance(distances, numpy.ndarray) input = numpy.asarray(input) if metric in ['taxicab', 'cityblock', 'manhattan']: rank = input.ndim metric = generate_binary_structure(rank, 1) elif metric == 'chessboard': rank = input.ndim metric = generate_binary_structure(rank, rank) else: try: metric = numpy.asarray(metric) except: raise RuntimeError('invalid metric provided') for s in metric.shape: if s != 3: raise RuntimeError('metric sizes must be equal to 3') if not metric.flags.contiguous: metric = metric.copy() if dt_inplace: if distances.dtype.type != numpy.int32: raise RuntimeError('distances must be of int32 type') if distances.shape != input.shape: raise RuntimeError('distances has wrong shape') dt = distances dt[...] = numpy.where(input, -1, 0).astype(numpy.int32) else: dt = numpy.where(input, -1, 0).astype(numpy.int32) rank = dt.ndim if return_indices: sz = numpy.product(dt.shape,axis=0) ft = numpy.arange(sz, dtype=numpy.int32) ft.shape = dt.shape else: ft = None _nd_image.distance_transform_op(metric, dt, ft) dt = dt[tuple([slice(None, None, -1)] * rank)] if return_indices: ft = ft[tuple([slice(None, None, -1)] * rank)] _nd_image.distance_transform_op(metric, dt, ft) dt = dt[tuple([slice(None, None, -1)] * rank)] if return_indices: ft = ft[tuple([slice(None, None, -1)] * rank)] ft = numpy.ravel(ft) if ft_inplace: if indices.dtype.type != numpy.int32: raise RuntimeError('indices must of int32 type') if indices.shape != (dt.ndim,) + dt.shape: raise RuntimeError('indices has wrong shape') tmp = indices else: tmp = numpy.indices(dt.shape, dtype=numpy.int32) for ii in range(tmp.shape[0]): rtmp = numpy.ravel(tmp[ii, ...])[ft] rtmp.shape = dt.shape tmp[ii, ...] 
= rtmp ft = tmp # construct and return the result result = [] if return_distances and not dt_inplace: result.append(dt) if return_indices and not ft_inplace: result.append(ft) if len(result) == 2: return tuple(result) elif len(result) == 1: return result[0] else: return None def distance_transform_edt(input, sampling=None, return_distances=True, return_indices=False, distances=None, indices=None): """ Exact euclidean distance transform. In addition to the distance transform, the feature transform can be calculated. In this case the index of the closest background element is returned along the first axis of the result. Parameters ---------- input : array_like Input data to transform. Can be any type but will be converted into binary: 1 wherever input equates to True, 0 elsewhere. sampling : float or int, or sequence of same, optional Spacing of elements along each dimension. If a sequence, must be of length equal to the input rank; if a single number, this is used for all axes. If not specified, a grid spacing of unity is implied. return_distances : bool, optional Whether to return distance matrix. At least one of return_distances/return_indices must be True. Default is True. return_indices : bool, optional Whether to return indices matrix. Default is False. distance : ndarray, optional Used for output of distance array, must be of type float64. indices : ndarray, optional Used for output of indices, must be of type int32. Returns ------- distance_transform_edt : ndarray or list of ndarrays Either distance matrix, index matrix, or a list of the two, depending on `return_x` flags and `distance` and `indices` input parameters. Notes ----- The euclidean distance transform gives values of the euclidean distance:: n y_i = sqrt(sum (x[i]-b[i])**2) i where b[i] is the background point (value 0) with the smallest Euclidean distance to input points x[i], and n is the number of dimensions. 
Examples -------- >>> a = np.array(([0,1,1,1,1], [0,0,1,1,1], [0,1,1,1,1], [0,1,1,1,0], [0,1,1,0,0])) >>> from scipy import ndimage >>> ndimage.distance_transform_edt(a) array([[ 0. , 1. , 1.4142, 2.2361, 3. ], [ 0. , 0. , 1. , 2. , 2. ], [ 0. , 1. , 1.4142, 1.4142, 1. ], [ 0. , 1. , 1.4142, 1. , 0. ], [ 0. , 1. , 1. , 0. , 0. ]]) With a sampling of 2 units along x, 1 along y: >>> ndimage.distance_transform_edt(a, sampling=[2,1]) array([[ 0. , 1. , 2. , 2.8284, 3.6056], [ 0. , 0. , 1. , 2. , 3. ], [ 0. , 1. , 2. , 2.2361, 2. ], [ 0. , 1. , 2. , 1. , 0. ], [ 0. , 1. , 1. , 0. , 0. ]]) Asking for indices as well: >>> edt, inds = ndimage.distance_transform_edt(a, return_indices=True) >>> inds array([[[0, 0, 1, 1, 3], [1, 1, 1, 1, 3], [2, 2, 1, 3, 3], [3, 3, 4, 4, 3], [4, 4, 4, 4, 4]], [[0, 0, 1, 1, 4], [0, 1, 1, 1, 4], [0, 0, 1, 4, 4], [0, 0, 3, 3, 4], [0, 0, 3, 3, 4]]]) With arrays provided for inplace outputs: >>> indices = np.zeros(((np.ndim(a),) + a.shape), dtype=np.int32) >>> ndimage.distance_transform_edt(a, return_indices=True, indices=indices) array([[ 0. , 1. , 1.4142, 2.2361, 3. ], [ 0. , 0. , 1. , 2. , 2. ], [ 0. , 1. , 1.4142, 1.4142, 1. ], [ 0. , 1. , 1.4142, 1. , 0. ], [ 0. , 1. , 1. , 0. , 0. 
]]) >>> indices array([[[0, 0, 1, 1, 3], [1, 1, 1, 1, 3], [2, 2, 1, 3, 3], [3, 3, 4, 4, 3], [4, 4, 4, 4, 4]], [[0, 0, 1, 1, 4], [0, 1, 1, 1, 4], [0, 0, 1, 4, 4], [0, 0, 3, 3, 4], [0, 0, 3, 3, 4]]]) """ if (not return_distances) and (not return_indices): msg = 'at least one of distances/indices must be specified' raise RuntimeError(msg) ft_inplace = isinstance(indices, numpy.ndarray) dt_inplace = isinstance(distances, numpy.ndarray) # calculate the feature transform input = numpy.atleast_1d(numpy.where(input, 1, 0).astype(numpy.int8)) if sampling is not None: sampling = _ni_support._normalize_sequence(sampling, input.ndim) sampling = numpy.asarray(sampling, dtype=numpy.float64) if not sampling.flags.contiguous: sampling = sampling.copy() if ft_inplace: ft = indices if ft.shape != (input.ndim,) + input.shape: raise RuntimeError('indices has wrong shape') if ft.dtype.type != numpy.int32: raise RuntimeError('indices must be of int32 type') else: ft = numpy.zeros((input.ndim,) + input.shape, dtype=numpy.int32) _nd_image.euclidean_feature_transform(input, sampling, ft) # if requested, calculate the distance transform if return_distances: dt = ft - numpy.indices(input.shape, dtype=ft.dtype) dt = dt.astype(numpy.float64) if sampling is not None: for ii in range(len(sampling)): dt[ii, ...] *= sampling[ii] numpy.multiply(dt, dt, dt) if dt_inplace: dt = numpy.add.reduce(dt, axis=0) if distances.shape != dt.shape: raise RuntimeError('indices has wrong shape') if distances.dtype.type != numpy.float64: raise RuntimeError('indices must be of float64 type') numpy.sqrt(dt, distances) else: dt = numpy.add.reduce(dt, axis=0) dt = numpy.sqrt(dt) # construct and return the result result = [] if return_distances and not dt_inplace: result.append(dt) if return_indices and not ft_inplace: result.append(ft) if len(result) == 2: return tuple(result) elif len(result) == 1: return result[0] else: return None
mit
gfonk/ansible
contrib/inventory/freeipa.py
95
2201
#!/usr/bin/env python import argparse from ipalib import api import json def initialize(): ''' This function initializes the FreeIPA/IPA API. This function requires no arguments. A kerberos key must be present in the users keyring in order for this to work. ''' api.bootstrap(context='cli') api.finalize() try: api.Backend.rpcclient.connect() except AttributeError: #FreeIPA < 4.0 compatibility api.Backend.xmlclient.connect() return api def list_groups(api): ''' This function returns a list of all host groups. This function requires one argument, the FreeIPA/IPA API object. ''' inventory = {} hostvars={} meta={} result = api.Command.hostgroup_find()['result'] for hostgroup in result: inventory[hostgroup['cn'][0]] = { 'hosts': [host for host in hostgroup['member_host']]} for host in hostgroup['member_host']: hostvars[host] = {} inventory['_meta'] = {'hostvars': hostvars} inv_string = json.dumps(inventory, indent=1, sort_keys=True) print inv_string return None def parse_args(): ''' This function parses the arguments that were passed in via the command line. This function expects no arguments. ''' parser = argparse.ArgumentParser(description='Ansible FreeIPA/IPA ' 'inventory module') group = parser.add_mutually_exclusive_group(required=True) group.add_argument('--list', action='store_true', help='List active servers') group.add_argument('--host', help='List details about the specified host') return parser.parse_args() def print_host(host): ''' This function is really a stub, it could return variables to be used in a playbook. However, at this point there are no variables stored in FreeIPA/IPA. This function expects one string, this hostname to lookup variables for. ''' print json.dumps({}) return None if __name__ == '__main__': args = parse_args() if args.host: print_host(args.host) elif args.list: api = initialize() list_groups(api)
gpl-3.0
ageron/tensorflow
tensorflow/python/kernel_tests/reduction_ops_test_big.py
30
8764
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Functional tests for reduction ops.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.platform import test class BaseReductionTest(test.TestCase): def _tf_reduce(self, x, reduction_axes, keepdims): raise NotImplementedError() class BigReductionTest(BaseReductionTest): """Test reductions for sum and boolean all over a wide range of shapes.""" def _tf_reduce_max(self, x, reduction_axes, keepdims): return math_ops.reduce_max(x, reduction_axes, keepdims) def _tf_reduce_all(self, x, reduction_axes, keepdims): return math_ops.reduce_all(x, reduction_axes, keepdims) def _tf_reduce_mean(self, x, reduction_axes, keepdims): return math_ops.reduce_mean(x, reduction_axes, keepdims) def _tf_reduce_sum(self, x, reduction_axes, keepdims): return math_ops.reduce_sum(x, reduction_axes, keepdims) @test_util.run_deprecated_v1 def testFloat32Sum(self): # make sure we test all possible kernel invocations # logic is the same for all ops, test just float32 for brevity arr_ = np.ones([4097, 4097], dtype=np.float32) for size_x in [ 1, 
2, 3, 4, 16, 17, 32, 33, 64, 65, 128, 131, 256, 263, 1024, 1025, 4096, 4097 ]: for size_y in [ 1, 2, 3, 4, 16, 17, 32, 33, 64, 65, 128, 131, 256, 263, 1024, 1025, 4096, 4097 ]: arr = arr_[0:size_x, 0:size_y] col_sum = np.ones([size_y], dtype=np.float32) * size_x row_sum = np.ones([size_x], dtype=np.float32) * size_y full_sum = np.ones([], dtype=np.float32) * size_x * size_y with self.session(graph=ops.Graph(), use_gpu=True) as sess: arr_placeholder = array_ops.placeholder(dtype=np.float32, shape=(size_x, size_y)) tf_row_sum = self._tf_reduce_sum(arr_placeholder, 1, False) tf_col_sum = self._tf_reduce_sum(arr_placeholder, 0, False) tf_full_sum = self._tf_reduce_sum(arr_placeholder, [0, 1], False) tf_out_row, tf_out_col, tf_out_full = sess.run( [tf_row_sum, tf_col_sum, tf_full_sum], {arr_placeholder: arr}) self.assertAllClose(col_sum, tf_out_col) self.assertAllClose(row_sum, tf_out_row) self.assertAllClose(full_sum, tf_out_full) arr_ = np.ones([130, 130, 130], dtype=np.float32) for size_x in range(1, 130, 13): for size_y in range(1, 130, 13): for size_z in range(1, 130, 13): arr = arr_[0:size_x, 0:size_y, 0:size_z] sum_y = np.ones([size_x, size_z], dtype=np.float32) sum_xz = np.ones([size_y], dtype=np.float32) with self.session(graph=ops.Graph(), use_gpu=True) as sess: arr_placeholder = array_ops.placeholder( dtype=np.float32, shape=(size_x, size_y, size_z)) tf_sum_xz = self._tf_reduce_mean(arr_placeholder, [0, 2], False) tf_sum_y = self._tf_reduce_mean(arr_placeholder, 1, False) tf_out_sum_xz, tf_out_sum_y = sess.run([tf_sum_xz, tf_sum_y], {arr_placeholder: arr}) self.assertAllClose(sum_y, tf_out_sum_y) self.assertAllClose(sum_xz, tf_out_sum_xz) @test_util.run_deprecated_v1 def testFloat32Max(self): # make sure we test all possible kernel invocations # logic is the same for all ops, test just float32 for brevity arr_ = np.random.uniform( low=-3, high=-1, size=[4105, 4105]).astype(np.float32) for size_x in [ 1, 2, 3, 4, 16, 17, 32, 33, 64, 65, 128, 131, 256, 263, 
1024, 1025, 4096, 4097 ]: for size_y in [ 1, 2, 3, 4, 16, 17, 32, 33, 64, 65, 128, 131, 256, 263, 1024, 1025, 4096, 4097 ]: arr = arr_[0:size_x, 0:size_y] col_max = np.max(arr, axis=0) row_max = np.max(arr, axis=1) full_max = np.max(col_max) with self.session(graph=ops.Graph(), use_gpu=True) as sess: arr_placeholder = array_ops.placeholder(dtype=np.float32, shape=(size_x, size_y)) tf_row_max = self._tf_reduce_max(arr_placeholder, 1, False) tf_col_max = self._tf_reduce_max(arr_placeholder, 0, False) tf_full_max = self._tf_reduce_max(arr_placeholder, [0, 1], False) tf_out_row, tf_out_col, tf_out_full = sess.run( [tf_row_max, tf_col_max, tf_full_max], {arr_placeholder: arr}) self.assertAllClose(col_max, tf_out_col) self.assertAllClose(row_max, tf_out_row) self.assertAllClose(full_max, tf_out_full) arr_ = np.random.uniform( low=-3, high=-1, size=[130, 130, 130]).astype(np.float32) for size_x in range(1, 130, 13): for size_y in range(1, 130, 13): for size_z in range(1, 130, 13): arr = arr_[0:size_x, 0:size_y, 0:size_z] sum_y = np.max(arr, axis=1) sum_xz = np.max(arr, axis=(0, 2)) with self.session(graph=ops.Graph(), use_gpu=True) as sess: arr_placeholder = array_ops.placeholder( dtype=np.float32, shape=(size_x, size_y, size_z)) tf_sum_xz = self._tf_reduce_max(arr_placeholder, [0, 2], False) tf_sum_y = self._tf_reduce_max(arr_placeholder, 1, False) tf_out_sum_xz, tf_out_sum_y = sess.run( [tf_sum_xz, tf_sum_y], {arr_placeholder: arr}) self.assertAllClose(sum_y, tf_out_sum_y) self.assertAllClose(sum_xz, tf_out_sum_xz) @test_util.run_deprecated_v1 def testBooleanAll(self): # make sure we test all possible kernel invocations # test operation where T(0) is not the identity arr_ = np.ones([4097, 4097], dtype=np.bool) for size_x in [ 1, 2, 3, 4, 16, 17, 32, 33, 64, 65, 128, 131, 256, 263, 1024, 1025, 4096, 4097 ]: for size_y in [ 1, 2, 3, 4, 16, 17, 32, 33, 64, 65, 128, 131, 256, 263, 1024, 1025, 4096, 4097 ]: arr = arr_[0:size_x, 0:size_y] col_sum = np.ones([size_y], 
dtype=np.bool) row_sum = np.ones([size_x], dtype=np.bool) full_sum = np.ones([1], dtype=np.bool).reshape([]) with self.session(graph=ops.Graph(), use_gpu=True) as sess: arr_placeholder = array_ops.placeholder(dtype=np.bool, shape=(size_x, size_y)) tf_row_sum = self._tf_reduce_all(arr_placeholder, 1, False) tf_col_sum = self._tf_reduce_all(arr_placeholder, 0, False) tf_full_sum = self._tf_reduce_all(arr_placeholder, [0, 1], False) tf_out_row, tf_out_col, tf_out_full = sess.run( [tf_row_sum, tf_col_sum, tf_full_sum], {arr_placeholder: arr}) self.assertAllClose(col_sum, tf_out_col) self.assertAllClose(row_sum, tf_out_row) self.assertAllClose(full_sum, tf_out_full) arr_ = np.ones([130, 130, 130], dtype=np.bool) for size_x in range(1, 130, 13): for size_y in range(1, 130, 13): for size_z in range(1, 130, 13): arr = arr_[0:size_x, 0:size_y, 0:size_z] sum_y = np.ones([size_x, size_z], dtype=np.bool) sum_xz = np.ones([size_y], dtype=np.bool) with self.session(graph=ops.Graph(), use_gpu=True) as sess: arr_placeholder = array_ops.placeholder( dtype=np.bool, shape=(size_x, size_y, size_z)) tf_sum_xz = self._tf_reduce_all(arr_placeholder, [0, 2], False) tf_sum_y = self._tf_reduce_all(arr_placeholder, 1, False) tf_out_sum_xz, tf_out_sum_y = sess.run( [tf_sum_xz, tf_sum_y], {arr_placeholder: arr}) self.assertAllClose(sum_y, tf_out_sum_y) self.assertAllClose(sum_xz, tf_out_sum_xz) if __name__ == "__main__": test.main()
apache-2.0
knehez/edx-platform
common/djangoapps/track/backends/mongodb.py
41
3046
"""MongoDB event tracker backend.""" from __future__ import absolute_import import logging import pymongo from pymongo import MongoClient from pymongo.errors import PyMongoError from bson.errors import BSONError from track.backends import BaseBackend log = logging.getLogger(__name__) class MongoBackend(BaseBackend): """Class for a MongoDB event tracker Backend""" def __init__(self, **kwargs): """ Connect to a MongoDB. :Parameters: - `host`: hostname - `port`: port - `user`: collection username - `password`: collection user password - `database`: name of the database - `collection`: name of the collection - `extra`: parameters to pymongo.MongoClient not listed above """ super(MongoBackend, self).__init__(**kwargs) # Extract connection parameters from kwargs host = kwargs.get('host', 'localhost') port = kwargs.get('port', 27017) user = kwargs.get('user', '') password = kwargs.get('password', '') db_name = kwargs.get('database', 'track') collection_name = kwargs.get('collection', 'events') # Other mongo connection arguments extra = kwargs.get('extra', {}) # By default disable write acknowledgments, reducing the time # blocking during an insert extra['w'] = extra.get('w', 0) # Make timezone aware by default extra['tz_aware'] = extra.get('tz_aware', True) # Connect to database and get collection self.connection = MongoClient( host=host, port=port, **extra ) database = self.connection[db_name] if user or password: database.authenticate(user, password) self.collection = database[collection_name] self._create_indexes() def _create_indexes(self): """Ensures the proper fields are indexed""" # WARNING: The collection will be locked during the index # creation. If the collection has a large number of # documents in it, the operation can take a long time. # TODO: The creation of indexes can be moved to a Django # management command or equivalent. There is also an option to # run the indexing on the background, without locking. 
self.collection.ensure_index([('time', pymongo.DESCENDING)]) self.collection.ensure_index('event_type') def send(self, event): """Insert the event in to the Mongo collection""" try: self.collection.insert(event, manipulate=False) except (PyMongoError, BSONError): # The event will be lost in case of a connection error or any error # that occurs when trying to insert the event into Mongo. # pymongo will re-connect/re-authenticate automatically # during the next event. msg = 'Error inserting to MongoDB event tracker backend' log.exception(msg)
agpl-3.0
yewang15215/django
tests/template_tests/test_nodelist.py
173
3234
from unittest import TestCase from django.template import Context, Engine from django.template.base import TextNode, VariableNode from django.utils import six class NodelistTest(TestCase): @classmethod def setUpClass(cls): cls.engine = Engine() super(NodelistTest, cls).setUpClass() def test_for(self): template = self.engine.from_string('{% for i in 1 %}{{ a }}{% endfor %}') vars = template.nodelist.get_nodes_by_type(VariableNode) self.assertEqual(len(vars), 1) def test_if(self): template = self.engine.from_string('{% if x %}{{ a }}{% endif %}') vars = template.nodelist.get_nodes_by_type(VariableNode) self.assertEqual(len(vars), 1) def test_ifequal(self): template = self.engine.from_string('{% ifequal x y %}{{ a }}{% endifequal %}') vars = template.nodelist.get_nodes_by_type(VariableNode) self.assertEqual(len(vars), 1) def test_ifchanged(self): template = self.engine.from_string('{% ifchanged x %}{{ a }}{% endifchanged %}') vars = template.nodelist.get_nodes_by_type(VariableNode) self.assertEqual(len(vars), 1) class TextNodeTest(TestCase): def test_textnode_repr(self): engine = Engine() for temptext, reprtext in [ ("Hello, world!", "<TextNode: u'Hello, world!'>"), ("One\ntwo.", "<TextNode: u'One\\ntwo.'>"), ]: template = engine.from_string(temptext) texts = template.nodelist.get_nodes_by_type(TextNode) if six.PY3: reprtext = reprtext.replace("u'", "'") self.assertEqual(repr(texts[0]), reprtext) class ErrorIndexTest(TestCase): """ Checks whether index of error is calculated correctly in template debugger in for loops. 
Refs ticket #5831 """ def test_correct_exception_index(self): tests = [ ('{% load bad_tag %}{% for i in range %}{% badsimpletag %}{% endfor %}', (38, 56)), ( '{% load bad_tag %}{% for i in range %}{% for j in range %}' '{% badsimpletag %}{% endfor %}{% endfor %}', (58, 76) ), ( '{% load bad_tag %}{% for i in range %}{% badsimpletag %}' '{% for j in range %}Hello{% endfor %}{% endfor %}', (38, 56) ), ( '{% load bad_tag %}{% for i in range %}{% for j in five %}' '{% badsimpletag %}{% endfor %}{% endfor %}', (38, 57) ), ('{% load bad_tag %}{% for j in five %}{% badsimpletag %}{% endfor %}', (18, 37)), ] context = Context({ 'range': range(5), 'five': 5, }) engine = Engine(debug=True, libraries={'bad_tag': 'template_tests.templatetags.bad_tag'}) for source, expected_error_source_index in tests: template = engine.from_string(source) try: template.render(context) except (RuntimeError, TypeError) as e: debug = e.template_debug self.assertEqual((debug['start'], debug['end']), expected_error_source_index)
bsd-3-clause
JamesLinEngineer/RKMC
addons/plugin.video.salts/scrapers/hevcbluray_scraper.py
1
4696
""" SALTS XBMC Addon Copyright (C) 2014 tknorris This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. """ import scraper import urlparse import re import kodi import log_utils # @UnusedImport import dom_parser from salts_lib import scraper_utils from salts_lib.constants import VIDEO_TYPES from salts_lib.constants import FORCE_NO_MATCH from salts_lib.constants import QUALITIES BASE_URL = 'https://hevcbluray.com' QUALITY_MAP = {'HD 720P': QUALITIES.HD720, 'HD 1080P': QUALITIES.HD1080, '1080P BLURAY': QUALITIES.HD1080} class Scraper(scraper.Scraper): base_url = BASE_URL def __init__(self, timeout=scraper.DEFAULT_TIMEOUT): self.timeout = timeout self.base_url = kodi.get_setting('%s-base_url' % (self.get_name())) @classmethod def provides(cls): return frozenset([VIDEO_TYPES.MOVIE]) @classmethod def get_name(cls): return 'HEVCBluRay' def get_sources(self, video): source_url = self.get_url(video) sources = [] if source_url and source_url != FORCE_NO_MATCH: url = urlparse.urljoin(self.base_url, source_url) html = self._http_get(url, cache_limit=.5) is_3d = False page_quality = QUALITIES.HD720 title = dom_parser.parse_dom(html, 'title') if title: title = title[0] match = re.search('(\d{3,})p', title) if match: page_quality = scraper_utils.height_get_quality(match.group(1)) is_3d = True if re.search('\s+3D\s+', title) else False fragments = dom_parser.parse_dom(html, 'div', {'class': 'txt-block'}) + dom_parser.parse_dom(html, 
'li', {'class': 'elemento'}) for fragment in fragments: for match in re.finditer('href="([^"]+)', fragment): stream_url = match.group(1) host = urlparse.urlparse(stream_url).hostname q_str = dom_parser.parse_dom(fragment, 'span', {'class': 'd'}) q_str = q_str[0].upper() if q_str else '' base_quality = QUALITY_MAP.get(q_str, page_quality) quality = scraper_utils.get_quality(video, host, base_quality) source = {'multi-part': False, 'url': stream_url, 'host': host, 'class': self, 'quality': quality, 'views': None, 'rating': None, 'direct': False} source['format'] = 'x265' source['3D'] = is_3d sources.append(source) return sources def search(self, video_type, title, year, season=''): # @UnusedVariable results = [] html = self._http_get(self.base_url, params={'s': title}, cache_limit=8) for item in dom_parser.parse_dom(html, 'div', {'class': 'item'}): match = re.search('href="([^"]+)', item) match_title = dom_parser.parse_dom(item, 'span', {'class': 'tt'}) year_frag = dom_parser.parse_dom(item, 'span', {'class': 'year'}) if match and match_title: url = match.group(1) match_title = match_title[0] if re.search('\d+\s*x\s*\d+', match_title): continue # exclude episodes match_title, match_year = scraper_utils.extra_year(match_title) if not match_year and year_frag: match_year = year_frag[0] match = re.search('(.*?)\s+\d{3,}p', match_title) if match: match_title = match.group(1) extra = dom_parser.parse_dom(item, 'span', {'class': 'calidad2'}) if extra: match_title += ' [%s]' % (extra[0]) if not year or not match_year or year == match_year: result = {'title': scraper_utils.cleanse_title(match_title), 'year': match_year, 'url': scraper_utils.pathify_url(url)} results.append(result) return results
gpl-2.0
snasoft/QtCreatorPluginsPack
Bin/3rdParty/vera/bin/lib/multiprocessing/dummy/connection.py
168
2807
# # Analogue of `multiprocessing.connection` which uses queues instead of sockets # # multiprocessing/dummy/connection.py # # Copyright (c) 2006-2008, R Oudkerk # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # 3. Neither the name of author nor the names of any contributors may be # used to endorse or promote products derived from this software # without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS # OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY # OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF # SUCH DAMAGE. 
# __all__ = [ 'Client', 'Listener', 'Pipe' ] from Queue import Queue families = [None] class Listener(object): def __init__(self, address=None, family=None, backlog=1): self._backlog_queue = Queue(backlog) def accept(self): return Connection(*self._backlog_queue.get()) def close(self): self._backlog_queue = None address = property(lambda self: self._backlog_queue) def Client(address): _in, _out = Queue(), Queue() address.put((_out, _in)) return Connection(_in, _out) def Pipe(duplex=True): a, b = Queue(), Queue() return Connection(a, b), Connection(b, a) class Connection(object): def __init__(self, _in, _out): self._out = _out self._in = _in self.send = self.send_bytes = _out.put self.recv = self.recv_bytes = _in.get def poll(self, timeout=0.0): if self._in.qsize() > 0: return True if timeout <= 0.0: return False self._in.not_empty.acquire() self._in.not_empty.wait(timeout) self._in.not_empty.release() return self._in.qsize() > 0 def close(self): pass
lgpl-3.0
jkankiewicz/kivy
kivy/gesture.py
23
14633
''' Gesture recognition =================== This class allows you to easily create new gestures and compare them:: from kivy.gesture import Gesture, GestureDatabase # Create a gesture g = Gesture() g.add_stroke(point_list=[(1,1), (3,4), (2,1)]) g.normalize() # Add it to the database gdb = GestureDatabase() gdb.add_gesture(g) # And for the next gesture, try to find it! g2 = Gesture() # ... gdb.find(g2) .. warning:: You don't really want to do this: it's more of an example of how to construct gestures dynamically. Typically, you would need a lot more points, so it's better to record gestures in a file and reload them to compare later. Look in the examples/gestures directory for an example of how to do that. ''' __all__ = ('Gesture', 'GestureDatabase', 'GesturePoint', 'GestureStroke') import pickle import base64 import zlib import math from kivy.vector import Vector from io import BytesIO class GestureDatabase(object): '''Class to handle a gesture database.''' def __init__(self): self.db = [] def add_gesture(self, gesture): '''Add a new gesture to the database.''' self.db.append(gesture) def find(self, gesture, minscore=0.9, rotation_invariant=True): '''Find a matching gesture in the database.''' if not gesture: return best = None bestscore = minscore for g in self.db: score = g.get_score(gesture, rotation_invariant) if score < bestscore: continue bestscore = score best = g if not best: return return (bestscore, best) def gesture_to_str(self, gesture): '''Convert a gesture into a unique string.''' io = BytesIO() p = pickle.Pickler(io) p.dump(gesture) data = base64.b64encode(zlib.compress(io.getvalue(), 9)) return data def str_to_gesture(self, data): '''Convert a unique string to a gesture.''' io = BytesIO(zlib.decompress(base64.b64decode(data))) p = pickle.Unpickler(io) gesture = p.load() return gesture class GesturePoint: def __init__(self, x, y): '''Stores the x,y coordinates of a point in the gesture.''' self.x = float(x) self.y = float(y) def scale(self, factor): 
''' Scales the point by the given factor.''' self.x *= factor self.y *= factor return self def __repr__(self): return 'Mouse_point: %f,%f' % (self.x, self.y) class GestureStroke: ''' Gestures can be made up of multiple strokes.''' def __init__(self): ''' A stroke in the gesture.''' self.points = list() self.screenpoints = list() # These return the min and max coordinates of the stroke @property def max_x(self): if len(self.points) == 0: return 0 return max(self.points, key=lambda pt: pt.x).x @property def min_x(self): if len(self.points) == 0: return 0 return min(self.points, key=lambda pt: pt.x).x @property def max_y(self): if len(self.points) == 0: return 0 return max(self.points, key=lambda pt: pt.y).y @property def min_y(self): if len(self.points) == 0: return 0 return min(self.points, key=lambda pt: pt.y).y def add_point(self, x, y): ''' add_point(x=x_pos, y=y_pos) Adds a point to the stroke. ''' self.points.append(GesturePoint(x, y)) self.screenpoints.append((x, y)) def scale_stroke(self, scale_factor): ''' scale_stroke(scale_factor=float) Scales the stroke down by scale_factor. ''' self.points = [pt.scale(scale_factor) for pt in self.points] def points_distance(self, point1, point2): ''' points_distance(point1=GesturePoint, point2=GesturePoint) Returns the distance between two GesturePoints. ''' x = point1.x - point2.x y = point1.y - point2.y return math.sqrt(x * x + y * y) def stroke_length(self, point_list=None): '''Finds the length of the stroke. If a point list is given, finds the length of that list. ''' if point_list is None: point_list = self.points gesture_length = 0.0 if len(point_list) <= 1: # If there is only one point -> no length return gesture_length for i in range(len(point_list) - 1): gesture_length += self.points_distance( point_list[i], point_list[i + 1]) return gesture_length def normalize_stroke(self, sample_points=32): '''Normalizes strokes so that every stroke has a standard number of points. 
Returns True if stroke is normalized, False if it can't be normalized. sample_points controls the resolution of the stroke. ''' # If there is only one point or the length is 0, don't normalize if len(self.points) <= 1 or self.stroke_length(self.points) == 0.0: return False # Calculate how long each point should be in the stroke target_stroke_size = \ self.stroke_length(self.points) / float(sample_points) new_points = list() new_points.append(self.points[0]) # We loop on the points prev = self.points[0] src_distance = 0.0 dst_distance = target_stroke_size for curr in self.points[1:]: d = self.points_distance(prev, curr) if d > 0: prev = curr src_distance = src_distance + d # The new point need to be inserted into the # segment [prev, curr] while dst_distance < src_distance: x_dir = curr.x - prev.x y_dir = curr.y - prev.y ratio = (src_distance - dst_distance) / d to_x = x_dir * ratio + prev.x to_y = y_dir * ratio + prev.y new_points.append(GesturePoint(to_x, to_y)) dst_distance = self.stroke_length(self.points) / \ float(sample_points) * len(new_points) # If this happens, we are into troubles... if not len(new_points) == sample_points: raise ValueError('Invalid number of strokes points; got ' '%d while it should be %d' % (len(new_points), sample_points)) self.points = new_points return True def center_stroke(self, offset_x, offset_y): '''Centers the stroke by offsetting the points.''' for point in self.points: point.x -= offset_x point.y -= offset_y class Gesture: '''A python implementation of a gesture recognition algorithm by Oleg Dopertchouk: http://www.gamedev.net/reference/articles/article2039.asp Implemented by Jeiel Aranal (chemikhazi@gmail.com), released into the public domain. ''' # Tolerance for evaluation using the '==' operator DEFAULT_TOLERANCE = 0.1 def __init__(self, tolerance=None): ''' Gesture([tolerance=float]) Creates a new gesture with an optional matching tolerance value. ''' self.width = 0. self.height = 0. self.gesture_product = 0. 
self.strokes = list() if tolerance is None: self.tolerance = Gesture.DEFAULT_TOLERANCE else: self.tolerance = tolerance def _scale_gesture(self): ''' Scales down the gesture to a unit of 1.''' # map() creates a list of min/max coordinates of the strokes # in the gesture and min()/max() pulls the lowest/highest value min_x = min([stroke.min_x for stroke in self.strokes]) max_x = max([stroke.max_x for stroke in self.strokes]) min_y = min([stroke.min_y for stroke in self.strokes]) max_y = max([stroke.max_y for stroke in self.strokes]) x_len = max_x - min_x self.width = x_len y_len = max_y - min_y self.height = y_len scale_factor = max(x_len, y_len) if scale_factor <= 0.0: return False scale_factor = 1.0 / scale_factor for stroke in self.strokes: stroke.scale_stroke(scale_factor) return True def _center_gesture(self): ''' Centers the Gesture.points of the gesture.''' total_x = 0.0 total_y = 0.0 total_points = 0 for stroke in self.strokes: # adds up all the points inside the stroke stroke_y = sum([pt.y for pt in stroke.points]) stroke_x = sum([pt.x for pt in stroke.points]) total_y += stroke_y total_x += stroke_x total_points += len(stroke.points) if total_points == 0: return False # Average to get the offset total_x /= total_points total_y /= total_points # Apply the offset to the strokes for stroke in self.strokes: stroke.center_stroke(total_x, total_y) return True def add_stroke(self, point_list=None): '''Adds a stroke to the gesture and returns the Stroke instance. Optional point_list argument is a list of the mouse points for the stroke. 
''' self.strokes.append(GestureStroke()) if isinstance(point_list, list) or isinstance(point_list, tuple): for point in point_list: if isinstance(point, GesturePoint): self.strokes[-1].points.append(point) elif isinstance(point, list) or isinstance(point, tuple): if len(point) != 2: raise ValueError("Stroke entry must have 2 values max") self.strokes[-1].add_point(point[0], point[1]) else: raise TypeError("The point list should either be " "tuples of x and y or a list of " "GesturePoint objects") elif point_list is not None: raise ValueError("point_list should be a tuple/list") return self.strokes[-1] def normalize(self, stroke_samples=32): '''Runs the gesture normalization algorithm and calculates the dot product with self. ''' if not self._scale_gesture() or not self._center_gesture(): self.gesture_product = False return False for stroke in self.strokes: stroke.normalize_stroke(stroke_samples) self.gesture_product = self.dot_product(self) def get_rigid_rotation(self, dstpts): ''' Extract the rotation to apply to a group of points to minimize the distance to a second group of points. The two groups of points are assumed to be centered. This is a simple version that just picks an angle based on the first point of the gesture. 
''' if len(self.strokes) < 1 or len(self.strokes[0].points) < 1: return 0 if len(dstpts.strokes) < 1 or len(dstpts.strokes[0].points) < 1: return 0 p = dstpts.strokes[0].points[0] target = Vector([p.x, p.y]) source = Vector([p.x, p.y]) return source.angle(target) def dot_product(self, comparison_gesture): ''' Calculates the dot product of the gesture with another gesture.''' if len(comparison_gesture.strokes) != len(self.strokes): return -1 if getattr(comparison_gesture, 'gesture_product', True) is False or \ getattr(self, 'gesture_product', True) is False: return -1 dot_product = 0.0 for stroke_index, (my_stroke, cmp_stroke) in enumerate( list(zip(self.strokes, comparison_gesture.strokes))): for pt_index, (my_point, cmp_point) in enumerate( list(zip(my_stroke.points, cmp_stroke.points))): dot_product += (my_point.x * cmp_point.x + my_point.y * cmp_point.y) return dot_product def rotate(self, angle): g = Gesture() for stroke in self.strokes: tmp = [] for j in stroke.points: v = Vector([j.x, j.y]).rotate(angle) tmp.append(v) g.add_stroke(tmp) g.gesture_product = g.dot_product(g) return g def get_score(self, comparison_gesture, rotation_invariant=True): ''' Returns the matching score of the gesture against another gesture. ''' if isinstance(comparison_gesture, Gesture): if rotation_invariant: # get orientation angle = self.get_rigid_rotation(comparison_gesture) # rotate the gesture to be in the same frame. comparison_gesture = comparison_gesture.rotate(angle) # this is the normal "orientation" code. 
score = self.dot_product(comparison_gesture) if score <= 0: return score score /= math.sqrt( self.gesture_product * comparison_gesture.gesture_product) return score def __eq__(self, comparison_gesture): ''' Allows easy comparisons between gesture instances.''' if isinstance(comparison_gesture, Gesture): # If the gestures don't have the same number of strokes, its # definitely not the same gesture score = self.get_score(comparison_gesture) if (score > (1.0 - self.tolerance) and score < (1.0 + self.tolerance)): return True else: return False else: return NotImplemented def __ne__(self, comparison_gesture): result = self.__eq__(comparison_gesture) if result is NotImplemented: return result else: return not result def __lt__(self, comparison_gesture): raise TypeError("Gesture cannot be evaluated with <") def __gt__(self, comparison_gesture): raise TypeError("Gesture cannot be evaluated with >") def __le__(self, comparison_gesture): raise TypeError("Gesture cannot be evaluated with <=") def __ge__(self, comparison_gesture): raise TypeError("Gesture cannot be evaluated with >=")
mit
qwertyjune/BethSaidaBible
venv/lib/python2.7/site-packages/django/contrib/gis/forms/fields.py
74
4444
from __future__ import unicode_literals

from django import forms
from django.utils.translation import ugettext_lazy as _

# While this couples the geographic forms to the GEOS library,
# it decouples from database (by not importing SpatialBackend).
from django.contrib.gis.geos import GEOSException, GEOSGeometry

from .widgets import OpenLayersWidget


class GeometryField(forms.Field):
    """
    This is the basic form field for a Geometry.  Any textual input that is
    accepted by GEOSGeometry is accepted by this form.  By default, this
    includes WKT, HEXEWKB, WKB (in a buffer), and GeoJSON.
    """
    widget = OpenLayersWidget
    # OGC geometry type label; subclasses narrow this (e.g. 'POINT').
    geom_type = 'GEOMETRY'

    default_error_messages = {
        'required': _('No geometry value provided.'),
        'invalid_geom': _('Invalid geometry value.'),
        'invalid_geom_type': _('Invalid geometry type.'),
        'transform_error': _('An error occurred when transforming the geometry '
                             'to the SRID of the geometry form field.'),
    }

    def __init__(self, **kwargs):
        # Pop out attributes from the database field, or use sensible
        # defaults (e.g., allow None).
        self.srid = kwargs.pop('srid', None)
        self.geom_type = kwargs.pop('geom_type', self.geom_type)
        super(GeometryField, self).__init__(**kwargs)
        # The widget needs the geometry type to render the right map tools.
        self.widget.attrs['geom_type'] = self.geom_type

    def to_python(self, value):
        """
        Transforms the value to a Geometry object.

        Returns None for empty values; raises ValidationError
        ('invalid_geom') when GEOS cannot parse the input.
        """
        if value in self.empty_values:
            return None

        if not isinstance(value, GEOSGeometry):
            try:
                value = GEOSGeometry(value)
            except (GEOSException, ValueError, TypeError):
                raise forms.ValidationError(self.error_messages['invalid_geom'], code='invalid_geom')

        # Try to set the srid: prefer the widget's map SRID when the widget
        # defines one, otherwise fall back to the field's own srid.
        if not value.srid:
            try:
                value.srid = self.widget.map_srid
            except AttributeError:
                if self.srid:
                    value.srid = self.srid
        return value

    def clean(self, value):
        """
        Validates that the input value can be converted to a Geometry
        object (which is returned).  A ValidationError is raised if
        the value cannot be instantiated as a Geometry.
        """
        geom = super(GeometryField, self).clean(value)
        if geom is None:
            return geom

        # Ensuring that the geometry is of the correct type (indicated
        # using the OGC string label).  A 'GEOMETRY' field accepts any type.
        if str(geom.geom_type).upper() != self.geom_type and not self.geom_type == 'GEOMETRY':
            raise forms.ValidationError(self.error_messages['invalid_geom_type'], code='invalid_geom_type')

        # Transforming the geometry if the SRID was set.
        if self.srid and self.srid != -1 and self.srid != geom.srid:
            try:
                geom.transform(self.srid)
            except GEOSException:
                raise forms.ValidationError(
                    self.error_messages['transform_error'], code='transform_error')

        return geom

    def _has_changed(self, initial, data):
        """ Compare geographic value of data with its initial value. """

        try:
            data = self.to_python(data)
            initial = self.to_python(initial)
        except forms.ValidationError:
            # Unparseable input counts as changed.
            return True

        # Only do a geographic comparison if both values are available
        if initial and data:
            data.transform(initial.srid)
            # If the initial value was not added by the browser, the geometry
            # provided may be slightly different, the first time it is saved.
            # The comparison is done with a very low tolerance.
            return not initial.equals_exact(data, tolerance=0.000001)
        else:
            # Check for change of state of existence
            return bool(initial) != bool(data)


class GeometryCollectionField(GeometryField):
    geom_type = 'GEOMETRYCOLLECTION'


class PointField(GeometryField):
    geom_type = 'POINT'


class MultiPointField(GeometryField):
    geom_type = 'MULTIPOINT'


class LineStringField(GeometryField):
    geom_type = 'LINESTRING'


class MultiLineStringField(GeometryField):
    geom_type = 'MULTILINESTRING'


class PolygonField(GeometryField):
    geom_type = 'POLYGON'


class MultiPolygonField(GeometryField):
    geom_type = 'MULTIPOLYGON'
gpl-3.0
mbauskar/frappe
frappe/website/doctype/website_slideshow/website_slideshow.py
3
1134
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt

# For license information, please see license.txt

from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.model.document import Document


class WebsiteSlideshow(Document):
	def validate(self):
		self.validate_images()

	def on_update(self):
		# a slide show can be in use and any change in it should get reflected
		from frappe.website.render import clear_cache
		clear_cache()

	def validate_images(self):
		'''Ensure every image attached to the slideshow is public.

		Slideshow images are served on the public website, so a private
		File would be inaccessible to visitors.
		'''
		# BUG FIX: map() returns a one-shot iterator on Python 3; build a
		# real list so the "in" filter receives a reusable sequence.
		files = [row.image for row in self.slideshow_items]
		result = frappe.get_all("File", filters={
			"file_url": ("in", files)
		}, fields="is_private")
		# Generator instead of a temporary list inside any().
		if any(file.is_private for file in result):
			frappe.throw(_("All Images attached to Website Slideshow should be public"))


def get_slideshow(doc):
	'''Return the slideshow context for *doc* ({} when doc has none).'''
	if not doc.slideshow:
		return {}
	slideshow = frappe.get_doc("Website Slideshow", doc.slideshow)
	return {
		"slides": slideshow.get({"doctype": "Website Slideshow Item"}),
		"slideshow_header": slideshow.header or ""
	}
mit
pein0119/shadowsocks
shadowsocks/lru_cache.py
983
4290
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from __future__ import absolute_import, division, print_function, \
    with_statement

import collections
import logging
import time

# BUG FIX: collections.MutableMapping was deprecated in Python 3.3 and
# removed in Python 3.10; import from collections.abc with a Python 2
# fallback so the module keeps working on both.
try:
    from collections.abc import MutableMapping
except ImportError:  # Python 2
    from collections import MutableMapping


# this LRUCache is optimized for concurrency, not QPS
# n: concurrency, keys stored in the cache
# m: visits not timed out, proportional to QPS * timeout
# get & set is O(1), not O(n). thus we can support very large n
# TODO: if timeout or QPS is too large, then this cache is not very efficient,
#       as sweep() causes long pause


class LRUCache(MutableMapping):
    """Mapping with time-based LRU expiry.  This class is not thread safe.

    Entries are expired lazily by calling sweep(); close_callback (if
    given) is invoked at most once per expired value before it is dropped.
    """

    def __init__(self, timeout=60, close_callback=None, *args, **kwargs):
        self.timeout = timeout
        self.close_callback = close_callback
        self._store = {}                    # key -> value
        # visit timestamp -> keys visited at that instant
        self._time_to_keys = collections.defaultdict(list)
        self._keys_to_last_time = {}        # key -> most recent visit time
        self._last_visits = collections.deque()  # all visit times, oldest first
        self._closed_values = set()         # values already close_callback'ed
        self.update(dict(*args, **kwargs))  # use the free update to set keys

    def __getitem__(self, key):
        # O(1); a lookup counts as a visit and refreshes the key's timestamp
        t = time.time()
        self._keys_to_last_time[key] = t
        self._time_to_keys[t].append(key)
        self._last_visits.append(t)
        return self._store[key]

    def __setitem__(self, key, value):
        # O(1)
        t = time.time()
        self._keys_to_last_time[key] = t
        self._store[key] = value
        self._time_to_keys[t].append(key)
        self._last_visits.append(t)

    def __delitem__(self, key):
        # O(1).  Stale references left in _time_to_keys/_last_visits are
        # skipped by sweep(), which re-checks membership in _store.
        del self._store[key]
        del self._keys_to_last_time[key]

    def __iter__(self):
        return iter(self._store)

    def __len__(self):
        return len(self._store)

    def sweep(self):
        # O(m): walk visit times oldest-first, expiring every key whose
        # *latest* visit is older than self.timeout.
        now = time.time()
        c = 0
        while len(self._last_visits) > 0:
            least = self._last_visits[0]
            if now - least <= self.timeout:
                break
            if self.close_callback is not None:
                for key in self._time_to_keys[least]:
                    if key in self._store:
                        if now - self._keys_to_last_time[key] > self.timeout:
                            value = self._store[key]
                            # Call the callback only once per value even if
                            # it is reachable through several visit times.
                            if value not in self._closed_values:
                                self.close_callback(value)
                                self._closed_values.add(value)
            for key in self._time_to_keys[least]:
                self._last_visits.popleft()
                if key in self._store:
                    if now - self._keys_to_last_time[key] > self.timeout:
                        del self._store[key]
                        del self._keys_to_last_time[key]
                        c += 1
            del self._time_to_keys[least]
        if c:
            self._closed_values.clear()
            logging.debug('%d keys swept' % c)


def test():
    c = LRUCache(timeout=0.3)
    c['a'] = 1
    assert c['a'] == 1
    time.sleep(0.5)
    c.sweep()
    assert 'a' not in c
    c['a'] = 2
    c['b'] = 3
    time.sleep(0.2)
    c.sweep()
    assert c['a'] == 2
    assert c['b'] == 3
    time.sleep(0.2)
    c.sweep()
    c['b']
    time.sleep(0.2)
    c.sweep()
    assert 'a' not in c
    assert c['b'] == 3
    time.sleep(0.5)
    c.sweep()
    assert 'a' not in c
    assert 'b' not in c

    global close_cb_called
    close_cb_called = False

    def close_cb(t):
        global close_cb_called
        assert not close_cb_called
        close_cb_called = True

    c = LRUCache(timeout=0.1, close_callback=close_cb)
    c['s'] = 1
    c['s']
    time.sleep(0.1)
    c['s']
    time.sleep(0.3)
    c.sweep()


if __name__ == '__main__':
    test()
apache-2.0
shirou/ansible
lib/ansible/runner/connection_plugins/libvirt_lxc.py
45
4987
# Based on local.py (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# Based on chroot.py (c) 2013, Maykel Moya <mmoya@speedyrails.com>
# (c) 2013, Michael Scherer <misc@zarb.org>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

import distutils.spawn
import os
import subprocess
from ansible import errors
from ansible.callbacks import vvv


class Connection(object):
    ''' Local lxc based connections '''

    def _search_executable(self, executable):
        '''Return the full path to *executable*, or raise AnsibleError.'''
        cmd = distutils.spawn.find_executable(executable)
        if not cmd:
            # BUG FIX: the original applied "% executable" OUTSIDE the
            # AnsibleError(...) call (after the raise), so the message was
            # never formatted and the modulo was unreachable.
            raise errors.AnsibleError(
                "%s command not found in PATH" % executable)
        return cmd

    def _check_domain(self, domain):
        '''Raise AnsibleError unless *domain* is an lxc domain known to
        libvirt (checked via `virsh dominfo`).'''
        p = subprocess.Popen([self.cmd, '-q', '-c', 'lxc:///',
                              'dominfo', domain],
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        p.communicate()
        if p.returncode:
            raise errors.AnsibleError(
                "%s is not a lxc defined in libvirt" % domain)

    def __init__(self, runner, host, port, *args, **kwargs):
        self.lxc = host
        self.cmd = self._search_executable('virsh')
        self._check_domain(host)
        self.runner = runner
        self.host = host
        # port is unused, since this is local
        self.port = port

    def connect(self, port=None):
        ''' connect to the lxc; nothing to do here '''
        vvv("THIS IS A LOCAL LXC DIR", host=self.lxc)
        return self

    def _generate_cmd(self, executable, cmd):
        # With an explicit executable the safer argv-list form is used;
        # otherwise fall back to a single shell command string (exec_command
        # sets shell= accordingly based on the returned type).
        if executable:
            local_cmd = [self.cmd, '-q', '-c', 'lxc:///',
                         'lxc-enter-namespace', self.lxc, '--',
                         executable, '-c', cmd]
        else:
            local_cmd = '%s -q -c lxc:/// lxc-enter-namespace %s -- %s' % (
                self.cmd, self.lxc, cmd)
        return local_cmd

    def exec_command(self, cmd, tmp_path, sudo_user, sudoable=False,
                     executable='/bin/sh', in_data=None, su=None,
                     su_user=None):
        ''' run a command on the chroot '''
        if su or su_user:
            raise errors.AnsibleError("Internal Error: this module does not support running commands via su")

        if in_data:
            raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining")

        # We enter lxc as root so sudo stuff can be ignored
        local_cmd = self._generate_cmd(executable, cmd)

        vvv("EXEC %s" % (local_cmd), host=self.lxc)
        p = subprocess.Popen(local_cmd,
                             shell=isinstance(local_cmd, basestring),
                             cwd=self.runner.basedir,
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        stdout, stderr = p.communicate()
        return (p.returncode, '', stdout, stderr)

    def _normalize_path(self, path, prefix):
        '''Re-root *path* under *prefix*, normalizing ".." components.'''
        if not path.startswith(os.path.sep):
            path = os.path.join(os.path.sep, path)
        normpath = os.path.normpath(path)
        return os.path.join(prefix, normpath[1:])

    def put_file(self, in_path, out_path):
        ''' transfer a file from local to lxc '''
        out_path = self._normalize_path(out_path, '/')
        vvv("PUT %s TO %s" % (in_path, out_path), host=self.lxc)
        local_cmd = [self.cmd, '-q', '-c', 'lxc:///',
                     'lxc-enter-namespace', self.lxc, '--',
                     '/bin/tee', out_path]
        vvv("EXEC %s" % (local_cmd), host=self.lxc)
        p = subprocess.Popen(local_cmd, cwd=self.runner.basedir,
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        # Close the source file deterministically instead of leaking the
        # handle (the original left open() unclosed).
        with open(in_path, 'rb') as in_file:
            stdout, stderr = p.communicate(in_file.read())

    def fetch_file(self, in_path, out_path):
        ''' fetch a file from lxc to local '''
        in_path = self._normalize_path(in_path, '/')
        vvv("FETCH %s TO %s" % (in_path, out_path), host=self.lxc)
        local_cmd = [self.cmd, '-q', '-c', 'lxc:///',
                     'lxc-enter-namespace', self.lxc, '--',
                     '/bin/cat', in_path]
        vvv("EXEC %s" % (local_cmd), host=self.lxc)
        p = subprocess.Popen(local_cmd, cwd=self.runner.basedir,
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        stdout, stderr = p.communicate()
        # Close the destination file deterministically.
        with open(out_path, 'wb') as out_file:
            out_file.write(stdout)

    def close(self):
        ''' terminate the connection; nothing to do here '''
        pass
gpl-3.0
lianasyrkett/CodeU-Summer-2017
build.py
11
5144
#!/usr/bin/python

# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

###############################################################################
# This build script is responsible for building and running all modules in
# this project. The following commands are supported:
#
# clean   : Remove all files in the output directory. This will not remove
#           the output directory itself, to allow the output directory to be
#           a symlink.
#
# build   : Build the full project. This will build all java files found in
#           any of the src directories.
#
# rebuild : Call the clean and the build commands.
#
# run <class path> [ arguments ... ] : Run the specified class. All arguments
#                                      after the class path will be passed to
#                                      the java class when it runs.
###############################################################################

import os
import shutil
import subprocess
import sys

# Dictionary of settings that control java source compilation
CONFIG = {
    'out': 'bin',
    'src': ['src', 'test'],
    'libraries': [
        'third_party/junit4-4.11.jar',
        'third_party/hamcrest-core-1.3.jar'
    ],
    'separators': {'nt': ';', 'posix': ':'}
}


def clean(config):
    """Remove all files from the build output directory.

    The directory itself is kept so it may be a symlink.
    """
    out = config['out']
    for entry in [os.path.join(out, name) for name in os.listdir(out)]:
        if os.path.isdir(entry):
            shutil.rmtree(entry)
        else:
            os.remove(entry)
    print('Clean PASSED')


def build(config):
    """Build the project defined by the config object.

    Finds all java source files in the source directories, links the
    configured libraries, and writes all output to the out directory.
    """
    libraries = config['libraries']
    out = config['out']
    separator = config['separators'][os.name]
    src = config['src']

    # Find all the java source files in the given source directories.
    # Non-java source files are ignored.
    src_files = []
    for src_path in src:
        for root, dirs, files in os.walk(src_path):
            src_files += [os.path.join(root, file)
                          for file in files if file.endswith('.java')]

    # Take everything so far and construct a single javac command.
    command = ['javac',
               '-d', out,
               '-cp', separator.join([out] + libraries),
               '-Xlint'] + src_files

    print('running : %s' % command)
    print('Build %s'
          % ('PASSED' if subprocess.call(command) == 0 else 'FAILED'))


def run(config, start_class_path, arguments):
    """Run a class from within the project, forwarding *arguments*."""
    libraries = config['libraries']
    out = config['out']
    separator = config['separators'][os.name]

    command = ['java',
               '-cp', separator.join([out] + libraries),
               start_class_path] + list(arguments)

    # BUG FIX: this function (and main) used Python 2 print statements,
    # a syntax error under Python 3, while the rest of the file already
    # uses print(); normalized to the function form throughout.
    print('Running: %s' % command)
    print('Run %s'
          % ('PASSED' if subprocess.call(command) == 0 else 'FAILED'))


def usage():
    """Print basic usage info."""
    print('Usage: python build.py clean | build | rebuild | run | help')
    print(' clean : Remove all files in the output directory.')
    print(' This does not remove the root of the output tree.')
    print(' build : Build the full project. This will build all java files')
    print(' found in all of the src directories.')
    print(' rebuild : perform clean followed by build.')
    print(' run <class path> [ arguments ... ] : Run the specified class.')
    print(' All arguments after the class path will be passed to')
    print(' the java class when it runs.')
    print(' help : Print this helpful message.')


def main(args):
    """Dispatch on the first command-line argument (see usage())."""
    if len(args) > 1:
        command = args[1]
        if 'help' == command:
            usage()
        elif 'clean' == command:
            clean(CONFIG)
        elif 'build' == command:
            build(CONFIG)
        elif 'rebuild' == command:
            clean(CONFIG)
            build(CONFIG)
        elif 'run' == command:
            if len(args) > 2:
                java_class = args[2]
                java_params = args[3:]
                run(CONFIG, java_class, java_params)
            else:
                print('Run command requires a java class to run.')
                usage()
        else:
            print('Unknown command: %s' % args)
            usage()
    else:
        print('No parameters provided.')
        usage()


if __name__ == '__main__':
    main(sys.argv)
apache-2.0